Compare commits

...

3 Commits

Author SHA1 Message Date
Χγφτ Kompanion ee37ed47bb Finish the kllm integration better
Thanks to Gemini we now have a cleaner build interface
2025-10-18 14:00:09 +02:00
Χγφτ Kompanion 2ecb214510 Add full integration tests and more debug output 2025-10-18 09:37:32 +02:00
Χγφτ Kompanion d6640abcbd Fix path to test script again - remove obsolete files 2025-10-18 09:35:37 +02:00
45 changed files with 381 additions and 1519 deletions

View File

@ -26,7 +26,6 @@ include(KDEClangFormat)
 include(ECMDeprecationSettings)
 include(KDEGitCommitHooks)
 find_package(Qt6 ${QT_MIN_VERSION} CONFIG REQUIRED COMPONENTS
     Core
     Network
@ -36,14 +35,11 @@ find_package(Qt6 ${QT_MIN_VERSION} CONFIG REQUIRED COMPONENTS
 find_package(Qt6McpServer CONFIG REQUIRED)
 find_package(Qt6McpCommon CONFIG REQUIRED)
-option(KOMPANION_USE_GUI "Build optional GUI components using Qt6Gui" ON)
-if (KOMPANION_USE_GUI)
-    find_package(Qt6 ${QT_MIN_VERSION} CONFIG REQUIRED COMPONENTS Gui)
-endif()
-find_package(KF6 ${KF6_MIN_VERSION} REQUIRED COMPONENTS
-    Config
-)
+find_package(Qt6 ${KF6_MIN_VERSON} CONFIG REQUIRED COMPONENTS Gui)
+find_package(KF6Config ${KF6_MIN_VERSION} CONFIG REQUIRED)
+find_package(KF6Parts ${KF6_MIN_VERSION} CONFIG REQUIRED)
+find_package(KF6TextEditor ${KF6_MIN_VERSION} CONFIG REQUIRED)
 find_package(Qt6Test ${QT_MIN_VERSION} CONFIG QUIET)
 set_package_properties(Qt6Test PROPERTIES
     PURPOSE "Required for tests"
@ -55,52 +51,10 @@ add_feature_info("Qt6Test" Qt6Test_FOUND "Required for building tests")
 set(KOMPANION_DB_INIT_INSTALL_DIR "${KDE_INSTALL_FULL_DATADIR}/kompanion/db/init")
 install(DIRECTORY db/init/ DESTINATION ${KDE_INSTALL_DATADIR}/kompanion/db/init FILES_MATCHING PATTERN "*.sql")
-add_subdirectory(src/dal)
-add_subdirectory(src/KompanionAI)
-add_executable(kom_mcp
-    src/main.cpp
-    src/mcp/KompanionQtServer.cpp
-)
-target_include_directories(kom_mcp PRIVATE src)
-qt_add_resources(kom_mcp kompanion_mcp_resources
-    PREFIX "/kompanion"
-    BASE "src/mcp"
-    FILES src/mcp/ToolSchemas.json
-)
-target_link_libraries(kom_mcp PRIVATE
-    kom_dal
-    kom_ai
-    KF6::ConfigCore
-    Qt6::Core
-    Qt6::Network
-    Qt6::McpServer
-    Qt6::McpCommon
-)
-target_compile_options(kom_mcp PRIVATE -fexceptions)
-target_compile_definitions(kom_mcp PRIVATE
-    PROJECT_SOURCE_DIR="${CMAKE_SOURCE_DIR}"
-    KOMPANION_DB_INIT_INSTALL_DIR="${KOMPANION_DB_INIT_INSTALL_DIR}"
-)
-install(TARGETS kom_mcp RUNTIME DESTINATION bin)
-install(FILES src/mcp/ToolSchemas.json DESTINATION ${KDE_INSTALL_DATADIR}/kompanion/mcp)
+add_subdirectory(src)
 option(BUILD_TESTS "Build tests" ON)
-add_executable(kompanion
-    src/cli/KompanionApp.cpp
-)
-target_include_directories(kompanion PRIVATE src)
-target_link_libraries(kompanion PRIVATE
-    Qt6::Core
-    Qt6::Sql
-    KF6::ConfigCore
-    kom_dal
-    kom_ai
-)
-install(TARGETS kompanion RUNTIME ${KF_INSTALL_TARGETS_DEFAULT_ARGS})
 if (BUILD_TESTS)
     enable_testing()
     add_subdirectory(tests)

View File

@ -1,29 +0,0 @@
# metal-kompanion-mcp
MCP backend and memory provider for Kompanion. Uses `qtmcp` (Qt-based MCP) to expose tools under namespace `kom.memory.v1`.
## Build
```bash
cmake -S . -B build
cmake --build build -j
```
## Layout
- `src/main.cpp` QtMcp-backed entry point (stdio/SSE backends)
- `src/mcp/ToolSchemas.json` JSON Schemas for MCP tools
- `src/memory/` interfaces for embedder and vector store
- `docs/` design notes
## Next
- Add richer tool metadata + prompt support on top of the qtmcp server.
- Implement adapters: embedder(s) + vector store(s).
- Flesh out Postgres DAL paths (prepared statements + pgvector wiring).
## Memory Tools
- `kom.memory.v1.save_context` persists conversational or workspace state in a namespace.
- `kom.memory.v1.recall_context` retrieves stored context by key, tags, or time window.
- See `docs/using-memory-tools.md` for integration notes (Codey, Claude Code) and request samples.
## Integrations
- **Kompanion-Konsole** — demo plugin for KDE Konsole that lets agents hand terminals over to the Kompanion runtime. See `integrations/konsole/README.md`.
- **JavaScript helpers** — Node.js utilities that call the MCP memory tools from scripts or web extensions. See `integrations/js/`.

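The memory tools described in the deleted README above are still exposed by the server; the e2e test script at the end of this diff exercises them over JSON-RPC. For reference, a minimal `kom.memory.v1.save_context` request follows the shape used there (the `auth_token` and `namespace` values are the test fixtures from that script, not production defaults):

```json
{
  "jsonrpc": "2.0",
  "method": "tool",
  "params": {
    "name": "kom.memory.v1.save_context",
    "arguments": {
      "auth_token": "dev_knowledge:test-secret",
      "namespace": "dev_knowledge",
      "key": "example-context",
      "content": { "message": "state to persist" },
      "tags": ["example"]
    }
  }
}
```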
View File

@ -1,18 +0,0 @@
[ 0%] Built target kom_dal_autogen_timestamp_deps
[ 4%] Built target kom_dal_autogen
[ 16%] Built target kom_dal
[ 16%] Built target kom_mcp_autogen_timestamp_deps
[ 20%] Built target kom_mcp_autogen
[ 33%] Built target kom_mcp
[ 33%] Built target kompanion_autogen_timestamp_deps
[ 37%] Built target kompanion_autogen
[ 50%] Built target kompanion
[ 50%] Built target test_mcp_tools_autogen_timestamp_deps
[ 54%] Built target test_mcp_tools_autogen
[ 66%] Built target test_mcp_tools
[ 66%] Built target contract_memory_autogen_timestamp_deps
[ 70%] Built target contract_memory_autogen
[ 83%] Built target contract_memory
[ 83%] Built target test_memory_exchange_autogen_timestamp_deps
[ 87%] Built target test_memory_exchange_autogen
[100%] Built target test_memory_exchange

11
db/init/012_test_user.sql Normal file
View File

@ -0,0 +1,11 @@
-- Create the dev_knowledge namespace if it doesn't exist
INSERT INTO namespaces (name) VALUES ('dev_knowledge') ON CONFLICT (name) DO NOTHING;
-- Create a secret for the dev_knowledge namespace for testing
DO $$
DECLARE
ns_id UUID;
BEGIN
SELECT id INTO ns_id FROM namespaces WHERE name = 'dev_knowledge';
INSERT INTO auth_secrets (namespace_id, secret_hash) VALUES (ns_id, '8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918'); -- 'test-secret'
END $$;

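Note: the e2e test script added at the end of this diff (`tests/e2e_mcp_test.sh`) authenticates against this fixture with `auth_token: "dev_knowledge:test-secret"`, i.e. the namespace name paired with the plaintext secret referenced in the comment above.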
View File

@ -9,7 +9,7 @@ DROP DATABASE IF EXISTS "$DB_NAME";
 CREATE DATABASE "$DB_NAME" OWNER "$ROLE";
 SQL
-for f in `dirname($0)`/*.sql; do
+for f in "$(dirname "$0")"/../init/*.sql; do
     if [[ "$f" == *"001_roles.sql"* ]]; then
         continue
     fi

View File

@ -1,5 +0,0 @@
# Ledger
- 2025-10-13: Initialized project `metal-kompanion-mcp`; created docs and interfaces; scaffolded CMake and main stub.
- 2025-10-13: Added MCP tool schemas for `kom.memory.v1`.
- 2025-10-13: Built MCP skeleton with `ping` and `embed_text` stub; added local-first architecture docs; added backup/sync draft specs; created tasks for privacy hardening and cloud adapters.

6
src/CMakeLists.txt Normal file
View File

@ -0,0 +1,6 @@
add_subdirectory(cli)
add_subdirectory(dal)
add_subdirectory(gui)
add_subdirectory(KompanionAI)
add_subdirectory(mcp)

View File

@ -22,7 +22,10 @@ set(KOMPANION_AI_HDRS
 add_library(kom_ai STATIC ${KOMPANION_AI_SRCS} ${KOMPANION_AI_HDRS})
-target_include_directories(kom_ai PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories(kom_ai PUBLIC
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${Qt6Core_INCLUDE_DIRS}
+)
 target_link_libraries(kom_ai PUBLIC
     Qt6::Core

12
src/cli/CMakeLists.txt Normal file
View File

@ -0,0 +1,12 @@
add_executable(kompanion
KompanionApp.cpp
)
target_include_directories(kompanion PRIVATE ../)
target_link_libraries(kompanion PRIVATE
Qt6::Core
Qt6::Sql
KF6::ConfigCore
kom_dal
kom_ai
)
install(TARGETS kompanion RUNTIME ${KF_INSTALL_TARGETS_DEFAULT_ARGS})

BIN
src/gui/.CMakeLists.txt.swp Normal file

Binary file not shown.

17
src/gui/CMakeLists.txt Normal file
View File

@ -0,0 +1,17 @@
include_directories($CMAKE_SRC_DIR/src)
add_executable(kompanion_gui
MainWindow.cpp
)
target_include_directories(kompanion_gui PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_link_libraries(kompanion_gui PRIVATE
KF6::Parts
KF6::TextEditor
KF6::ConfigCore
Qt6::McpServer
Qt6::McpCommon
kom_dal
kom_ai
)
install(TARGETS kompanion_gui RUNTIME DESTINATION bin)

120
src/gui/MainWindow.cpp Normal file
View File

@ -0,0 +1,120 @@
#include <KParts/MainWindow>
#include <KTextEditor/Editor>
#include <KTextEditor/View>
#include <KTextEditor/Document>
#include <KTextEditor/Cursor>
#include <QVBoxLayout>
#include <QLineEdit>
#include <QPushButton>
#include <QApplication>
#include <QFutureWatcher>
#include <KompanionAI/Client/KIClient.h>
#include <KompanionAI/Provider/OllamaProvider.h>
#include <KompanionAI/Message/KIMessage.h>
#include <KompanionAI/Message/KIThread.h>
#include <KompanionAI/Completion/KIReply.h>
#include <KompanionAI/Completion/KIChatOptions.h>
class MainWindow : public KParts::MainWindow
{
public:
MainWindow(QWidget *parent = nullptr) : KParts::MainWindow(parent)
{
QWidget *mainWidget = new QWidget(this);
setCentralWidget(mainWidget);
QVBoxLayout *layout = new QVBoxLayout(mainWidget);
auto editor = KTextEditor::Editor::instance();
m_document = editor->createDocument(this);
m_chatView = m_document->createView(mainWidget);
m_document->setReadWrite(false);
layout->addWidget(m_chatView);
m_chatInput = new QLineEdit(mainWidget);
layout->addWidget(m_chatInput);
QPushButton *sendButton = new QPushButton("Send", mainWidget);
layout->addWidget(sendButton);
connect(sendButton, &QPushButton::clicked, this, &MainWindow::sendMessage);
// Setup KompanionAI
m_ollamaProvider = new KompanionAI::OllamaProvider(this);
m_kompanionClient = new KompanionAI::KIClient(this);
m_kompanionClient->setProvider(m_ollamaProvider);
m_kompanionClient->setDefaultModel("llama2"); // Or some other default
}
private slots:
void sendMessage()
{
const QString message = m_chatInput->text();
if (message.isEmpty()) {
return;
}
m_chatInput->clear();
// Append user message to chat view
insertText(QString("User: %1\n").arg(message));
// Send message to KompanionAI
KompanionAI::KIThread thread;
KompanionAI::KIMessage kimessage;
kimessage.role = "user";
KompanionAI::KIMessagePart part;
part.mime = "text/plain";
part.text = message;
kimessage.parts.append(part);
thread.messages.append(kimessage);
KompanionAI::KIChatOptions opts;
opts.model = m_kompanionClient->defaultModel();
QFuture<KompanionAI::KIReply*> future = m_kompanionClient->chat(thread, opts);
QFutureWatcher<KompanionAI::KIReply*> *watcher = new QFutureWatcher<KompanionAI::KIReply*>(this);
connect(watcher, &QFutureWatcher<KompanionAI::KIReply*>::finished, this, [this, watcher]() {
KompanionAI::KIReply* reply = watcher->result();
connect(reply, &KompanionAI::KIReply::tokensAdded, this, [this](const QString& delta) {
insertText(delta);
});
connect(reply, &KompanionAI::KIReply::finished, this, [this, reply]() {
insertText("\n");
reply->deleteLater();
});
watcher->deleteLater();
});
watcher->setFuture(future);
}
private:
void insertText(const QString &text)
{
KTextEditor::Cursor endCursor(m_document->lines() - 1, m_document->lineLength(m_document->lines() - 1));
m_document->insertText(endCursor, text);
}
private:
KTextEditor::View *m_chatView;
KTextEditor::Document *m_document;
QLineEdit *m_chatInput;
KompanionAI::KIClient* m_kompanionClient;
KompanionAI::OllamaProvider* m_ollamaProvider;
};
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
MainWindow window;
window.show();
return app.exec();
}

View File

@ -1,39 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QObject>
#include <QString>
#include <QList>
#include "KIToolSpec.h"
#include "KIPolicy.h"
namespace KompanionAI
{
class KIChatOptions
{
Q_GADGET
Q_PROPERTY(QString model MEMBER model)
Q_PROPERTY(bool stream MEMBER stream)
Q_PROPERTY(bool jsonMode MEMBER jsonMode)
Q_PROPERTY(int maxTokens MEMBER maxTokens)
Q_PROPERTY(double temperature MEMBER temperature)
Q_PROPERTY(QList<KIToolSpec> tools MEMBER tools)
Q_PROPERTY(KIPolicy policy MEMBER policy)
public:
QString model;
bool stream = true;
bool jsonMode = false;
int maxTokens = 512;
double temperature = 0.2;
QList<KIToolSpec> tools;
KIPolicy policy;
};
}

View File

@ -1,24 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QString>
namespace KompanionAI
{
class KIEmbedOptions
{
Q_GADGET
Q_PROPERTY(QString model MEMBER model)
Q_PROPERTY(QString normalize MEMBER normalize)
public:
QString model = "text-embed-local";
QString normalize = "l2";
};
}

View File

@ -1,23 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QVector>
#include <QString>
namespace KompanionAI
{
class KIEmbeddingResult
{
Q_GADGET
public:
QVector<QVector<float>> vectors;
QString model;
};
}

View File

@ -1,44 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QString>
namespace KompanionAI
{
class KIError
{
public:
enum ErrorCode {
NoError,
NetworkError,
ApiError,
InvalidJson,
Cancelled
};
KIError(ErrorCode code = NoError, const QString &message = QString(), int httpStatus = 0, int retryAfter = 0)
: m_code(code)
, m_message(message)
, m_httpStatus(httpStatus)
, m_retryAfter(retryAfter)
{
}
ErrorCode code() const { return m_code; }
QString message() const { return m_message; }
int httpStatus() const { return m_httpStatus; }
int retryAfter() const { return m_retryAfter; }
private:
ErrorCode m_code;
QString m_message;
int m_httpStatus;
int m_retryAfter;
};
}

View File

@ -1,48 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QObject>
#include <QString>
#include <QList>
#include <QVariantMap>
namespace KompanionAI
{
class KIMessagePart
{
Q_GADGET
Q_PROPERTY(QString mime READ mime)
Q_PROPERTY(QString text READ text)
public:
QString mime;
QString text;
};
class KIMessage
{
Q_GADGET
Q_PROPERTY(QString role READ role)
Q_PROPERTY(QList<KIMessagePart> parts READ parts)
public:
QString role;
QList<KIMessagePart> parts;
QVariantMap metadata;
};
class KIThread
{
Q_GADGET
Q_PROPERTY(QList<KIMessage> messages READ messages)
public:
QList<KIMessage> messages;
};
}

View File

@ -1,28 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QObject>
#include <QString>
#include <QStringList>
namespace KompanionAI
{
class KIPolicy
{
Q_GADGET
Q_PROPERTY(QString visibility MEMBER visibility)
Q_PROPERTY(bool allowNetwork MEMBER allowNetwork)
Q_PROPERTY(QStringList redactions MEMBER redactions)
public:
QString visibility = "private";
bool allowNetwork = false;
QStringList redactions;
};
}

View File

@ -1,43 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QObject>
#include <QStringList>
#include <QFuture>
#include "KIReply.h"
#include "KIThread.h"
#include "KIChatOptions.h"
#include "KIEmbeddingResult.h"
#include "KIEmbedOptions.h"
#include "KICapabilities.h"
namespace KompanionAI
{
class KIProvider : public QObject
{
Q_OBJECT
Q_PROPERTY(QString name READ name CONSTANT)
Q_PROPERTY(QStringList models READ models NOTIFY modelsChanged)
Q_PROPERTY(KICapabilities caps READ caps CONSTANT)
public:
explicit KIProvider(QObject *parent = nullptr);
virtual QString name() const = 0;
virtual QStringList models() const = 0;
virtual KICapabilities caps() const = 0;
virtual QFuture<KIReply*> chat(const KIThread &thread, const KIChatOptions &opts) = 0;
virtual QFuture<KIEmbeddingResult> embed(const QStringList &texts, const KIEmbedOptions &opts) = 0;
Q_SIGNALS:
void modelsChanged();
};
}

View File

@ -1,53 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QObject>
#include <QString>
#include <QVariantMap>
#include "KIToolSpec.h"
namespace KompanionAI
{
class KIError;
class KIReply : public QObject
{
Q_OBJECT
Q_PROPERTY(bool finished READ isFinished NOTIFY finishedChanged)
Q_PROPERTY(int promptTokens READ promptTokens CONSTANT)
Q_PROPERTY(int completionTokens READ completionTokens CONSTANT)
Q_PROPERTY(QString model READ model CONSTANT)
public:
explicit KIReply(QObject *parent = nullptr);
Q_INVOKABLE QString text() const;
bool isFinished() const;
int promptTokens() const;
int completionTokens() const;
QString model() const;
Q_SIGNALS:
void tokensAdded(const QString &delta);
void toolCallProposed(const KIToolCall &call);
void toolResultRequested(const KIToolCall &call);
void traceEvent(const QVariantMap &span);
void finished();
void errorOccurred(const KIError &error);
void finishedChanged();
private:
bool m_finished = false;
int m_promptTokens = 0;
int m_completionTokens = 0;
QString m_model;
QString m_text;
};
}

View File

@ -1,65 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include <QObject>
#include <QString>
#include <QList>
#include <QVariant>
#include <QVariantMap>
namespace KompanionAI
{
class KIToolParam
{
Q_GADGET
Q_PROPERTY(QString name READ name)
Q_PROPERTY(QString type READ type)
Q_PROPERTY(bool required READ required)
Q_PROPERTY(QVariant defaultValue READ defaultValue)
public:
QString name, type;
bool required = false;
QVariant defaultValue;
};
class KIToolSpec
{
Q_GADGET
Q_PROPERTY(QString name READ name)
Q_PROPERTY(QString description READ description)
Q_PROPERTY(QList<KIToolParam> params READ params)
public:
QString name, description;
QList<KIToolParam> params;
};
class KIToolCall
{
Q_GADGET
Q_PROPERTY(QString name READ name)
Q_PROPERTY(QVariantMap arguments READ arguments)
public:
QString name;
QVariantMap arguments;
};
class KIToolResult
{
Q_GADGET
Q_PROPERTY(QString name READ name)
Q_PROPERTY(QVariant result READ result)
public:
QString name;
QVariant result;
};
}

View File

@ -1,10 +0,0 @@
// SPDX-FileCopyrightText: 2023-2024 Laurent Montel <montel.org>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#include "KLLMConstants.h"
QUrl KLLMCore::ollamaUrl()
{
return QUrl(QStringLiteral("http://127.0.0.1:11434"));
}

View File

@ -1,16 +0,0 @@
// SPDX-FileCopyrightText: 2023-2024 Laurent Montel <montel.org>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include "kllmcore_export.h"
#include <QUrl>
namespace KLLMCore
{
/**
* @brief Return default Ollama Url
* @return default Ollama Url.
*/
[[nodiscard]] KLLMCORE_EXPORT QUrl ollamaUrl();
}

View File

@ -1,31 +0,0 @@
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#include "KLLMContext.h"
#include <QJsonValue>
using namespace KLLMCore;
QJsonValue KLLMContext::toJson() const
{
switch (m_backend) {
case Backend::Ollama:
return m_data.value<QJsonArray>();
default:
return {};
}
}
void KLLMContext::setOllamaContext(const QJsonArray &context)
{
m_data = context;
m_backend = Backend::Ollama;
}
QDebug operator<<(QDebug d, const KLLMContext &t)
{
d << "m_data " << t.toJson();
return d;
}

View File

@ -1,48 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include "kllmcore_export.h"
#include <QDebug>
#include <QJsonArray>
namespace KLLMCore
{
/**
* @brief KLLMContext provides a representation of a conversation context.
*
* Most, if not all, LLMs have the concept of "context". This allows them to refer to previous messages in a conversation to
* enhance their replies. In most scenarios, this is the preferred behavior.
*
* To use KLLMContext, you simply need to get the context from each KLLMReply and set it on the next KLLMRequest that you
* send. KLLMInterface will use this in KLLMInterface::getCompletion().
*/
struct KLLMCORE_EXPORT KLLMContext {
/**
* @brief Converts the context to a JSON representation.
*
* Different LLM backends represent context in different ways; for example, while Ollama represents context as an array
* of integer identifiers, OpenAI relies on a JSON array of all the messages in the conversation so far. Therefore, this
* function exists to take any representation set on it for any backend and convert it to a JSON value suitable for
* sending in a request.
*
* @return A JSON representation of the context.
*/
[[nodiscard]] QJsonValue toJson() const;
/**
* @brief Sets an Ollama context as the current context.
* @param context The context from Ollama.
*/
void setOllamaContext(const QJsonArray &context);
private:
enum class Backend {
Ollama,
} m_backend;
QVariant m_data;
};
}
KLLMCORE_EXPORT QDebug operator<<(QDebug d, const KLLMCore::KLLMContext &t);

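The chaining pattern documented in this removed header can be illustrated with the KLLMRequest/KLLMReply APIs that are also deleted in this diff; a minimal sketch, assuming `interface` is a ready `KLLMCore::KLLMInterface*` (hypothetical variable):

```cpp
// Sketch only: carry KLLMContext from one completion into the next request.
KLLMCore::KLLMRequest first{QStringLiteral("Summarise the build changes.")};
first.setModel(interface->models().constFirst());
auto *reply = interface->getCompletion(first);
QObject::connect(reply, &KLLMCore::KLLMReply::finished, [interface, reply] {
    KLLMCore::KLLMRequest followUp{QStringLiteral("And the tests?")};
    followUp.setModel(interface->models().constFirst());
    followUp.setContext(reply->context()); // continue the same conversation
    interface->getCompletion(followUp);
    reply->deleteLater(); // caller owns the reply; auto-deletion is not implemented
});
```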
View File

@ -1,126 +0,0 @@
#include "KLLMInterface.h"
#include <KLocalizedString>
using namespace Qt::StringLiterals;
using namespace KLLMCore;
KLLMInterface::KLLMInterface(QObject *parent)
: KLLMInterface{QString{}, parent}
{
}
KLLMInterface::KLLMInterface(const QString &ollamaUrl, QObject *parent)
: KLLMOriginalInterface{ollamaUrl, parent}
, m_kompanionClient{new KompanionAI::KIClient(this)}
, m_ollamaProvider{new KompanionAI::OllamaProvider(this)}
{
m_kompanionClient->setProvider(m_ollamaProvider);
setOllamaUrl(ollamaUrl);
}
KLLMInterface::KLLMInterface(const QUrl &ollamaUrl, QObject *parent)
: KLLMInterface{ollamaUrl.toString(), parent}
{
}
bool KLLMInterface::ready() const
{
// For now, assume ready if provider is set and has models
return m_kompanionClient->provider() != nullptr && !m_ollamaProvider->models().isEmpty();
}
bool KLLMInterface::hasError() const
{
// TODO: Implement proper error checking from KompanionAI client
return false;
}
QStringList KLLMInterface::models() const
{
return m_ollamaProvider->models();
}
KLLMReply *KLLMInterface::getCompletion(const KLLMRequest &request)
{
KompanionAI::KIThread thread;
KompanionAI::KIMessage message;
message.role = "user";
KompanionAI::KIMessagePart part;
part.mime = "text/plain";
part.text = request.message();
message.parts.append(part);
thread.messages.append(message);
KompanionAI::KIChatOptions opts;
opts.model = request.model().isEmpty() ? m_kompanionClient->defaultModel() : request.model();
// TODO: Map KLLMContext to KompanionAI thread/options
auto kiReply = m_kompanionClient->chat(thread, opts).result();
auto kllmReply = new KLLMReply(this);
connect(kiReply, &KompanionAI::KIReply::tokensAdded, kllmReply, [kllmReply](const QString& delta) {
kllmReply->addContent(delta);
});
connect(kiReply, &KompanionAI::KIReply::finished, kllmReply, [kllmReply]() {
kllmReply->setFinished(true);
});
connect(kiReply, &KompanionAI::KIReply::errorOccurred, kllmReply, [kllmReply](const KompanionAI::KIError& error) {
kllmReply->setError(error.message);
});
return kllmReply;
}
KLLMReply *KLLMInterface::getModelInfo(const KLLMRequest &request)
{
// This is a simplified implementation as KompanionAI does not have a direct getModelInfo.
// We will return a KLLMReply with the model name if it exists in the provider's models.
auto kllmReply = new KLLMReply(this);
if (m_ollamaProvider->models().contains(request.model())) {
kllmReply->addContent(QString("{ \"model\": \"%1\" }").arg(request.model()));
} else {
kllmReply->setError(i18n("Model %1 not found.", request.model()));
}
kllmReply->setFinished(true);
return kllmReply;
}
void KLLMInterface::reload()
{
m_ollamaProvider->reload();
}
QString KLLMInterface::ollamaUrl() const
{
// Ollama URL is managed by the OllamaProvider internally in KompanionAI
// For compatibility, we return an empty string or a placeholder.
return QString();
}
void KLLMInterface::setOllamaUrl(const QString &ollamaUrl)
{
// In KompanionAI, the Ollama URL is configured directly on the OllamaProvider.
// For compatibility, we can re-instantiate the provider or update its internal URL.
// For now, we'll just set the default model if the URL is treated as a model name.
// This needs proper handling if the URL is truly meant for provider configuration.
m_kompanionClient->setDefaultModel(ollamaUrl);
}
void KLLMInterface::setOllamaUrl(const QUrl &ollamaUrl)
{
setOllamaUrl(ollamaUrl.toString());
}
QString KLLMInterface::systemPrompt() const
{
// TODO: Extract system prompt from KompanionAI thread if available
return QString();
}
void KLLMInterface::setSystemPrompt(const QString &systemPrompt)
{
// TODO: Set system prompt in KompanionAI thread
}

View File

@ -1,43 +0,0 @@
#ifndef KLLMINTERFACE_H
#define KLLMINTERFACE_H
#include "KLLMOriginalInterface.h"
#include "KompanionAI/Client/KIClient.h"
#include "KompanionAI/Provider/OllamaProvider.h"
namespace KLLMCore {
class KLLMInterface : public KLLMOriginalInterface
{
Q_OBJECT
public:
explicit KLLMCompatInterface(QObject *parent = nullptr);
explicit KLLMCompatInterface(const QString &ollamaUrl, QObject *parent = nullptr);
explicit KLLMCompatInterface(const QUrl &ollamaUrl, QObject *parent = nullptr);
bool ready() const override;
bool hasError() const override;
QStringList models() const override;
KLLMReply *getCompletion(const KLLMRequest &request) override;
KLLMReply *getModelInfo(const KLLMRequest &request) override;
void reload() override;
QString ollamaUrl() const override;
void setOllamaUrl(const QString &ollamaUrl) override;
void setOllamaUrl(const QUrl &ollamaUrl) override;
QString systemPrompt() const override;
void setSystemPrompt(const QString &systemPrompt) override;
private:
KompanionAI::KIClient* m_kompanionClient;
KompanionAI::OllamaProvider* m_ollamaProvider;
};
} // namespace KLLMCore
#endif // KLLMCOMPAT_H

View File

@ -1,190 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#include "KLLMInterface.h"
#include <KLocalizedString>
#include <QBuffer>
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkAccessManager>
#include <QNetworkReply>
using namespace Qt::StringLiterals;
using namespace KLLMCore;
KLLMInterface::KLLMInterface(QObject *parent)
: KLLMInterface{QString{}, parent}
{
}
KLLMInterface::KLLMInterface(const QString &ollamaUrl, QObject *parent)
: QObject{parent}
, m_manager{new QNetworkAccessManager{this}}
, m_ollamaUrl{ollamaUrl}
{
if (!m_ollamaUrl.isEmpty())
reload();
}
KLLMInterface::KLLMInterface(const QUrl &ollamaUrl, QObject *parent)
: KLLMInterface{ollamaUrl.toString(), parent}
{
}
bool KLLMInterface::ready() const
{
return m_ready && !m_hasError;
}
bool KLLMInterface::hasError() const
{
return m_hasError;
}
QStringList KLLMInterface::models() const
{
return m_models;
}
#if 0
void KLLMInterface::deleteModel(const QString &modelName)
{
Q_ASSERT(ready());
QNetworkRequest req{QUrl::fromUserInput(m_ollamaUrl + QStringLiteral("/api/delete"))};
req.setHeader(QNetworkRequest::ContentTypeHeader, QStringLiteral("application/json"));
QJsonObject data;
data["name"_L1] = modelName;
// Delete resource doesn't take argument. Need to look at how to do it.
auto buf = new QBuffer{this};
buf->setData(QJsonDocument(data).toJson(QJsonDocument::Compact));
auto reply = new KLLMReply{m_manager->deleteResource(req, buf), this};
connect(reply, &KLLMReply::finished, this, [this, reply, buf] {
Q_EMIT finished(reply->readResponse());
buf->deleteLater();
});
}
#endif
KLLMReply *KLLMInterface::getCompletion(const KLLMRequest &request)
{
Q_ASSERT(ready());
QNetworkRequest req{QUrl::fromUserInput(m_ollamaUrl + QStringLiteral("/api/generate"))};
req.setHeader(QNetworkRequest::ContentTypeHeader, QStringLiteral("application/json"));
QJsonObject data;
data["model"_L1] = request.model().isEmpty() ? m_models.constFirst() : request.model();
data["prompt"_L1] = request.message();
const auto context = request.context().toJson();
if (!context.isNull()) {
data["context"_L1] = context;
}
if (!m_systemPrompt.isEmpty()) {
data["system"_L1] = m_systemPrompt;
}
auto buf = new QBuffer{this};
buf->setData(QJsonDocument(data).toJson(QJsonDocument::Compact));
auto reply = new KLLMReply{m_manager->post(req, buf), this};
connect(reply, &KLLMReply::finished, this, [this, reply, buf] {
Q_EMIT finished(reply->readResponse());
buf->deleteLater();
});
return reply;
}
KLLMReply *KLLMInterface::getModelInfo(const KLLMRequest &request)
{
Q_ASSERT(ready());
QNetworkRequest req{QUrl::fromUserInput(m_ollamaUrl + QStringLiteral("/api/show"))};
req.setHeader(QNetworkRequest::ContentTypeHeader, QStringLiteral("application/json"));
QJsonObject data;
data["model"_L1] = request.model().isEmpty() ? m_models.constFirst() : request.model();
auto buf = new QBuffer{this};
buf->setData(QJsonDocument(data).toJson(QJsonDocument::Compact));
auto reply = new KLLMReply{m_manager->post(req, buf), this, KLLMReply::RequestTypes::Show};
connect(reply, &KLLMReply::finished, this, [this, reply, buf] {
Q_EMIT finished(reply->readResponse());
buf->deleteLater();
});
return reply;
}
void KLLMInterface::reload()
{
if (m_ollamaCheck)
disconnect(m_ollamaCheck);
QNetworkRequest req{QUrl::fromUserInput(m_ollamaUrl + QStringLiteral("/api/tags"))};
req.setHeader(QNetworkRequest::ContentTypeHeader, QStringLiteral("application/json"));
auto rep = m_manager->get(req);
m_ollamaCheck = connect(rep, &QNetworkReply::finished, this, [this, rep] {
if (rep->error() != QNetworkReply::NoError) {
Q_EMIT errorOccurred(i18n("Failed to connect to interface at %1: %2", m_ollamaUrl, rep->errorString()));
m_hasError = true;
Q_EMIT readyChanged();
Q_EMIT hasErrorChanged();
return;
}
const auto json = QJsonDocument::fromJson(rep->readAll());
const auto models = json["models"_L1].toArray();
for (const QJsonValue &model : models) {
m_models.push_back(model["name"_L1].toString());
}
Q_EMIT modelsChanged();
m_ready = !m_models.isEmpty();
m_hasError = false;
Q_EMIT readyChanged();
Q_EMIT hasErrorChanged();
});
}
QString KLLMInterface::ollamaUrl() const
{
return m_ollamaUrl;
}
void KLLMInterface::setOllamaUrl(const QString &ollamaUrl)
{
if (m_ollamaUrl == ollamaUrl)
return;
m_ollamaUrl = ollamaUrl;
Q_EMIT ollamaUrlChanged();
reload();
}
void KLLMInterface::setOllamaUrl(const QUrl &ollamaUrl)
{
setOllamaUrl(ollamaUrl.toString());
}
QString KLLMInterface::systemPrompt() const
{
return m_systemPrompt;
}
void KLLMInterface::setSystemPrompt(const QString &systemPrompt)
{
if (m_systemPrompt == systemPrompt)
return;
m_systemPrompt = systemPrompt;
Q_EMIT systemPromptChanged();
}
#include "moc_KLLMInterface.cpp"

View File

@ -1,192 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include "kllmcore_export.h"
#include "KLLMReply.h"
#include "KLLMRequest.h"
class QNetworkAccessManager;
namespace KLLMCore
{
/**
* @brief The KLLMInterface class provides an interface around the LLM API.
*
* KLLM is designed to feel similar to Qt Network. This means that all LLM requests must be routed through a central
* KLLMInterface object.
*
* To request a message or completion from the LLM, first create a KLLMRequest object with the desired message. Choose the
* model from models() that you wish to use for this request and set it on the KLLMRequest. Then call getCompletion(),
* passing your KLLMRequest object. You will receive a KLLMReply object; connect to KLLMReply::contentAdded() if you wish to
* receive content updates as they arrive from the server or connect to KLLMReply::finished() if you prefer to have the whole
* message delivered at once.
*
* You should not request any completions (or otherwise use this class) until ready() returns true. Using the interface
* before it is ready can cause problems; for example, the interface may not have loaded the available models yet or the
* backend could be unreachable.
*/
class KLLMCORE_EXPORT KLLMInterface : public QObject
{
Q_OBJECT
Q_PROPERTY(bool ready READ ready NOTIFY readyChanged FINAL)
Q_PROPERTY(bool hasError READ hasError NOTIFY hasErrorChanged FINAL)
Q_PROPERTY(QStringList models READ models NOTIFY modelsChanged FINAL)
Q_PROPERTY(QString ollamaUrl READ ollamaUrl WRITE setOllamaUrl NOTIFY ollamaUrlChanged FINAL)
Q_PROPERTY(QString systemPrompt READ systemPrompt WRITE setSystemPrompt NOTIFY systemPromptChanged FINAL)
public:
/**
* @brief Creates a KLLMInterface.
* @param parent The parent QObject.
*/
explicit KLLMInterface(QObject *parent = nullptr);
/**
* @brief Creates a KLLMInterface with the url set to \a ollamaUrl.
* @param ollamaUrl The URL to the Ollama instance.
* @param parent The parent QObject.
*/
explicit KLLMInterface(const QString &ollamaUrl, QObject *parent = nullptr);
/**
* @brief Creates a KLLMInterface with the url set to \a ollamaUrl.
* @param ollamaUrl The URL to the Ollama instance.
* @param parent The parent QObject.
*/
explicit KLLMInterface(const QUrl &ollamaUrl, QObject *parent = nullptr);
/**
* @brief Check whether the interface is ready.
*
* You should not use the interface until ready() returns true. Failure to observe this rule may result in undefined behavior.
*
* If the interface encounters an error, ready() will return false. However, do not use ready() to indicate to the user that the interface is in an error
* state, as the interface could be in the process of making its initial connection. Instead, you should use hasError() to check for an error state.
* Additionally, you should connect to errorOccurred() to handle errors as they arise.
*
* @return Returns whether the interface is ready.
*/
[[nodiscard]] bool ready() const;
/**
* @brief Check whether the interface is in an error state.
*
* After you handle an error from errorOccurred(), you should monitor this property. When it becomes \c false, you can safely resume operations.
*
* @return Returns whether the interface is in an error state.
*/
[[nodiscard]] bool hasError() const;
/**
* @brief Retrieve a list of models supported by the LLM backend.
*
* When creating a KLLMRequest, you should choose a model from this list for the request. If you do not specify a model,
* the request will probably fail.
*
* @return Returns a QStringList containing all valid models for this interface.
*/
[[nodiscard]] QStringList models() const;
/**
* @brief Get the URL to the Ollama instance.
* @return The URL for the Ollama instance.
*/
[[nodiscard]] QString ollamaUrl() const;
/**
* @brief Set the URL to the Ollama instance.
*
* Since Ollama is a self-hostable service, users may wish to use different instances. Use this function to set the URL to the desired instance. It should
* \a not contain the \c /api portion of the URL.
*
* @param ollamaUrl The new URL for the Ollama instance.
*/
void setOllamaUrl(const QString &ollamaUrl);
/**
* @brief A convenience overload of setOllamaUrl() that takes a QUrl.
* @param ollamaUrl The new URL for the Ollama instance.
*/
void setOllamaUrl(const QUrl &ollamaUrl);
/**
* @brief Get the system prompt for the LLM.
* @return The system prompt string.
*/
[[nodiscard]] QString systemPrompt() const;
/**
* @brief Set the system prompt for the LLM.
*
* LLMs can take system prompts that instruct them on how they should generally behave in a conversation. This could be anything from how they speak to what
* types of information they prefer to present. You can set a system prompt here to better cater to your users.
*
* @param systemPrompt The system prompt for the LLM.
*/
void setSystemPrompt(const QString &systemPrompt);
public Q_SLOTS:
/**
* @brief Request a completion from the LLM.
*
* Calling this function starts a request to the LLM backend. You should use the returned KLLMReply pointer to track the
* status of the LLM's response. Once the KLLMReply emits KLLMReply::finished(), it is your responsibility to either
* track or delete the KLLMReply; auto-deleting is not implemented yet.
*
* @param request The request object that will be used to create the actual LLM request.
* @return Returns a pointer to a KLLMReply that can be used to track the progress of the reply.
*/
KLLMReply *getCompletion(const KLLMRequest &request);
/**
* @brief Request model info from Ollama.
*
* Calling this function starts a request to the LLM backend. You should use the returned KLLMReply pointer to track the
* status of the LLM's response. Once the KLLMReply emits KLLMReply::finished(), it is your responsibility to either
* track or delete the KLLMReply; auto-deleting is not implemented yet.
*
* @param request The request object that will be used to create the actual LLM request.
* @return Returns a pointer to a KLLMReply that can be used to track the progress of the reply.
*/
KLLMReply *getModelInfo(const KLLMRequest &request);
/**
* @brief Reload the LLM interface.
*
* Reloading the interface can be used to check if a network error is gone or to see if the available models have changed.
*/
void reload();
Q_SIGNALS:
/**
* @brief This signal is emitted when any completion requested by the interface is completed.
* @param replyText Contains the text of the completion.
*/
void finished(const QString &replyText);
void readyChanged();
void hasErrorChanged();
void modelsChanged();
void ollamaUrlChanged();
void systemPromptChanged();
/**
* @brief An error occurred while communicating with the interface.
* @param message Contains the human readable error message.
*/
void errorOccurred(const QString &message);
private:
QNetworkAccessManager *const m_manager;
QStringList m_models;
bool m_ready = false;
bool m_hasError = false;
QString m_ollamaUrl;
QString m_systemPrompt;
QMetaObject::Connection m_ollamaCheck;
};
}

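A compact usage sketch of the interface documented above (assumptions: a local Ollama instance at the default URL from `KLLMConstants`, output streamed via qDebug):

```cpp
// Sketch only: wait for ready(), then stream a completion.
auto *iface = new KLLMCore::KLLMInterface(QStringLiteral("http://127.0.0.1:11434"));
QObject::connect(iface, &KLLMCore::KLLMInterface::readyChanged, iface, [iface] {
    if (!iface->ready())
        return;
    KLLMCore::KLLMRequest req{QStringLiteral("Hello!")};
    req.setModel(iface->models().constFirst()); // pick a model from models()
    auto *reply = iface->getCompletion(req);
    QObject::connect(reply, &KLLMCore::KLLMReply::contentAdded, reply, [reply] {
        qDebug().noquote() << reply->readResponse(); // grows as tokens arrive
    });
    QObject::connect(reply, &KLLMCore::KLLMReply::finished, reply, &QObject::deleteLater);
});
```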
View File

@ -1,106 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#include "KLLMReply.h"
#include "kllmcore_debug.h"
#include <QNetworkReply>
using namespace Qt::StringLiterals;
using namespace KLLMCore;
KLLMReply::KLLMReply(QNetworkReply *netReply, QObject *parent, RequestTypes requestType)
: QObject{parent}
, m_reply{netReply}
, m_requestType{requestType}
{
connect(m_reply, &QNetworkReply::finished, m_reply, [this] {
// Normally, we could assume that the tokens will never be empty once the request finishes, but it could be possible
// that the request failed and we have no tokens to parse.
if (m_requestType == RequestTypes::StreamingGenerate && !m_tokens.empty()) {
const auto finalResponse = m_tokens.constLast();
m_context.setOllamaContext(finalResponse["context"_L1].toArray());
m_info.totalDuration = std::chrono::nanoseconds{finalResponse["total_duration"_L1].toVariant().toULongLong()};
m_info.loadDuration = std::chrono::nanoseconds{finalResponse["load_duration"_L1].toVariant().toULongLong()};
m_info.promptEvalTokenCount = finalResponse["prompt_eval_count"_L1].toVariant().toULongLong();
m_info.promptEvalDuration = std::chrono::nanoseconds{finalResponse["prompt_eval_duration"_L1].toVariant().toULongLong()};
m_info.tokenCount = finalResponse["eval_count"_L1].toVariant().toULongLong();
m_info.duration = std::chrono::nanoseconds{finalResponse["eval_duration"_L1].toVariant().toULongLong()};
}
qCDebug(KLLMCORE_LOG) << "Ollama response finished";
m_finished = true;
Q_EMIT finished();
});
connect(m_reply, &QNetworkReply::errorOccurred, m_reply, [](QNetworkReply::NetworkError e) {
qCDebug(KLLMCORE_LOG) << "Ollama HTTP error:" << e;
});
connect(m_reply, &QNetworkReply::downloadProgress, m_reply, [this](qint64 received, qint64 /*total*/) {
m_incompleteTokens += m_reply->read(received - m_receivedSize);
m_receivedSize = received;
switch (m_requestType) {
case RequestTypes::Show:
m_tokens.append(QJsonDocument::fromJson(m_incompleteTokens));
break;
case RequestTypes::StreamingGenerate:
auto completeTokens = m_incompleteTokens.split('\n');
if (completeTokens.size() <= 1) {
return;
}
m_incompleteTokens = completeTokens.last();
completeTokens.removeLast();
m_tokens.reserve(completeTokens.count());
for (const auto &tok : std::as_const(completeTokens)) {
m_tokens.append(QJsonDocument::fromJson(tok));
}
break;
}
Q_EMIT contentAdded();
});
}
QString KLLMReply::readResponse() const
{
QString ret;
switch (m_requestType) {
case RequestTypes::Show:
ret += QString::fromLatin1("## Template: \n```\n") + m_tokens.constFirst()["template"_L1].toString() + QString::fromLatin1("\n```\n");
ret += QString::fromLatin1("## Modelfile: \n```\n") + m_tokens.constFirst()["modelfile"_L1].toString() + QString::fromLatin1("\n```\n");
ret += QString::fromLatin1("## Parameters: \n```\n") + m_tokens.constFirst()["parameters"_L1].toString() + QString::fromLatin1("\n```\n");
ret += QString::fromLatin1("## Details: \n```\n")
+ QString::fromLatin1(QJsonDocument::fromVariant(m_tokens.constFirst()["details"_L1].toVariant()).toJson()) + QString::fromLatin1("\n```\n");
ret += QString::fromLatin1("## Model Info: \n```\n")
+ QString::fromLatin1(QJsonDocument::fromVariant(m_tokens.constFirst()["model_info"_L1].toVariant()).toJson()) + QString::fromLatin1("\n```\n");
break;
case RequestTypes::StreamingGenerate:
for (const auto &tok : m_tokens)
ret += tok["response"_L1].toString();
break;
}
return ret;
}
const KLLMContext &KLLMReply::context() const
{
return m_context;
}
const KLLMReplyInfo &KLLMReply::info() const
{
return m_info;
}
const KLLMReply::RequestTypes &KLLMReply::requestType() const
{
return m_requestType;
}
bool KLLMReply::isFinished() const
{
return m_finished;
}
#include "moc_KLLMReply.cpp"

View File

@ -1,154 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include "kllmcore_export.h"
#include <QJsonDocument>
#include "KLLMContext.h"
class QNetworkReply;
namespace KLLMCore
{
/**
* @brief The KLLMReplyInfo class represents information about a reply from an LLM.
*
* When an LLM generates a completion, the server generally will return some information about the completion, including the
* duration of the completion, the number of tokens received, and the duration of the prompt evaluation. This struct encapsulates such information.
* If any one of these fields is not available, it will be set to its default value.
*/
struct KLLMCORE_EXPORT KLLMReplyInfo {
//! The total time from when the request was received by the server to when the reply was returned.
std::chrono::nanoseconds totalDuration;
//! The time spent loading the model.
std::chrono::nanoseconds loadDuration;
//! The number of tokens in the prompt.
int promptEvalTokenCount;
//! The time spent evaluating the prompt.
std::chrono::nanoseconds promptEvalDuration;
//! The number of tokens in the reply.
int tokenCount;
//! The time spent generating the reply.
std::chrono::nanoseconds duration;
};
/**
* @brief The KLLMReply class represents a reply from an LLM.
*
* Requesting a completion from a KLLMInterface will return a KLLMReply. You can use this to track the progress of the LLM's
* reply.
*
* If you want to stream a reply as it is written in real time, connect to contentAdded() and use readResponse() to retrieve
* the new content. If you prefer to wait for the entire reply before displaying anything, connect to finished(), which will
* only be emitted once the reply is complete.
*/
class KLLMCORE_EXPORT KLLMReply : public QObject
{
Q_OBJECT
public:
/**
* @brief Specifies the request type.
*
* When the class is instantiated, the type of request should be specified.
*/
enum class RequestTypes {
StreamingGenerate,
Show
};
/**
* @brief Get the current response content.
*
* This function returns what it has received of the response so far. Therefore, until finished() is emitted, this
* function may return different values. However, once finished() is emitted, the content is guaranteed to remain
* constant.
*
* @return The content that has been returned so far.
*/
[[nodiscard]] QString readResponse() const;
/**
* @brief Get the context token for this response (if applicable).
*
* Messages sent by most LLMs have a context identifier that allows you to chain messages into a conversation. To create
* such a conversation, you need to take this context object and set it on the next KLLMRequest in the conversation.
* KLLMInterface::getCompletion() will use that context object to continue the message thread.
*
* @return A context object that refers to this response.
*/
const KLLMContext &context() const;
/**
* @brief Get extra information about the reply (if applicable).
*
* This function returns a KLLMReplyInfo object containing information about this reply. If the reply has not finished, the KLLMReplyInfo object will have
* all members set to their default values.
*
* @return Extra information about the reply.
*/
const KLLMReplyInfo &info() const;
/**
* @brief Check whether the reply has finished.
*
* If you need to know if the response has finished changing or if the context has been received yet, call this function.
*
* @return Whether the reply has finished.
*/
[[nodiscard]] bool isFinished() const;
/**
* @brief Get request type.
*
* The request type is set when this object is created.
*
* @return Corresponding request type.
*/
const RequestTypes &requestType() const;
protected:
explicit KLLMReply(QNetworkReply *netReply, QObject *parent = nullptr, RequestTypes requestType = RequestTypes::StreamingGenerate);
friend class KLLMInterface;
Q_SIGNALS:
/**
* @brief Emits when new content has been added to the response.
*
* If you are not streaming the response live, this signal is not of importance to you. However, if you are streaming
* content, when this signal is emitted, you should call readResponse() to update the response that your application
* shows.
*/
void contentAdded();
/**
* @brief Emits when the LLM has finished returning its response.
*
* After this signal has emitted, the content is guaranteed to not change. At this point, you should call readResponse()
* to get the content and then either take ownership of the KLLMReply or delete it, as automatic reply deletion is not
* implemented yet.
*/
void finished();
private:
QNetworkReply *const m_reply;
QByteArray m_incompleteTokens;
QList<QJsonDocument> m_tokens;
KLLMContext m_context;
KLLMReplyInfo m_info;
RequestTypes m_requestType = RequestTypes::StreamingGenerate;
int m_receivedSize = 0;
bool m_finished = false;
};
}

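Once `finished()` has fired, the `KLLMReplyInfo` struct above carries the timing data; a short sketch, assuming `reply` is a finished `KLLMReply*`:

```cpp
// Sketch only: inspect reply metadata after KLLMReply::finished().
const KLLMCore::KLLMReplyInfo &info = reply->info();
qDebug() << "reply tokens:" << info.tokenCount
         << "eval duration (ns):" << info.duration.count()
         << "model load (ns):" << info.loadDuration.count();
```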
View File

@ -1,49 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#include "KLLMRequest.h"
using namespace KLLMCore;
KLLMRequest::KLLMRequest(const QString &message)
: m_message{message}
{
}
QString KLLMRequest::message() const
{
return m_message;
}
const KLLMContext &KLLMRequest::context() const
{
return m_context;
}
QString KLLMRequest::model() const
{
return m_model;
}
void KLLMRequest::setMessage(const QString &message)
{
m_message = message;
}
void KLLMRequest::setContext(const KLLMContext &context)
{
m_context = context;
}
void KLLMRequest::setModel(const QString &model)
{
m_model = model;
}
QDebug operator<<(QDebug d, const KLLMRequest &t)
{
d << "Model: " << t.model();
d << "Message: " << t.message();
d << "Context: " << t.context();
return d;
}

View File

@ -1,80 +0,0 @@
// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
#pragma once
#include "kllmcore_export.h"
#include "KLLMContext.h"
#include <QDebug>
namespace KLLMCore
{
/**
* @brief The KLLMRequest class contains a representation of a message to an LLM.
*
* Before calling KLLMInterface::getCompletion(), you need to create a KLLMRequest with information about the desired
* completion.
*/
class KLLMCORE_EXPORT KLLMRequest
{
public:
/**
* @brief Creates a KLLMRequest with the message \a message.
* @param message The message that will be sent to the LLM.
* @param parent
*/
explicit KLLMRequest(const QString &message);
/**
* @brief Use this to get the message for the LLM.
* @return Returns the message to be sent to the LLM.
*/
[[nodiscard]] QString message() const;
/**
* @brief Use this to retrieve the context for the LLM.
* @return Returns the context object to be sent to the LLM.
*/
const KLLMContext &context() const;
/**
* @brief Gets the LLM model that will be used by the backend.
* @return The name of the model to be used for this request.
*/
[[nodiscard]] QString model() const;
/**
* @brief Sets the message to be sent to the LLM.
* @param message The message text to be sent to the LLM.
*/
void setMessage(const QString &message);
/**
* @brief Sets the conversation context for the LLM.
*
* If you want the LLM to respond in context of previous messages, you should set this to the context returned in the
* previous KLLMReply.
*
* @param context The context object for this request.
*/
void setContext(const KLLMContext &context);
/**
* @brief Sets the model to be used for this request.
*
* You should set this to one of the values returned by KLLMInterface::models(); failure to do so will likely produce an
* error from the backend.
*
* @param model The name of the model to be used for this request.
*/
void setModel(const QString &model);
private:
QString m_message;
KLLMContext m_context;
QString m_model;
};
}
KLLMCORE_EXPORT QDebug operator<<(QDebug d, const KLLMCore::KLLMRequest &t);

29
src/mcp/CMakeLists.txt Normal file
View File

@ -0,0 +1,29 @@
add_library(kom_mcp STATIC
KompanionQtServer.cpp
)
qt_add_resources(kom_mcp kompanion_mcp_resources
PREFIX "/kompanion"
BASE "."
FILES ToolSchemas.json
)
target_link_libraries(kom_mcp PRIVATE
kom_dal
kom_ai
KF6::ConfigCore
Qt6::Core
Qt6::Network
Qt6::McpServer
Qt6::McpCommon
)
target_compile_options(kom_mcp PRIVATE -fexceptions)
target_compile_definitions(kom_mcp PRIVATE
PROJECT_SOURCE_DIR="${CMAKE_SOURCE_DIR}"
KOMPANION_DB_INIT_INSTALL_DIR="${KOMPANION_DB_INIT_INSTALL_DIR}"
)
install(FILES ToolSchemas.json DESTINATION ${KDE_INSTALL_DATADIR}/kompanion/mcp)

View File

@ -112,6 +112,11 @@ inline std::optional<std::string> currentDsnSource() {
 } // namespace detail
+// Echoes back the request payload
+inline std::string echo(const std::string& reqJson) {
+    return reqJson;
+}
 // Produces a JSON response summarising project state: memory docs, task table, git status.
 inline std::string project_snapshot(const std::string& reqJson) {
     (void)reqJson;

View File

@ -14,7 +14,7 @@
 #include <unordered_set>
 #include <vector>
-#include "dal/PgDal.hpp"
+#include "PgDal.hpp"
 namespace Handlers {
 namespace detail {

View File

@ -69,7 +69,8 @@ KompanionQtServer::KompanionQtServer(const QString &backend, KomMcpServer *logic
         return result;
     });
-    addRequestHandler([this](const QUuid &, const QMcpCallToolRequest &request, QMcpJSONRPCErrorError *error) {
+    addRequestHandler([this](const QUuid &correlationId, const QMcpCallToolRequest &request, QMcpJSONRPCErrorError *error) {
+        qDebug() << "KompanionQtServer: Received tool call request:" << correlationId;
         QMcpCallToolResult result;
         if (!m_logic) {
@ -138,7 +139,9 @@ KompanionQtServer::KompanionQtServer(const QString &backend, KomMcpServer *logic
         }
         const QByteArray payload = QJsonDocument(args).toJson(QJsonDocument::Compact);
+        qDebug() << "KompanionQtServer: Dispatching payload:" << payload;
         const std::string responseStr = m_logic->dispatch(toolKey, payload.toStdString());
+        qDebug() << "KompanionQtServer: Received response:" << responseStr.c_str();
         const QByteArray jsonBytes = QByteArray::fromStdString(responseStr);
         QJsonParseError parseError{};
@ -158,6 +161,7 @@ KompanionQtServer::KompanionQtServer(const QString &backend, KomMcpServer *logic
 QList<QMcpTool> KompanionQtServer::loadToolsFromSchema() const
 {
+    qDebug() << "KompanionQtServer: Loading tools from schema...";
     QList<QMcpTool> tools;
     QFile kSchemaResource(":/kompanion/ToolSchemas.json");
     if (!kSchemaResource.open(QIODevice::ReadOnly)) {

View File

@ -5,6 +5,7 @@
 #include "HandlersMemory.hpp"
 inline void register_default_tools(KomMcpServer& server) {
+    server.registerTool("echo", Handlers::echo);
     server.registerTool("kom.memory.v1.save_context", Handlers::save_context);
     server.registerTool("kom.memory.v1.recall_context", Handlers::recall_context);
     server.registerTool("kom.memory.v1.embed_text", Handlers::embed_text);

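With `echo` registered above, the round trip through the dispatch path that KompanionQtServer uses (`m_logic->dispatch(toolKey, payload)`) can be smoke-tested directly. A sketch, assuming `KomMcpServer` is default-constructible and `dispatch` takes and returns `std::string`, as the server code in this diff suggests:

```cpp
#include <cassert>
#include <string>

KomMcpServer server;
register_default_tools(server);
// echo returns its request payload verbatim
assert(server.dispatch("echo", R"({"ping":true})") == R"({"ping":true})");
```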
View File

@ -18,6 +18,11 @@ inline std::string json_arr(const std::vector<std::string>& items) {
     os << "]"; return os.str();
 }
+// `echo` tool: echoes back the input
+inline std::string echo_response(const std::string& input) {
+    return input;
+}
 // `ping` tool: echoes { ok: true, tools: [...] }
 inline std::string ping_response(const std::vector<std::string>& toolNames) {
     std::vector<std::string> quoted; quoted.reserve(toolNames.size());

View File

@ -24,3 +24,9 @@ target_link_libraries(test_memory_exchange PRIVATE kom_dal)
 target_compile_options(test_memory_exchange PRIVATE -fexceptions)
 add_test(NAME mcp_memory_exchange COMMAND test_memory_exchange)
+add_test(
+    NAME e2e_mcp_test
+    COMMAND /bin/bash ${CMAKE_CURRENT_SOURCE_DIR}/e2e_mcp_test.sh
+    WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+)

153
tests/e2e_mcp_test.sh Executable file
View File

@ -0,0 +1,153 @@
#!/usr/bin/env bash
set -euo pipefail
set -x
# --- Test Configuration ---
TEST_DB_NAME="kompanion_autotest"
MCP_SERVER_EXECUTABLE="./bin/kom_mcp"
PROJECT_ROOT_DIR=$(git rev-parse --show-toplevel)
MCP_SERVER_HOST="127.0.0.1"
MCP_SERVER_PORT="8081"
MCP_SERVER_URL="http://${MCP_SERVER_HOST}:${MCP_SERVER_PORT}"
# --- Cleanup Function ---
cleanup() {
echo "--- Cleaning up ---"
if [ -n "$mcp_server_pid" ]; then
kill "$mcp_server_pid" || true
fi
pkill -f kom_mcp || true
sleep 1 # Give the OS time to release the port
netstat -tuln | grep ":${MCP_SERVER_PORT}" || true # Check if port is still in use
psql -v ON_ERROR_STOP=1 -c "DROP DATABASE IF EXISTS \"$TEST_DB_NAME\";" >/dev/null 2>&1
}
trap cleanup EXIT
echo "--- Setting up test environment ---"
echo ">> Initializing test database..."
"${PROJECT_ROOT_DIR}/db/scripts/create-test-db.sh" "$TEST_DB_NAME"
echo ">> Harvesting embeddings..."
export DB_URL="dbname=${TEST_DB_NAME} user=kompanion host=/var/run/postgresql" EMBED_NAMESPACE="dev_knowledge"
python3 "${PROJECT_ROOT_DIR}/tools/ingest_dir.py" "${PROJECT_ROOT_DIR}/tests/test_data" "dev_knowledge"
echo ">> Starting MCP server..."
sleep 2
timeout 10 $MCP_SERVER_EXECUTABLE --backend sse --address "${MCP_SERVER_HOST}:${MCP_SERVER_PORT}" < /dev/null > /dev/null 2>&1 &
mcp_server_pid=$!
sleep 5
ps -ef | grep kom_mcp
# --- API Test Functions ---
send_request() {
local session_id=$1
local payload=$2
curl -s -X POST -H "Content-Type: application/json" -d "$payload" "${MCP_SERVER_URL}/messages?session_id=${session_id}"
}
# --- Running API tests ---
echo "--> Establishing SSE connection..."
SSE_RESPONSE=$(curl -s -N -H "Accept:text/event-stream" "${MCP_SERVER_URL}/sse")
sleep 1 # Give the server time to send the response
SESSION_ID=$(echo "$SSE_RESPONSE" | grep -m 1 -oE 'data: /messages/\?session_id=([a-f0-9-]+)' | cut -d '=' -f 2)
if [ -z "$SESSION_ID" ]; then
echo "Failed to get session ID"
exit 1
fi
echo "Session ID: $SESSION_ID"
# Test upsert_memory
echo "--> Testing upsert_memory..."
UPSERT_PAYLOAD='{
"jsonrpc": "2.0",
"method": "tool",
"params": {
"name": "kom.memory.v1.upsert_memory",
"arguments": {
"auth_token": "dev_knowledge:test-secret",
"namespace": "dev_knowledge",
"items": [
{
"id": "test-item-1",
"text": "This is a test item for upsert_memory.",
"tags": ["test", "upsert"]
}
]
}
}
}'
response=$(send_request "$SESSION_ID" "$UPSERT_PAYLOAD")
echo "$response" | grep '"status":"ok"' > /dev/null || (echo "upsert_memory test failed" && exit 1)
echo "upsert_memory test passed."
# Test search_memory
echo "--> Testing search_memory..."
SEARCH_PAYLOAD='{
"jsonrpc": "2.0",
"method": "tool",
"params": {
"name": "kom.memory.v1.search_memory",
"arguments": {
"auth_token": "dev_knowledge:test-secret",
"namespace": "dev_knowledge",
"query": {
"text": "upsert"
}
}
}
}'
response=$(send_request "$SESSION_ID" "$SEARCH_PAYLOAD")
echo "$response" | grep '"id":"test-item-1"' > /dev/null || (echo "search_memory test failed" && exit 1)
echo "search_memory test passed."
# Test save_context
echo "--> Testing save_context..."
SAVE_CONTEXT_PAYLOAD='{
"jsonrpc": "2.0",
"method": "tool",
"params": {
"name": "kom.memory.v1.save_context",
"arguments": {
"auth_token": "dev_knowledge:test-secret",
"namespace": "dev_knowledge",
"key": "test-context-1",
"content": {
"message": "This is a test context."
},
"tags": ["test", "context"]
}
}
}'
response=$(send_request "$SESSION_ID" "$SAVE_CONTEXT_PAYLOAD")
echo "$response" | grep '"id":' > /dev/null || (echo "save_context test failed" && exit 1)
echo "save_context test passed."
# Test recall_context
echo "--> Testing recall_context..."
RECALL_CONTEXT_PAYLOAD='{
"jsonrpc": "2.0",
"method": "tool",
"params": {
"name": "kom.memory.v1.recall_context",
"arguments": {
"auth_token": "dev_knowledge:test-secret",
"namespace": "dev_knowledge",
"key": "test-context-1"
}
}
}'
response=$(send_request "$SESSION_ID" "$RECALL_CONTEXT_PAYLOAD")
echo "$response" | grep '"key":"test-context-1"' > /dev/null || (echo "recall_context test failed" && exit 1)
echo "recall_context test passed."