metal-kompanion/src/KLLMInterface.cpp

#include "KLLMInterface.h"
#include <KLocalizedString>
using namespace Qt::StringLiterals;
using namespace KLLMCore;
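
// KLLMInterface adapts the KLLMCore completion API used by existing callers to
// the KompanionAI client: each request is translated into a KompanionAI thread
// and the streamed reply is forwarded back through a KLLMReply.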

KLLMInterface::KLLMInterface(QObject *parent)
    : KLLMInterface{QString{}, parent}
{
}

KLLMInterface::KLLMInterface(const QString &ollamaUrl, QObject *parent)
    : KLLMOriginalInterface{ollamaUrl, parent}
    , m_kompanionClient{new KompanionAI::KIClient(this)}
    , m_ollamaProvider{new KompanionAI::OllamaProvider(this)}
{
    m_kompanionClient->setProvider(m_ollamaProvider);
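    // The client talks to Ollama through this provider; ready() and models()
    // below simply reflect the provider's current model list.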
    setOllamaUrl(ollamaUrl);
}

KLLMInterface::KLLMInterface(const QUrl &ollamaUrl, QObject *parent)
    : KLLMInterface{ollamaUrl.toString(), parent}
{
}

bool KLLMInterface::ready() const
{
    // For now, assume ready if a provider is set and it has models
    return m_kompanionClient->provider() != nullptr && !m_ollamaProvider->models().isEmpty();
}

bool KLLMInterface::hasError() const
{
    // TODO: Implement proper error checking from KompanionAI client
    return false;
}

QStringList KLLMInterface::models() const
{
    return m_ollamaProvider->models();
}
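
// Wraps the request's message in a single-user-message KompanionAI thread and
// bridges the streaming KIReply signals onto the KLLMReply handed back to the
// caller.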
KLLMReply *KLLMInterface::getCompletion(const KLLMRequest &request)
{
    KompanionAI::KIThread thread;
    KompanionAI::KIMessage message;
    message.role = u"user"_s;
    KompanionAI::KIMessagePart part;
    part.mime = u"text/plain"_s;
    part.text = request.message();
    message.parts.append(part);
    thread.messages.append(message);

    KompanionAI::KIChatOptions opts;
    opts.model = request.model().isEmpty() ? m_kompanionClient->defaultModel() : request.model();
    // TODO: Map KLLMContext to KompanionAI thread/options

    auto kiReply = m_kompanionClient->chat(thread, opts).result();
    auto kllmReply = new KLLMReply(this);

    connect(kiReply, &KompanionAI::KIReply::tokensAdded, kllmReply, [kllmReply](const QString &delta) {
        kllmReply->addContent(delta);
    });
    connect(kiReply, &KompanionAI::KIReply::finished, kllmReply, [kllmReply]() {
        kllmReply->setFinished(true);
    });
    connect(kiReply, &KompanionAI::KIReply::errorOccurred, kllmReply, [kllmReply](const KompanionAI::KIError &error) {
        kllmReply->setError(error.message);
    });
    return kllmReply;
}

KLLMReply *KLLMInterface::getModelInfo(const KLLMRequest &request)
{
    // Simplified implementation: KompanionAI has no direct getModelInfo equivalent,
    // so we return a KLLMReply carrying the model name if the provider knows it.
    auto kllmReply = new KLLMReply(this);
    if (m_ollamaProvider->models().contains(request.model())) {
        kllmReply->addContent(u"{ \"model\": \"%1\" }"_s.arg(request.model()));
    } else {
        kllmReply->setError(i18n("Model %1 not found.", request.model()));
    }
    kllmReply->setFinished(true);
    return kllmReply;
}

void KLLMInterface::reload()
{
    m_ollamaProvider->reload();
}

QString KLLMInterface::ollamaUrl() const
{
    // The Ollama URL is managed internally by the OllamaProvider in KompanionAI.
    // For compatibility we simply return an empty string here.
    return QString();
}

void KLLMInterface::setOllamaUrl(const QString &ollamaUrl)
{
    // In KompanionAI the Ollama URL is configured directly on the OllamaProvider,
    // so this setter cannot forward it yet. For now we treat the string as a model
    // name and use it as the client's default model; this needs proper handling
    // once the URL is really meant for provider configuration.
    m_kompanionClient->setDefaultModel(ollamaUrl);
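    // A possible future wiring, assuming OllamaProvider gains a URL setter
    // (setBaseUrl() is hypothetical, not an existing KompanionAI API):
    //   m_ollamaProvider->setBaseUrl(QUrl{ollamaUrl});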
}

void KLLMInterface::setOllamaUrl(const QUrl &ollamaUrl)
{
    setOllamaUrl(ollamaUrl.toString());
}

QString KLLMInterface::systemPrompt() const
{
    // TODO: Extract system prompt from KompanionAI thread if available
    return QString();
}

void KLLMInterface::setSystemPrompt(const QString &systemPrompt)
{
    Q_UNUSED(systemPrompt)
    // TODO: Set system prompt in KompanionAI thread
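    // A minimal sketch of one way to honour the prompt, assuming the interface
    // stores it in a member (m_systemPrompt is hypothetical) and getCompletion()
    // prepends it to the thread as a leading "system" message:
    //   KompanionAI::KIMessage system;
    //   system.role = u"system"_s;
    //   KompanionAI::KIMessagePart part;
    //   part.mime = u"text/plain"_s;
    //   part.text = m_systemPrompt;
    //   system.parts.append(part);
    //   thread.messages.prepend(system);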
}