mcp: implement warm_cache using libKI embeddings + DAL upserts (chunks + embeddings); returns queued count
parent d5226fe7d6
commit a0f5dd8b4f
@@ -716,17 +716,68 @@ inline std::string embed_text(const std::string& reqJson) {
 }
 
 /**
- * warm_cache (stub)
- * Request: { "namespace": string }
+ * warm_cache
+ * Request: { "namespace": string, "model?": string, "limit?": int }
  * Response: { "queued": int }
+ *
+ * Implementation: fetches recent items for the namespace, embeds their text via libKI,
+ * creates a single chunk (ord=0) per item and upserts the (chunk, embedding) rows.
  */
 inline std::string warm_cache(const std::string& reqJson) {
     const std::string nsName = detail::extract_string_field(reqJson, "namespace");
     if (nsName.empty()) {
         return detail::error_response("bad_request", "namespace is required");
     }
-    // For now, just return a dummy successful response
-    return "{\"queued\":0}";
+    std::string model = detail::extract_string_field(reqJson, "model");
+    int limit = 10;
+    if (auto lim = detail::extract_int_field(reqJson, "limit")) { if (*lim > 0) limit = *lim; }
+
+    auto nsRow = detail::database().findNamespace(nsName);
+    if (!nsRow) {
+        return std::string("{\"queued\":0}");
+    }
+
+    // Fetch recent items
+    std::vector<std::string> tags; // empty
+    auto rows = detail::database().fetchContext(nsRow->id, std::nullopt, tags, std::nullopt, limit);
+    if (rows.empty()) {
+        return std::string("{\"queued\":0}");
+    }
+
+    // Collect texts
+    std::vector<std::pair<std::string, std::string>> toEmbed; toEmbed.reserve(rows.size());
+    for (const auto &row : rows) {
+        if (row.text && !row.text->empty()) {
+            toEmbed.emplace_back(row.id, *row.text);
+        }
+        if ((int)toEmbed.size() >= limit) break;
+    }
+    if (toEmbed.empty()) {
+        return std::string("{\"queued\":0}");
+    }
+
+    // libKI
+    KI::KIClient client; KI::OllamaProvider provider; client.setProvider(&provider);
+    KI::KIEmbedOptions opts; if (!model.empty()) opts.model = QString::fromStdString(model);
+    QStringList texts; for (auto &p : toEmbed) texts.push_back(QString::fromStdString(p.second));
+    QEventLoop loop; QFuture<KI::KIEmbeddingResult> fut = client.embed(texts, opts);
+    QFutureWatcher<KI::KIEmbeddingResult> watcher; QObject::connect(&watcher, &QFutureWatcher<KI::KIEmbeddingResult>::finished, &loop, &QEventLoop::quit);
+    watcher.setFuture(fut); loop.exec(); const KI::KIEmbeddingResult result = watcher.result();
+
+    // Persist
+    int persisted = 0; const int n = std::min(result.vectors.size(), (int)toEmbed.size());
+    for (int i = 0; i < n; ++i) {
+        const auto &pair = toEmbed[(size_t)i];
+        ki::ChunkRow chunk; chunk.item_id = pair.first; chunk.ord = 0; chunk.text = pair.second;
+        auto chunkIds = detail::database().upsertChunks(std::vector<ki::ChunkRow>{chunk});
+        if (chunkIds.empty()) continue;
+        const auto &vec = result.vectors[i];
+        ki::EmbeddingRow emb; emb.chunk_id = chunkIds.front(); emb.model = result.model.toStdString(); emb.dim = vec.size();
+        emb.vector.reserve(vec.size()); for (float f : vec) emb.vector.push_back(f);
+        detail::database().upsertEmbeddings(std::vector<ki::EmbeddingRow>{emb});
+        persisted++;
+    }
+    std::ostringstream os; os << "{\"queued\":" << persisted << "}"; return os.str();
 }
 
 } // namespace Handlers
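
Note: from the caller's side, the tool takes a small JSON request and answers with how many (chunk, embedding) pairs it persisted. A minimal sketch of a direct call, assuming the Handlers header is reachable (the include name, the "notes" namespace, the "nomic-embed-text" model and limit=5 are illustrative placeholders, not values from this commit, and the server's database must already be initialized):

    #include <QCoreApplication>
    #include <iostream>
    #include <string>
    #include "mcp_handlers.h"  // hypothetical header name for the Handlers namespace shown above

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);  // the handler spins a QEventLoop, so a Qt app object must exist
        const std::string resp = Handlers::warm_cache(
            R"({"namespace":"notes","model":"nomic-embed-text","limit":5})");
        std::cout << resp << std::endl;    // prints {"queued":N}; 0 when the namespace is missing or has no text
        return 0;
    }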
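
Note: client.embed() returns a QFuture, and the handler blocks on it with a QFutureWatcher plus a nested QEventLoop. Isolated from the handler, that wait pattern looks roughly like the generic sketch below (not project code):

    #include <QEventLoop>
    #include <QFuture>
    #include <QFutureWatcher>
    #include <QObject>

    // Block the current thread (while keeping its event loop spinning) until a
    // QFuture<T> finishes, then hand back its result. Requires a running
    // QCoreApplication; T must be a result-carrying (non-void) type.
    template <typename T>
    T blockOn(QFuture<T> future) {
        QFutureWatcher<T> watcher;
        QEventLoop loop;
        QObject::connect(&watcher, &QFutureWatcher<T>::finished,
                         &loop, &QEventLoop::quit);
        watcher.setFuture(future);  // connect first, then set the future, so finished() is not missed
        loop.exec();                // nested event loop runs until finished() fires
        return watcher.result();
    }

The nested event loop keeps warm_cache synchronous, at the cost that other queued work can run while the handler waits; a fully asynchronous design would avoid that re-entrancy.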
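
Note: the DAL row types (ki::ChunkRow, ki::EmbeddingRow) are not part of this hunk. Judging only from how the handler fills them in, their shape is roughly the following; field names and types are inferred here and may not match the real headers:

    #include <string>
    #include <vector>

    namespace ki {

    // Inferred sketch: one chunk per item; ord is the chunk's position within the item.
    struct ChunkRow {
        std::string item_id;        // id of the memory item the chunk belongs to
        int ord = 0;                // chunk ordinal within the item (warm_cache always writes 0)
        std::string text;           // chunk text that gets embedded
    };

    // Inferred sketch: one embedding per chunk, keyed by the id returned by upsertChunks().
    struct EmbeddingRow {
        std::string chunk_id;       // id of the chunk this vector belongs to
        std::string model;          // embedding model name reported by libKI
        int dim = 0;                // vector dimensionality
        std::vector<float> vector;  // the embedding itself
    };

    } // namespace ki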