gadget/packages/ai/src/ollama.ts

// src/ollama.ts
// Copyright (C) 2026 Rob Colbert <rob.colbert@openplatform.us>
// Licensed under the Apache License, Version 2.0
import assert from "node:assert";
import { Ollama } from "ollama";
import numeral from "numeral";
import {
  AiApi,
  IAiChatOptions,
  IAiChatResponse,
  IAiGenerateOptions,
  IAiGenerateResponse,
  IAiLogger,
  IAiModelConfig,
  IAiProvider,
  IAiResponseStreamFn,
} from "./api.js";
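
/**
 * Ollama-backed implementation of AiApi. Wraps the official `ollama` client,
 * pointing it at the provider's baseUrl and sending the provider's apiKey as
 * a bearer token. Both generate() and chat() always stream and reduce the
 * chunk stream to a single response object.
 */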
export class OllamaAiApi extends AiApi {
  protected client: Ollama;

  constructor(provider: IAiProvider, logger?: IAiLogger) {
    super(provider, logger);
    this.client = new Ollama({
      host: this.provider.baseUrl,
      headers: { Authorization: `Bearer ${this.provider.apiKey}` },
    });
  }

  // Currently only logs the call; model listing is not implemented yet.
  async listModels(): Promise<void> {
    await this.log.debug("OllamaAiApi.listModels called");
  }

  // Currently only logs the call; model probing is not implemented yet.
  async probeModel(modelId: string): Promise<void> {
    await this.log.debug("OllamaAiApi.probeModel called", { modelId });
  }
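
  /**
   * Runs a single-prompt completion against Ollama's generate endpoint. The
   * request always streams; chunks are logged as they arrive and folded into
   * one IAiGenerateResponse. Note: streamCallback is accepted for interface
   * parity but is not yet invoked here.
   */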
  async generate(
    model: IAiModelConfig,
    options: IAiGenerateOptions,
    streamCallback?: IAiResponseStreamFn,
  ): Promise<IAiGenerateResponse> {
    await this.log.debug("OllamaAiApi.generate called", {
      provider: model.provider.name,
      modelId: model.modelId,
    });
    const response = await this.client.generate({
      model: model.modelId,
      prompt: options.prompt,
      system: options.systemPrompt,
      stream: true,
    });
    // Each streamed chunk carries an incremental fragment of the output, and
    // only the final (done) chunk carries the timing/token stats. Accumulate
    // the text as it arrives and keep the last chunk for its stats; using the
    // last chunk's response alone would drop everything but the final fragment.
    let lastChunk;
    let responseText = "";
    let thinkingText = "";
    for await (const chunk of response) {
      await this.log.debug("stream chunk received", { chunk });
      responseText += chunk.response;
      if (chunk.thinking) {
        thinkingText += chunk.thinking;
      }
      lastChunk = chunk;
    }
    assert(lastChunk, "no stream response chunks received");
    // Ollama reports total_duration in nanoseconds; convert to seconds before
    // storing and formatting. numeral's time format string is "00:00:00".
    const durationSeconds = lastChunk.total_duration / 1e9;
    return {
      done: lastChunk.done,
      doneReason: lastChunk.done_reason,
      response: responseText,
      thinking: thinkingText || undefined,
      stats: {
        duration: {
          seconds: durationSeconds,
          text: numeral(durationSeconds).format("00:00:00"),
        },
        tokenCounts: {
          input: lastChunk.prompt_eval_count,
          response: lastChunk.eval_count,
          thinking: 0,
        },
      },
    };
  }
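
  /**
   * Runs a multi-turn chat against Ollama's chat endpoint, forwarding
   * options.context as the message history and model.params.reasoning as the
   * `think` flag. Streams and reduces chunks the same way as generate().
   * Note: streamCallback is not yet invoked here either.
   */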
  async chat(
    model: IAiModelConfig,
    options: IAiChatOptions,
    streamCallback?: IAiResponseStreamFn,
  ): Promise<IAiChatResponse> {
    await this.log.debug("OllamaAiApi.chat called", {
      provider: model.provider.name,
      modelId: model.modelId,
    });
    const response = await this.client.chat({
      model: model.modelId,
      messages: options.context,
      stream: true,
      think: model.params.reasoning,
    });
    // As with generate(), accumulate the incremental message fragments and
    // keep the final chunk for its stats.
    let lastChunk;
    let responseText = "";
    let thinkingText = "";
    for await (const chunk of response) {
      await this.log.debug("stream chunk received", { chunk });
      responseText += chunk.message.content;
      if (chunk.message.thinking) {
        thinkingText += chunk.message.thinking;
      }
      lastChunk = chunk;
    }
    assert(lastChunk, "no response chunks received");
    // total_duration is reported in nanoseconds; convert to seconds.
    const durationSeconds = lastChunk.total_duration / 1e9;
    return {
      done: lastChunk.done,
      doneReason: lastChunk.done_reason,
      response: responseText,
      thinking: thinkingText || undefined,
      stats: {
        duration: {
          seconds: durationSeconds,
          text: numeral(durationSeconds).format("00:00:00"),
        },
        tokenCounts: {
          input: lastChunk.prompt_eval_count,
          response: lastChunk.eval_count,
          thinking: 0,
        },
      },
    };
  }
}
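
// Minimal usage sketch. The provider/model shapes below are assumptions
// inferred from the fields this class reads (baseUrl, apiKey, name, modelId,
// params.reasoning, context), not a documented contract:
//
//   const api = new OllamaAiApi({
//     name: "local-ollama",
//     baseUrl: "http://localhost:11434",
//     apiKey: "",
//   } as IAiProvider);
//
//   const result = await api.chat(
//     {
//       provider: { name: "local-ollama" },
//       modelId: "llama3.1",
//       params: { reasoning: false },
//     } as IAiModelConfig,
//     { context: [{ role: "user", content: "Hello!" }] } as IAiChatOptions,
//   );
//   console.log(result.response, result.stats.tokenCounts);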