pre-task cleanup (reduce log spam)

Rob Colbert 2026-05-08 17:27:04 -04:00
parent 36df6444f1
commit eb37a22771
2 changed files with 57 additions and 108 deletions
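Both files get the same treatment: verbose this.log.debug dumps added during debugging (full chunk, message, and response payloads) are deleted, while entry-point, validation, and error logs stay; the rest of the churn is Prettier reformatting (single to double quotes, long lines wrapped). A minimal sketch of the logging pattern before and after, using illustrative types rather than the project's own:

// Illustrative types only; the project defines its own logger and chunk shapes.
interface Chunk {
  response?: string;
  thinking?: string;
}
interface Logger {
  debug(msg: string, data?: unknown): Promise<void>;
}

async function drainStream(
  stream: AsyncIterable<Chunk>,
  log: Logger,
): Promise<Chunk | undefined> {
  // Entry-point logs like this one survive the cleanup.
  await log.debug("drainStream called");
  let lastChunk: Chunk | undefined;
  for await (const chunk of stream) {
    // Deleted by this commit: logging every chunk emits one line per token.
    // await log.debug("stream chunk received", { chunk });
    lastChunk = chunk;
  }
  return lastChunk;
}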

@@ -36,11 +36,7 @@ export class OllamaAiApi extends AiApi {
   }

   async listModels(): Promise<IAiModelListResult> {
     await this.log.debug("OllamaAiApi.listModels called");
     const response = await this.client.list();
-    await this.log.debug("Ollama list response", { models: response.models });
     const models = response.models.map((model) => {
       const parameterCount = this.parseParameterCount(
         model.details.parameter_size,
@@ -58,21 +54,9 @@ export class OllamaAiApi extends AiApi {
   }

   async probeModel(modelId: string): Promise<IAiModelProbeResult> {
     await this.log.debug("OllamaAiApi.probeModel called", { modelId });
     const response = await this.client.show({ model: modelId });
-    await this.log.debug("Ollama show response", {
-      modelId,
-      capabilities: response.capabilities,
-      details: response.details,
-      modelInfo: response.model_info,
-      template: response.template,
-      system: response.system,
-    });
     const capabilities = this.analyzeCapabilities(response, modelId);
     const settings = this.extractSettings(response);
     return {
       capabilities,
       settings,
@@ -108,7 +92,8 @@ export class OllamaAiApi extends AiApi {
         !!modelInfo?.["vision_model"] ||
         !!modelInfo?.["clip"],
       hasEmbedding: capabilities.includes("embeddings"),
-      hasThinking: capabilities.includes("thinking") || capabilities.includes("reasoning"),
+      hasThinking:
+        capabilities.includes("thinking") || capabilities.includes("reasoning"),
       isInstructTuned:
         modelId.toLowerCase().includes("instruct") ||
         modelId.toLowerCase().includes("chat") ||
@@ -164,19 +149,18 @@ export class OllamaAiApi extends AiApi {
     let lastChunk;
     for await (const chunk of response) {
-      await this.log.debug("stream chunk received", { chunk });
       lastChunk = chunk;
       if (streamCallback) {
         if (chunk.thinking) {
           await streamCallback({
-            type: 'thinking',
+            type: "thinking",
             data: chunk.thinking,
           });
         }
         if (chunk.response) {
           await streamCallback({
-            type: 'response',
+            type: "response",
             data: chunk.response,
           });
         }
@@ -227,7 +211,7 @@ export class OllamaAiApi extends AiApi {
     // Add system prompt if present
     if (options.systemPrompt) {
       messages.push({
-        role: 'system',
+        role: "system",
         content: options.systemPrompt,
       });
     }
@@ -236,15 +220,15 @@ export class OllamaAiApi extends AiApi {
     if (options.context) {
       for (const msg of options.context) {
         if (msg.content && msg.content.trim()) {
-          if (msg.role === 'tool') {
+          if (msg.role === "tool") {
             messages.push({
-              role: 'tool',
+              role: "tool",
               content: msg.content,
               tool_name: msg.toolName,
             });
           } else {
             messages.push({
-              role: msg.role as 'user' | 'assistant' | 'system',
+              role: msg.role as "user" | "assistant" | "system",
               content: msg.content,
             });
           }
@@ -254,19 +238,24 @@ export class OllamaAiApi extends AiApi {
     // Add user prompt (required)
     messages.push({
-      role: 'user',
+      role: "user",
       content: options.userPrompt,
     });

     // VALIDATE: Ensure messages array is not empty before calling API
     if (messages.length === 0) {
-      throw new Error("Messages array is empty - cannot call Ollama API with no messages");
+      throw new Error(
+        "Messages array is empty - cannot call Ollama API with no messages",
+      );
     }

     // DEBUG: Log what we're sending to Ollama
     await this.log.debug("Ollama chat request", {
       messagesCount: messages.length,
-      messages: messages.map(m => ({ role: m.role, contentLength: m.content?.length || 0 })),
+      messages: messages.map((m) => ({
+        role: m.role,
+        contentLength: m.content?.length || 0,
+      })),
       userPrompt: options.userPrompt?.slice(0, 100),
       contextCount: options.context?.length || 0,
     });
@@ -307,21 +296,20 @@ export class OllamaAiApi extends AiApi {
     }> = [];
     for await (const chunk of response) {
-      await this.log.debug("stream chunk received", { chunk });
       lastChunk = chunk;
       if (streamCallback) {
         if (chunk.message.thinking) {
           accumulatedThinking += chunk.message.thinking;
           await streamCallback({
-            type: 'thinking',
+            type: "thinking",
             data: chunk.message.thinking,
           });
         }
         if (chunk.message.content) {
           accumulatedResponse += chunk.message.content;
           await streamCallback({
-            type: 'response',
+            type: "response",
             data: chunk.message.content,
           });
         }
@@ -341,7 +329,7 @@ export class OllamaAiApi extends AiApi {
             allToolCalls.push(toolCall);
             await streamCallback({
-              type: 'toolCall',
+              type: "toolCall",
               data: params,
               toolCallId: callId,
               toolName: tc.function.name,
@@ -371,7 +359,8 @@ export class OllamaAiApi extends AiApi {
       response: totalAccumulatedResponse,
       thinking: totalAccumulatedThinking,
       toolCalls: allToolCalls.length > 0 ? allToolCalls : undefined,
-      toolCallResults: allToolCallResults.length > 0 ? allToolCallResults : undefined,
+      toolCallResults:
+        allToolCallResults.length > 0 ? allToolCallResults : undefined,
       stats: {
         duration: {
           seconds: lastChunk.total_duration,
@@ -392,16 +381,6 @@ export class OllamaAiApi extends AiApi {
         );
         allToolCallResults.push(...toolCallResults);
-        // DEBUG: Log tool results being added to context
-        await this.log.debug("tool results ready for context", {
-          toolCallResults: toolCallResults.map(r => ({
-            callId: r.callId,
-            functionName: r.functionName,
-            resultLength: r.result?.length || 0,
-            hasError: !!r.error,
-          })),
-        });
         const assistantMsg: OllamaMessage = {
           role: "assistant",
           content: accumulatedResponse || lastChunk.message.content,
@@ -423,26 +402,11 @@ export class OllamaAiApi extends AiApi {
            role: "tool" as const,
            content: toolContent,
          };
-          await this.log.debug("adding tool result to messages", {
-            contentLength: toolMsg.content?.length || 0,
-            hasContent: !!(toolMsg.content && toolMsg.content.length),
-          });
          messages.push(toolMsg);
        }

-        // DEBUG: Log full messages array before next iteration
-        await this.log.debug("messages array for next Ollama API call", {
-          messageCount: messages.length,
-          messages: messages.map(m => ({
-            role: m.role,
-            contentLength: m.content?.length || 0,
-            tool_name: (m as any).tool_name,
-            contentPreview: m.content?.slice(0, 200),
-          })),
-        });
        // VALIDATE: Ensure tool results are in messages
-        const toolMessages = messages.filter(m => m.role === 'tool');
+        const toolMessages = messages.filter((m) => m.role === "tool");
        if (toolMessages.length === 0 && toolCallResults.length > 0) {
          await this.log.error("CRITICAL: tool results NOT in messages array", {
            toolCallResultsCount: toolCallResults.length,
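
The hunk above is the core of the tool-call loop: every tool result has to be pushed back into the conversation as a role "tool" message before the next chat call, and the VALIDATE check kept by this commit confirms that actually happened. A simplified sketch under assumed message and result shapes (the real OllamaMessage type lives in the project):

// Hypothetical, simplified shapes; tool_name mirrors the field used in the diff.
type Message =
  | { role: "system" | "user" | "assistant"; content: string }
  | { role: "tool"; content: string; tool_name?: string };

function appendToolResults(
  messages: Message[],
  toolCallResults: Array<{ functionName: string; result: string }>,
): void {
  for (const r of toolCallResults) {
    messages.push({
      role: "tool",
      content: r.result,
      tool_name: r.functionName,
    });
  }
  // Mirrors the kept VALIDATE check; the diff logs a CRITICAL error
  // here instead of throwing.
  const toolMessages = messages.filter((m) => m.role === "tool");
  if (toolMessages.length === 0 && toolCallResults.length > 0) {
    throw new Error("tool results NOT in messages array");
  }
}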

@@ -69,17 +69,10 @@ export class OpenAiApi extends AiApi {
   }

   async listModels(): Promise<IAiModelListResult> {
     await this.log.debug("OpenAiApi.listModels called");
     const response = await this.client.models.list();
-    await this.log.debug("OpenAI models list response", {
-      data: response.data,
-    });
     const models = response.data.map((model) => {
       const modelInfo = model as unknown as OpenAIModelInfo;
       const maxTokens = modelInfo.max_tokens || modelInfo.context_window;
       return {
         id: model.id,
         name: model.id,
@@ -93,40 +86,21 @@ export class OpenAiApi extends AiApi {
   }

   async probeModel(modelId: string): Promise<IAiModelProbeResult> {
     await this.log.debug("OpenAiApi.probeModel called", { modelId });
     try {
       const response = await this.client.models.retrieve(modelId);
       const modelInfo = response as unknown as OpenAIModelInfo;
-      await this.log.debug("OpenAI model retrieve response", {
-        modelId,
-        features: modelInfo.features,
-        supported_methods: modelInfo.supported_methods,
-        capabilities: modelInfo.capabilities,
-      });
       const capabilities = this.analyzeCapabilities(modelInfo);
       return {
         capabilities,
         settings: undefined,
       };
     } catch (error) {
-      await this.log.debug("Failed to retrieve model details, using fallback", {
-        modelId,
-        error: (error as Error).message,
-      });
       const listResponse = await this.client.models.list();
       const modelFromList = listResponse.data.find((m) => m.id === modelId);
       if (modelFromList) {
         const modelInfo = modelFromList as unknown as OpenAIModelInfo;
         if (modelInfo.capabilities) {
-          await this.log.debug("Using capabilities from list endpoint", {
-            modelId,
-          });
           return {
             capabilities: this.analyzeCapabilities(modelInfo),
             settings: undefined,
@@ -204,7 +178,12 @@ export class OpenAiApi extends AiApi {
       ],
       stream: true,
       ...(typeof model.params.reasoning === "string"
-        ? { reasoning_effort: model.params.reasoning as "low" | "medium" | "high" }
+        ? {
+            reasoning_effort: model.params.reasoning as
+              | "low"
+              | "medium"
+              | "high",
+          }
         : {}),
     });
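
The reasoning_effort change above is only a Prettier re-wrap of a conditional object spread: spreading an empty object contributes no keys, so the property is present only when model.params.reasoning is a string. The idiom in isolation:

// Conditional spread: reasoning_effort appears only when the guard passes.
function buildRequestParams(reasoning: unknown) {
  return {
    stream: true,
    ...(typeof reasoning === "string"
      ? { reasoning_effort: reasoning as "low" | "medium" | "high" }
      : {}),
  };
}

// buildRequestParams("high") -> { stream: true, reasoning_effort: "high" }
// buildRequestParams(42)     -> { stream: true }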
@@ -218,16 +197,16 @@ export class OpenAiApi extends AiApi {
          accumulatedResponse += delta.content;
          if (streamCallback) {
            await streamCallback({
-              type: 'response',
+              type: "response",
              data: delta.content,
            });
          }
        }
-        if ('reasoning' in delta && delta.reasoning) {
+        if ("reasoning" in delta && delta.reasoning) {
          accumulatedThinking += delta.reasoning as string;
          if (streamCallback) {
            await streamCallback({
-              type: 'thinking',
+              type: "thinking",
              data: delta.reasoning as string,
            });
          }
@@ -276,11 +255,11 @@ export class OpenAiApi extends AiApi {
     }
     if (options.context) {
       for (const msg of options.context) {
-        if (msg.role === 'tool') {
+        if (msg.role === "tool") {
          messages.push({
-            role: 'tool',
+            role: "tool",
            content: msg.content,
-            tool_call_id: msg.callId || '',
+            tool_call_id: msg.callId || "",
          });
        } else {
          messages.push({
@@ -320,7 +299,12 @@ export class OpenAiApi extends AiApi {
       tools,
       stream: true,
       ...(typeof model.params.reasoning === "string"
-        ? { reasoning_effort: model.params.reasoning as "low" | "medium" | "high" }
+        ? {
+            reasoning_effort: model.params.reasoning as
+              | "low"
+              | "medium"
+              | "high",
+          }
         : {}),
     });
@@ -335,16 +319,16 @@ export class OpenAiApi extends AiApi {
          accumulatedResponse += delta.content;
          if (streamCallback) {
            await streamCallback({
-              type: 'response',
+              type: "response",
              data: delta.content,
            });
          }
        }
-        if ('reasoning' in delta && delta.reasoning) {
+        if ("reasoning" in delta && delta.reasoning) {
          accumulatedThinking += delta.reasoning as string;
          if (streamCallback) {
            await streamCallback({
-              type: 'thinking',
+              type: "thinking",
              data: delta.reasoning as string,
            });
          }
@@ -363,7 +347,7 @@ export class OpenAiApi extends AiApi {
            allToolCalls.push(toolCall);
            if (streamCallback) {
              await streamCallback({
-                type: 'toolCall',
+                type: "toolCall",
                data: tc.function.arguments || "",
                toolCallId: tc.id,
                toolName: tc.function.name,
@@ -392,7 +376,8 @@ export class OpenAiApi extends AiApi {
       response: accumulatedResponse,
       thinking: accumulatedThinking || undefined,
       toolCalls: allToolCalls.length > 0 ? allToolCalls : undefined,
-      toolCallResults: allToolCallResults.length > 0 ? allToolCallResults : undefined,
+      toolCallResults:
+        allToolCallResults.length > 0 ? allToolCallResults : undefined,
       stats: {
         duration: {
           seconds: (Date.now() - startTime) / 1000,
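
Both clients funnel their streams through the same callback contract: events tagged "thinking", "response", or "toolCall", each carrying a data string plus, for tool calls, an id and name. A consumer sketch under that assumed shape (field names inferred from the streamCallback calls above, not confirmed against the project's interface):

// Assumed event shape; the project's actual interface may carry more fields.
interface StreamEvent {
  type: "thinking" | "response" | "toolCall";
  data: string;
  toolCallId?: string;
  toolName?: string;
}

async function renderEvent(event: StreamEvent): Promise<void> {
  switch (event.type) {
    case "thinking":
      process.stdout.write(`[thinking] ${event.data}`);
      break;
    case "response":
      process.stdout.write(event.data);
      break;
    case "toolCall":
      console.log(`\n[tool ${event.toolName} #${event.toolCallId}]`, event.data);
      break;
  }
}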