more refactoring to standardize interfaces

We want to speak only one language when dealing with AI content to
minimize the number of maps, transforms, and copies. This initiative
isn't done; this is a checkpoint along the way while conducting
experiments.
This commit is contained in:
Rob Colbert 2026-04-29 11:47:28 -04:00
parent 096d8fe8b3
commit 9cb689668f
4 changed files with 112 additions and 77 deletions

View File

@ -7,12 +7,18 @@ import { Types, Schema, model } from "mongoose";
import { import {
ChatSessionMode, ChatSessionMode,
ChatTurnStatus, ChatTurnStatus,
IChatTurnPrompts,
IChatSubagentProcess, IChatSubagentProcess,
IChatToolCall, IChatToolCall,
IChatTurn, IChatTurn,
IChatTurnStats, IChatTurnStats,
} from "@gadget/api"; } from "@gadget/api";
export const ChatTurnPromptsSchema = new Schema<IChatTurnPrompts>({
user: { type: String, required: true },
system: { type: String, required: false },
});
export const ChatTurnStatsSchema = new Schema<IChatTurnStats>({ export const ChatTurnStatsSchema = new Schema<IChatTurnStats>({
toolCallCount: { type: Number, default: 0, required: true }, toolCallCount: { type: Number, default: 0, required: true },
inputTokens: { type: Number, default: 0, required: true }, inputTokens: { type: Number, default: 0, required: true },
@ -55,7 +61,7 @@ export const ChatTurnSchema = new Schema<IChatTurn>({
default: ChatTurnStatus.Processing, default: ChatTurnStatus.Processing,
required: true, required: true,
}, },
prompt: { type: String, required: true }, prompts: { type: ChatTurnPromptsSchema, required: true },
thinking: { type: String, required: false }, thinking: { type: String, required: false },
response: { type: String, required: false }, response: { type: String, required: false },
toolCalls: { type: [ChatToolCallSchema], default: [], required: true }, toolCalls: { type: [ChatToolCallSchema], default: [], required: true },

View File

@ -2,25 +2,17 @@
// Copyright (C) 2026 Rob Colbert <rob.colbert@openplatform.us> // Copyright (C) 2026 Rob Colbert <rob.colbert@openplatform.us>
// Licensed under the Apache License, Version 2.0 // Licensed under the Apache License, Version 2.0
import assert from "node:assert"; import {
import path from "node:path"; IAiChatOptions,
import fs from "node:fs"; type IAiProvider,
type IContextChatMessage,
import dayjs from "dayjs"; } from "@gadget/ai";
import { IChatSession, IChatTurn, IProject, IUser } from "@gadget/api";
import { type IAiProvider, type IContextChatMessage } from "@gadget/ai";
import AiService from "./ai.ts"; import AiService from "./ai.ts";
import { GadgetService } from "../lib/service.ts"; import { GadgetService } from "../lib/service.ts";
export interface IProject {
_id: string;
name: string;
slug: string;
gitUrl: string;
}
export interface IToolCall { export interface IToolCall {
name: string; name: string;
params: string; params: string;
@ -29,31 +21,12 @@ export interface IToolCall {
error?: Error; error?: Error;
} }
export interface IChatMessage {
role: string;
content: string;
}
export interface IChatTurn {
_id: string;
mode: string;
modelId?: string;
prompts: {
system: string;
user: string;
};
}
export interface IChatSession {
_id: string;
name: string;
context: IContextChatMessage[];
}
export interface IAgentWorkOrder { export interface IAgentWorkOrder {
createdAt: Date;
project: IProject; project: IProject;
provider: IAiProvider; provider: IAiProvider;
session: IChatSession; session: IChatSession;
context: IChatTurn[];
turn: IChatTurn; turn: IChatTurn;
} }
@ -80,20 +53,9 @@ class AgentService extends GadgetService {
return "[all tool calls are stubbed out]"; return "[all tool calls are stubbed out]";
} }
// this turn's context (system, history, prompt, work)
const messages: IChatMessage[] = [];
messages.push({ role: "system", content: turn.prompts.system });
// recall full session history into messages array
this.buildSessionContext(session, messages);
// push the User's latest prompt to the context
messages.push({ role: "user", content: workOrder.turn.prompts.user });
const modelConfig = { const modelConfig = {
provider: workOrder.provider, provider: workOrder.provider,
modelId: workOrder.turn.modelId ?? workOrder.provider.defaultModelId ?? "llama3.2", modelId: workOrder.turn.llm,
params: { params: {
reasoning: false, reasoning: false,
temperature: 0.8, temperature: 0.8,
@ -102,42 +64,102 @@ class AgentService extends GadgetService {
}, },
}; };
const chatOptions = { const context = this.buildSessionContext(workOrder);
context: session.context, const chatOptions: IAiChatOptions = {
systemPrompt: workOrder.turn.prompts.system,
context,
userPrompt: workOrder.turn.prompts.user,
}; };
let keepProcessing = true; let keepProcessing = true;
do { do {
const response = await AiService.chat(workOrder.provider, modelConfig, chatOptions); const response = await AiService.chat(
keepProcessing = (response.tool_calls?.length ?? 0) > 0; workOrder.provider,
for (const tool_call of response.tool_calls ?? []) { modelConfig,
const result = await aiCallTool(tool_call.function.name, tool_call.function.arguments); chatOptions,
messages.push({ role: "tool", content: result }); );
keepProcessing = (response.toolCalls?.length ?? 0) > 0;
for (const toolCall of response.toolCalls ?? []) {
const result = await aiCallTool(
toolCall.function.name,
toolCall.function.arguments,
);
context.push({
createdAt: new Date(),
role: "tool",
callId: toolCall.callId,
content: result,
});
/* emit turn-tool-call socket message */ /* emit turn-tool-call socket message */
} }
} while (keepProcessing); } while (keepProcessing);
/* emit turn-finished socket message */ /*
* TODO:
* 1. Call web service to POST results to the work order
* 2. Emit turn-finished socket message
*/
} }
buildSessionContext(session: IChatSession, messages: IChatMessage[]): void { buildSessionContext(workOrder: IAgentWorkOrder): IContextChatMessage[] {
let content; const user: IUser = workOrder.session.user as IUser;
for (const message of session.context) { const messages: IContextChatMessage[] = [];
switch (message.role) {
case "system": for (const turn of workOrder.context) {
continue; /*
case "user": * add the User message
content = message.content; */
break; messages.push({
case "assistant": createdAt: turn.createdAt,
content = message.content; role: "user",
break; content: turn.prompts.user,
case "tool": user: {
content = message.content; _id: user._id.toHexString(),
break; username: user.email,
displayName: user.displayName,
},
});
/*
* Add the agent's responses (thinking, response text, tool calls)
*/
if (turn.toolCalls?.length > 0) {
for (const toolCall of turn.toolCalls) {
messages.push({
createdAt: turn.createdAt,
role: "tool",
callId: toolCall.callId,
content: toolCall.response,
});
}
} }
messages.push({ role: message.role, content: message.content });
/*
* Add the assistant's output (if any), to include the thinking
* (reasoning) output (if any).
*/
let content = "";
if (turn.thinking) {
content += `<thinking>${turn.thinking}</thinking>`;
if (turn.response && turn.response.length) {
content += "\n";
}
}
if (turn.response) {
content += turn.response;
}
messages.push({
createdAt: turn.createdAt,
role: "assistant",
content:
content && content.length
? content
: "(you didn't say anything this turn)",
});
} }
return messages;
} }
/** /**

View File

@ -51,6 +51,7 @@ export interface IAiGenerateResponse {
export interface IContextChatMessage { export interface IContextChatMessage {
createdAt: Date; createdAt: Date;
role: string; role: string;
callId?: string;
content: string; content: string;
user?: { user?: {
_id: string; _id: string;
@ -66,7 +67,7 @@ export interface IAiChatOptions {
} }
export interface IToolCall { export interface IToolCall {
call_id: string; callId: string;
function: { function: {
name: string; name: string;
arguments: string; arguments: string;
@ -79,7 +80,7 @@ export interface IAiChatResponse {
stats: IAiInferenceStats; stats: IAiInferenceStats;
done: boolean; done: boolean;
doneReason?: string; doneReason?: string;
tool_calls?: IToolCall[]; toolCalls?: IToolCall[];
} }
export interface IAiStreamChunk { export interface IAiStreamChunk {
@ -129,4 +130,4 @@ export abstract class AiApi {
options: IAiChatOptions, options: IAiChatOptions,
streamCallback?: IAiResponseStreamFn, streamCallback?: IAiResponseStreamFn,
): Promise<IAiChatResponse>; ): Promise<IAiChatResponse>;
} }

View File

@ -15,6 +15,11 @@ export enum ChatTurnStatus {
Error = "error", Error = "error",
} }
export interface IChatTurnPrompts {
user: string;
system?: string;
}
export interface IChatTurnStats { export interface IChatTurnStats {
toolCallCount: number; // total number of tool functions called this turn toolCallCount: number; // total number of tool functions called this turn
inputTokens: number; // total number of input tokens processed this turn inputTokens: number; // total number of input tokens processed this turn
@ -25,6 +30,7 @@ export interface IChatTurnStats {
} }
export interface IChatToolCall { export interface IChatToolCall {
callId: string; // ID of the call so the agent can match response to call
name: string; // tool function name being called name: string; // tool function name being called
parameters: string; // JSON.stringify of input parameters parameters: string; // JSON.stringify of input parameters
response: string; // the tool's response response: string; // the tool's response
@ -53,10 +59,10 @@ export interface IChatTurn extends Document {
llm: string; // id/name of the model used to process the prompt llm: string; // id/name of the model used to process the prompt
mode: ChatSessionMode; // session mode for this turn/prompt mode: ChatSessionMode; // session mode for this turn/prompt
status: ChatTurnStatus; status: ChatTurnStatus;
prompt: string; prompts: IChatTurnPrompts;
thinking?: string; thinking?: string;
response?: string; response?: string;
toolCalls: IChatToolCall[]; toolCalls: IChatToolCall[];
subagents: IChatSubagentProcess[]; // subagents used while processing this turn subagents: IChatSubagentProcess[]; // subagents used while processing this turn
stats: IChatTurnStats; stats: IChatTurnStats;
} }