more refactoring to standardize interfaces

We want to speak only one language when dealing with AI content to
minimize the number of maps, transforms, and copies. This initiative
isn't done; this is a checkpoint along the way while we conduct
experiments.
This commit is contained in:
Rob Colbert 2026-04-29 11:47:28 -04:00
parent 096d8fe8b3
commit 9cb689668f
4 changed files with 112 additions and 77 deletions

View File

@ -7,12 +7,18 @@ import { Types, Schema, model } from "mongoose";
import {
ChatSessionMode,
ChatTurnStatus,
IChatTurnPrompts,
IChatSubagentProcess,
IChatToolCall,
IChatTurn,
IChatTurnStats,
} from "@gadget/api";
// Mongoose sub-document schema for the prompts attached to a single chat
// turn. Mirrors IChatTurnPrompts: the user prompt is mandatory, the
// system prompt is optional (required: false).
export const ChatTurnPromptsSchema = new Schema<IChatTurnPrompts>({
user: { type: String, required: true },
system: { type: String, required: false },
});
export const ChatTurnStatsSchema = new Schema<IChatTurnStats>({
toolCallCount: { type: Number, default: 0, required: true },
inputTokens: { type: Number, default: 0, required: true },
@ -55,7 +61,7 @@ export const ChatTurnSchema = new Schema<IChatTurn>({
default: ChatTurnStatus.Processing,
required: true,
},
prompt: { type: String, required: true },
prompts: { type: ChatTurnPromptsSchema, required: true },
thinking: { type: String, required: false },
response: { type: String, required: false },
toolCalls: { type: [ChatToolCallSchema], default: [], required: true },

View File

@ -2,25 +2,17 @@
// Copyright (C) 2026 Rob Colbert <rob.colbert@openplatform.us>
// Licensed under the Apache License, Version 2.0
import assert from "node:assert";
import path from "node:path";
import fs from "node:fs";
import dayjs from "dayjs";
import { type IAiProvider, type IContextChatMessage } from "@gadget/ai";
import {
IAiChatOptions,
type IAiProvider,
type IContextChatMessage,
} from "@gadget/ai";
import { IChatSession, IChatTurn, IProject, IUser } from "@gadget/api";
import AiService from "./ai.ts";
import { GadgetService } from "../lib/service.ts";
// Minimal project descriptor used by the agent service.
// NOTE(review): this local declaration shadows/duplicates the shared
// IProject now imported from @gadget/api — confirm which one callers use.
export interface IProject {
_id: string;
name: string; // human-readable project name
slug: string; // URL-safe identifier
gitUrl: string; // clone/fetch URL of the project repository
}
export interface IToolCall {
name: string;
params: string;
@ -29,31 +21,12 @@ export interface IToolCall {
error?: Error;
}
// One message in a chat transcript: a role tag (e.g. "system", "user",
// "assistant", "tool" — the values pushed elsewhere in this file) plus
// the message text.
export interface IChatMessage {
role: string;
content: string;
}
// One prompt/response cycle within a chat session.
// NOTE(review): this local declaration duplicates the IChatTurn now
// imported from @gadget/api — confirm which one is authoritative.
export interface IChatTurn {
_id: string;
mode: string; // session mode for this turn
modelId?: string; // model override; falls back to a provider default when absent
prompts: {
system: string; // system prompt sent ahead of the conversation
user: string; // the User's prompt for this turn
};
}
// A named chat session together with its accumulated message context.
// NOTE(review): duplicates the IChatSession now imported from
// @gadget/api — confirm which one callers use.
export interface IChatSession {
_id: string;
name: string;
context: IContextChatMessage[]; // full message history replayed to the model
}
// Everything the agent service needs to process one chat turn: the
// project being worked on, the AI provider to call, the owning session,
// the prior turns used to rebuild context, and the turn to process now.
export interface IAgentWorkOrder {
createdAt: Date; // when the work order was created
project: IProject;
provider: IAiProvider; // AI backend that will run inference for this order
session: IChatSession;
context: IChatTurn[]; // prior turns, oldest-first — TODO confirm ordering
turn: IChatTurn; // the turn currently being processed
}
@ -80,20 +53,9 @@ class AgentService extends GadgetService {
return "[all tool calls are stubbed out]";
}
// this turn's context (system, history, prompt, work)
const messages: IChatMessage[] = [];
messages.push({ role: "system", content: turn.prompts.system });
// recall full session history into messages array
this.buildSessionContext(session, messages);
// push the User's latest prompt to the context
messages.push({ role: "user", content: workOrder.turn.prompts.user });
const modelConfig = {
provider: workOrder.provider,
modelId: workOrder.turn.modelId ?? workOrder.provider.defaultModelId ?? "llama3.2",
modelId: workOrder.turn.llm,
params: {
reasoning: false,
temperature: 0.8,
@ -102,42 +64,102 @@ class AgentService extends GadgetService {
},
};
const chatOptions = {
context: session.context,
const context = this.buildSessionContext(workOrder);
const chatOptions: IAiChatOptions = {
systemPrompt: workOrder.turn.prompts.system,
context,
userPrompt: workOrder.turn.prompts.user,
};
let keepProcessing = true;
do {
const response = await AiService.chat(workOrder.provider, modelConfig, chatOptions);
keepProcessing = (response.tool_calls?.length ?? 0) > 0;
for (const tool_call of response.tool_calls ?? []) {
const result = await aiCallTool(tool_call.function.name, tool_call.function.arguments);
messages.push({ role: "tool", content: result });
const response = await AiService.chat(
workOrder.provider,
modelConfig,
chatOptions,
);
keepProcessing = (response.toolCalls?.length ?? 0) > 0;
for (const toolCall of response.toolCalls ?? []) {
const result = await aiCallTool(
toolCall.function.name,
toolCall.function.arguments,
);
context.push({
createdAt: new Date(),
role: "tool",
callId: toolCall.callId,
content: result,
});
/* emit turn-tool-call socket message */
}
} while (keepProcessing);
/* emit turn-finished socket message */
/*
* TODO:
* 1. Call web service to POST results to the work order
* 2. Emit turn-finished socket message
*/
}
buildSessionContext(session: IChatSession, messages: IChatMessage[]): void {
let content;
for (const message of session.context) {
switch (message.role) {
case "system":
continue;
case "user":
content = message.content;
break;
case "assistant":
content = message.content;
break;
case "tool":
content = message.content;
break;
buildSessionContext(workOrder: IAgentWorkOrder): IContextChatMessage[] {
const user: IUser = workOrder.session.user as IUser;
const messages: IContextChatMessage[] = [];
for (const turn of workOrder.context) {
/*
* add the User message
*/
messages.push({
createdAt: turn.createdAt,
role: "user",
content: turn.prompts.user,
user: {
_id: user._id.toHexString(),
username: user.email,
displayName: user.displayName,
},
});
/*
* Add the agent's responses (thinking, response text, tool calls)
*/
if (turn.toolCalls?.length > 0) {
for (const toolCall of turn.toolCalls) {
messages.push({
createdAt: turn.createdAt,
role: "tool",
callId: toolCall.callId,
content: toolCall.response,
});
}
}
messages.push({ role: message.role, content: message.content });
/*
* Add the assistant's output (if any), to include the thinking
* (reasoning) output (if any).
*/
let content = "";
if (turn.thinking) {
content += `<thinking>${turn.thinking}</thinking>`;
if (turn.response && turn.response.length) {
content += "\n";
}
}
if (turn.response) {
content += turn.response;
}
messages.push({
createdAt: turn.createdAt,
role: "assistant",
content:
content && content.length
? content
: "(you didn't say anything this turn)",
});
}
return messages;
}
/**

View File

@ -51,6 +51,7 @@ export interface IAiGenerateResponse {
export interface IContextChatMessage {
createdAt: Date;
role: string;
callId?: string;
content: string;
user?: {
_id: string;
@ -66,7 +67,7 @@ export interface IAiChatOptions {
}
export interface IToolCall {
call_id: string;
callId: string;
function: {
name: string;
arguments: string;
@ -79,7 +80,7 @@ export interface IAiChatResponse {
stats: IAiInferenceStats;
done: boolean;
doneReason?: string;
tool_calls?: IToolCall[];
toolCalls?: IToolCall[];
}
export interface IAiStreamChunk {
@ -129,4 +130,4 @@ export abstract class AiApi {
options: IAiChatOptions,
streamCallback?: IAiResponseStreamFn,
): Promise<IAiChatResponse>;
}
}

View File

@ -15,6 +15,11 @@ export enum ChatTurnStatus {
Error = "error",
}
// Prompt pair captured for a single chat turn. The user prompt is always
// present; the system prompt is optional — presumably supplied per-turn
// or inherited from session configuration (verify against callers).
export interface IChatTurnPrompts {
user: string;
system?: string;
}
export interface IChatTurnStats {
toolCallCount: number; // total number of tool functions called this turn
inputTokens: number; // total number of input tokens processed this turn
@ -25,6 +30,7 @@ export interface IChatTurnStats {
}
export interface IChatToolCall {
callId: string; // ID of the call so the agent can match response to call
name: string; // tool function name being called
parameters: string; // JSON.stringify of input parameters
response: string; // the tool's response
@ -53,10 +59,10 @@ export interface IChatTurn extends Document {
llm: string; // id/name of the model used to process the prompt
mode: ChatSessionMode; // session mode for this turn/prompt
status: ChatTurnStatus;
prompt: string;
prompts: IChatTurnPrompts;
thinking?: string;
response?: string;
toolCalls: IChatToolCall[];
subagents: IChatSubagentProcess[]; // subagents used while processing this turn
stats: IChatTurnStats;
}
}