OpenAI API tool call processing fixes/correctness
commit b090b5308b
parent 1bb2a1d392
@@ -7,7 +7,7 @@ an `AbortError` through the agent loop, and marks the turn as `Aborted` (not

Abort is separate from completion. `workOrderComplete` is a normal end
condition — the agent finished its work. Abort is something the user does
-*before* completion and invalid after it. When a work order completes naturally
+_before_ completion and invalid after it. When a work order completes naturally
or is aborted, the abort signal mechanism is cleaned up end-to-end.

---
@@ -34,9 +34,9 @@ or is aborted, the abort signal mechanism is cleaned up end-to-end.
workOrderComplete(turnId, true, "aborted")
        |
[DroneSession] --workOrderComplete(turnId, true, "aborted")--> [CodeSession]
       |                                                |
  turn.status = "aborted"            ChatTurnStatus.Aborted displayed subtly
```

**Key design decision**: `workOrderComplete` is used with `success=true` and
@@ -136,6 +136,7 @@ and is detected downstream via `error.name === "AbortError"`.
### 5. Frontend (`gadget-code/frontend`)

**`frontend/src/lib/api.ts`** — Added `"aborted"` to `ChatTurn.status`:

```ts
status: "processing" | "finished" | "aborted" | "error";
```
@@ -170,13 +171,15 @@ status: "processing" | "finished" | "aborted" | "error";
- Status color: `"aborted"` → `text-yellow-500` (NOT red)
- Aborted display: Subtle info box (not the bright red error box):

```tsx
{
  turn.status === "aborted" && (
    <div className="bg-bg-tertiary border border-border-default rounded p-3">
      <div className="text-sm text-text-secondary">
        The turn was aborted by you.
      </div>
    </div>
  );
}
```

- The existing big red error block only shows for `status === "error"`, not
  `status === "aborted"`
@@ -185,22 +188,22 @@ status: "processing" | "finished" | "aborted" | "error";

## Files Changed (14 files)

| File | Change |
| ---- | ------ |
| `.gitignore` | Add root-level log file patterns |
| `packages/api/src/messages/ide.ts` | New `AbortWorkOrderCallback`, `AbortWorkOrderMessage` |
| `packages/api/src/messages/socket.ts` | Register `abortWorkOrder` in both event interfaces |
| `packages/ai/src/api.ts` | `signal?: AbortSignal` on `IAiChatOptions`, `IAiGenerateOptions` |
| `packages/ai/src/ollama.ts` | Pre-request and per-chunk abort checks, signal pass-through |
| `packages/ai/src/openai.ts` | Same for `generate()`, `readStreamingChatCompletion()`, `readNonStreamingChatCompletion()` |
| `gadget-drone/src/services/agent.ts` | `AbortController` property, abort detection, `abortCurrentWorkOrder()` |
| `gadget-drone/src/gadget-drone.ts` | `onAbortWorkOrder` socket handler |
| `gadget-code/src/lib/code-session.ts` | Forward `abortWorkOrder` to drone |
| `gadget-code/src/lib/drone-session.ts` | Detect `"aborted"` in `workOrderComplete` |
| `gadget-code/frontend/src/lib/api.ts` | `"aborted"` in `ChatTurn.status` |
| `gadget-code/frontend/src/lib/socket.ts` | `abortWorkOrder` method on `SocketClient` |
| `gadget-code/frontend/src/pages/ChatSessionView.tsx` | Cancel button, double-Esc, abort flow |
| `gadget-code/frontend/src/components/ChatTurn.tsx` | Subtle aborted display, yellow status |

---
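For orientation before the code hunks below, here is a minimal sketch of the abort path the documentation above describes. It is not the actual implementation; `IAiClient`, `reportComplete`, and the class name are illustrative stand-ins for the pieces listed in the table.

```ts
// Illustrative sketch only: the names below are assumptions, not the real classes.
interface IAiClient {
  chat(options: { prompt: string; signal?: AbortSignal }): Promise<string>;
}

class AbortFlowSketch {
  private abortController: AbortController | null = null;

  constructor(
    private readonly ai: IAiClient,
    private readonly reportComplete: (success: boolean, detail?: string) => void,
  ) {}

  // A socket handler for `abortWorkOrder` would call this.
  abortCurrentWorkOrder(): void {
    this.abortController?.abort();
  }

  async runWorkOrder(prompt: string): Promise<void> {
    this.abortController = new AbortController();
    try {
      // The same signal is threaded through every AI request in the work order.
      await this.ai.chat({ prompt, signal: this.abortController.signal });
      this.reportComplete(true);
    } catch (error) {
      if (error instanceof Error && error.name === "AbortError") {
        // Abort is reported as a normal completion carrying an "aborted" marker,
        // not as a failure.
        this.reportComplete(true, "aborted");
        return;
      }
      throw error;
    } finally {
      // Signal mechanism cleaned up end-to-end, whether finished or aborted.
      this.abortController = null;
    }
  }
}
```
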
@@ -11,7 +11,7 @@ describe("AgentService", () => {
    await service.start();
  });

-  it("replays historical tool results as assistant-readable context, not raw tool-role messages", () => {
+  it("replays historical tool results as role:tool messages with unaltered content", () => {
    const user = {
      _id: "user-1",
      email: "user@example.com",
@@ -111,10 +111,15 @@ describe("AgentService", () => {
    expect(messages.map((message) => message.role)).toEqual([
      "user",
      "assistant",
-      "assistant",
+      "tool",
    ]);
-    expect(messages[2]?.content).toContain("Historical tool result: file_read");
-    expect(messages[2]?.content).toContain("PATH: index.html");
+    expect(messages[1]?.toolCalls).toBeDefined();
+    expect(messages[1]?.toolCalls).toHaveLength(1);
+    expect(messages[1]?.toolCalls?.[0]?.id).toBe("call-1");
+    expect(messages[1]?.toolCalls?.[0]?.function?.name).toBe("file_read");
+    expect(messages[2]?.role).toBe("tool");
+    expect(messages[2]?.content).toBe("PATH: index.html\n---\ncontent");
+    expect(messages[2]?.toolCallId).toBe("call-1");
  });

  it("does not expose mutating file tools in plan mode", () => {
@@ -251,6 +251,14 @@ class AgentService extends GadgetService {
        createdAt: turn.createdAt,
        role: "assistant",
        content: response.response,
+        toolCalls: response.toolCalls.map((tc) => ({
+          id: tc.callId,
+          type: "function" as const,
+          function: {
+            name: tc.function.name,
+            arguments: tc.function.arguments,
+          },
+        })),
      });

      for (const toolCall of response.toolCalls) {
@@ -289,12 +297,10 @@ class AgentService extends GadgetService {

      messages.push({
        createdAt: turn.createdAt,
-        role: "assistant",
-        content: this.formatHistoricalToolResult({
-          name: toolCall.function.name,
-          parameters: toolCall.function.arguments,
-          response: result,
-        }),
+        role: "tool",
+        content: result,
+        toolCallId: toolCall.callId,
+        toolName: toolCall.function.name,
      });

      inputTokens += Math.ceil(toolCall.function.arguments.length / 4);
@@ -376,30 +382,36 @@ class AgentService extends GadgetService {
        }
      }

-      messages.push({
+      const assistantMsg: IContextChatMessage = {
        createdAt: turn.createdAt,
        role: "assistant",
        content:
          content && content.length
            ? content
            : "(you didn't say anything this turn)",
-      });
+      };
+
+      if (turn.toolCalls?.length > 0) {
+        assistantMsg.toolCalls = turn.toolCalls.map((tc: IChatToolCall) => ({
+          id: tc.callId,
+          type: "function" as const,
+          function: {
+            name: tc.name,
+            arguments: tc.parameters,
+          },
+        }));
+      }
+
+      messages.push(assistantMsg);

-      /*
-       * Persisted turns do not currently store provider-native assistant
-       * tool-call messages. Replaying these as role=tool creates invalid
-       * OpenAI-compatible history. Keep the information, but make it normal
-       * assistant-readable context.
-       */
      if (turn.toolCalls?.length > 0) {
        for (const toolCall of turn.toolCalls) {
-          const content = this.formatHistoricalToolResult(toolCall);
          messages.push({
            createdAt: turn.createdAt,
-            role: "assistant",
-            callId: toolCall.callId,
-            content,
+            role: "tool",
+            content: toolCall.response,
+            toolCallId: toolCall.callId,
+            toolName: toolCall.name,
          });
        }
      }
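The restructuring above exists because OpenAI-style chat APIs enforce a pairing rule: a `tool` message is only valid as the answer to a tool call announced by a preceding assistant message, and every announced call id must be answered. A small validation sketch (not part of the commit) that states the invariant the new replay code maintains:

```ts
// Not part of the commit: a stand-alone statement of the pairing invariant.
interface SketchMessage {
  role: "user" | "assistant" | "tool";
  toolCalls?: Array<{ id: string }>;
  toolCallId?: string;
}

function isToolCallHistoryConsistent(messages: SketchMessage[]): boolean {
  const announced = new Set<string>();
  const answered = new Set<string>();

  for (const message of messages) {
    if (message.role === "assistant") {
      for (const call of message.toolCalls ?? []) announced.add(call.id);
    } else if (message.role === "tool") {
      // A tool message must answer a call that was announced earlier.
      if (!message.toolCallId || !announced.has(message.toolCallId)) return false;
      answered.add(message.toolCallId);
    }
  }

  // Every announced call must eventually be answered.
  return [...announced].every((id) => answered.has(id));
}
```
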
@@ -424,22 +436,6 @@ class AgentService extends GadgetService {
    return this.toolbox.getToolNamesForMode(mode);
  }

-  private formatHistoricalToolResult(toolCall: {
-    name: string;
-    parameters?: string;
-    response?: string;
-  }): string {
-    const response = toolCall.response || "";
-    return [
-      `Historical tool result: ${toolCall.name}`,
-      `Parameters: ${toolCall.parameters || "{}"}`,
-      "---",
-      response.length > 8000
-        ? `${response.slice(0, 8000)}\n\n[Tool result truncated from ${response.length} characters.]`
-        : response,
-    ].join("\n");
-  }
-
  private makeStreamHandler(socket: DroneSocket): IAiResponseStreamFn {
    return async (chunk) => {
      switch (chunk.type) {
@@ -481,7 +477,7 @@ class AgentService extends GadgetService {
      modelId,
      params: {
        reasoning,
-        temperature: settings?.temperature ?? 0.8,
+        temperature: settings?.temperature ?? 0.1,
        topP: settings?.topP ?? 0.9,
        topK: settings?.topK ?? 40,
        numPredict: settings?.numPredict ?? -1, // -1 = unlimited (Ollama)
@@ -657,6 +653,14 @@ class AgentService extends GadgetService {
        createdAt: new Date(),
        role: "assistant",
        content: response.response,
+        toolCalls: response.toolCalls.map((tc) => ({
+          id: tc.callId,
+          type: "function" as const,
+          function: {
+            name: tc.function.name,
+            arguments: tc.function.arguments,
+          },
+        })),
      });

      for (const toolCall of response.toolCalls) {
@@ -684,12 +688,10 @@ class AgentService extends GadgetService {

      messages.push({
        createdAt: new Date(),
-        role: "assistant",
-        content: this.formatHistoricalToolResult({
-          name: toolCall.function.name,
-          parameters: toolArgsRaw,
-          response: result,
-        }),
+        role: "tool",
+        content: result,
+        toolCallId: toolCall.callId,
+        toolName: toolCall.function.name,
      });

      inputTokens += Math.ceil(toolArgsRaw.length / 4);
@@ -61,7 +61,16 @@ export interface IContextChatMessage {
  createdAt: Date;
  role: string;
  callId?: string;
+  toolCallId?: string;
  toolName?: string;
+  toolCalls?: Array<{
+    id: string;
+    type: "function";
+    function: {
+      name: string;
+      arguments: string;
+    };
+  }>;
  content: string;
  user?: {
    _id: string;
@@ -157,6 +157,9 @@ export class OllamaAiApi extends AiApi {
      options: {
        num_ctx: model.params.numCtx,
        num_predict: model.params.numPredict,
+        temperature: model.params.temperature,
+        top_p: model.params.topP,
+        top_k: model.params.topK,
      },
    });

@@ -285,6 +288,9 @@ export class OllamaAiApi extends AiApi {
      options: {
        num_ctx: model.params.numCtx,
        num_predict: model.params.numPredict,
+        temperature: model.params.temperature,
+        top_p: model.params.topP,
+        top_k: model.params.topK,
      },
    });

@@ -206,10 +206,11 @@ export class OpenAiApi extends AiApi {
        { role: "user" as const, content: options.prompt },
      ],
      stream: true,
      ...(options.signal ? { signal: options.signal } : {}),
      ...(model.params.maxCompletionTokens
        ? { max_completion_tokens: model.params.maxCompletionTokens }
        : {}),
      temperature: model.params.temperature,
      top_p: model.params.topP,
      ...(typeof model.params.reasoning === "string"
        ? {
            reasoning_effort: model.params.reasoning as
@@ -218,7 +219,7 @@ export class OpenAiApi extends AiApi {
              | "high",
          }
        : {}),
-    });
+    }, options.signal ? { signal: options.signal } : undefined);

    let accumulatedResponse = "";
    let accumulatedThinking = "";
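The changed closing line passes the abort signal as per-request options, the optional second argument of the openai Node SDK's `create()`, rather than mixing it into the request body. A minimal sketch of that pattern in isolation (model id and prompt are placeholders):

```ts
import OpenAI from "openai";

// Sketch: request body as the first argument, per-request options (including
// the abort signal) as the second. Aborting rejects the call with an AbortError.
async function generateWithAbort(prompt: string, signal: AbortSignal) {
  const client = new OpenAI();
  return client.chat.completions.create(
    {
      model: "gpt-4o-mini", // placeholder model id
      messages: [{ role: "user", content: prompt }],
    },
    { signal },
  );
}
```
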
@@ -347,9 +348,18 @@ export class OpenAiApi extends AiApi {
    for (const msg of options.context || []) {
      if (!msg.content?.trim()) continue;
      if (msg.role === "tool") {
        messages.push({
+          role: "tool",
+          tool_call_id: msg.toolCallId || msg.callId || "",
+          content: msg.content,
+        });
+        continue;
+      }
+      if (msg.role === "assistant" && msg.toolCalls && msg.toolCalls.length > 0) {
+        messages.push({
          role: "assistant",
-          content: `Historical tool result${msg.toolName ? ` from ${msg.toolName}` : ""}:\n${msg.content}`,
+          content: msg.content,
+          tool_calls: msg.toolCalls,
        });
        continue;
      }
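Note that the persisted `toolCalls` entries (`{ id, type: "function", function: { name, arguments } }`) already match the wire shape the OpenAI API expects for `tool_calls`, which is why the new assistant branch can pass them through unchanged. A hedged, stand-alone sketch of the same mapping (`ContextMessage` is a stand-in for the real interface):

```ts
import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";

interface ContextMessage {
  role: string;
  content: string;
  toolCallId?: string;
  toolCalls?: Array<{
    id: string;
    type: "function";
    function: { name: string; arguments: string };
  }>;
}

// Tool results keep their role and call id, assistant messages keep their
// tool calls, everything else is forwarded as plain text.
function toOpenAiMessage(msg: ContextMessage): ChatCompletionMessageParam {
  if (msg.role === "tool") {
    return { role: "tool", tool_call_id: msg.toolCallId ?? "", content: msg.content };
  }
  if (msg.role === "assistant" && msg.toolCalls?.length) {
    return { role: "assistant", content: msg.content, tool_calls: msg.toolCalls };
  }
  return { role: "user", content: msg.content };
}
```
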
@@ -391,10 +401,11 @@ export class OpenAiApi extends AiApi {
      messages,
      tools,
      stream: true,
      ...(signal ? { signal } : {}),
      ...(model.params.maxCompletionTokens
        ? { max_completion_tokens: model.params.maxCompletionTokens }
        : {}),
      temperature: model.params.temperature,
      top_p: model.params.topP,
      ...(typeof model.params.reasoning === "string"
        ? {
            reasoning_effort: model.params.reasoning as
@@ -403,7 +414,7 @@ export class OpenAiApi extends AiApi {
              | "high",
          }
        : {}),
-    });
+    }, signal ? { signal } : undefined);

    let content = "";
    let thinking = "";
@@ -461,15 +472,20 @@ export class OpenAiApi extends AiApi {
    tools: ChatCompletionTool[],
    signal?: AbortSignal,
  ): Promise<OpenAiChatIterationResult> {
    if (signal?.aborted) {
      throw new DOMException("The operation was aborted", "AbortError");
    }

    const response = await this.client.chat.completions.create({
      model: model.modelId,
      messages,
      tools,
      stream: false,
      ...(signal ? { signal } : {}),
      ...(model.params.maxCompletionTokens
        ? { max_completion_tokens: model.params.maxCompletionTokens }
        : {}),
      temperature: model.params.temperature,
      top_p: model.params.topP,
      ...(typeof model.params.reasoning === "string"
        ? {
            reasoning_effort: model.params.reasoning as
@@ -478,7 +494,7 @@ export class OpenAiApi extends AiApi {
              | "high",
          }
        : {}),
-    });
+    }, signal ? { signal } : undefined);
    const choice = response.choices[0];
    const message = choice?.message;
    const content = typeof message?.content === "string" ? message.content : "";