Updating Ask Sage models to support tool calling. Plan and Agent modes now work correctly.

This commit is contained in:
Alex McGraw
2026-01-13 16:52:26 -06:00
parent 545751c49a
commit 78c0a5d44c

View File

@@ -4,6 +4,8 @@ import {
CompletionOptions, CompletionOptions,
LLMOptions, LLMOptions,
TextMessagePart, TextMessagePart,
Tool,
ToolCallDelta,
} from "../../index.js"; } from "../../index.js";
import { BaseLLM } from "../index.js"; import { BaseLLM } from "../index.js";
@@ -61,15 +63,26 @@ interface AskSageRequestArgs {
file?: unknown; file?: unknown;
} }
/**
 * A single tool invocation as returned by the Ask Sage API.
 * Mirrors the OpenAI-style tool_call shape: an id, a literal
 * "function" discriminator, and the function name plus its
 * arguments serialized as a JSON string.
 */
interface AskSageToolCall {
id: string;
type: "function";
function: {
name: string;
// JSON-encoded argument object, exactly as the API sends it.
arguments: string;
};
}
interface AskSageResponse { interface AskSageResponse {
text?: string; text?: string;
answer?: string; answer?: string;
message?: string; message?: string;
status?: number | string; status?: number | string;
response?: unknown; response?: unknown;
tool_calls?: AskSageToolCall[];
choices?: Array<{ choices?: Array<{
message?: { message?: {
content?: string; content?: string;
tool_calls?: AskSageToolCall[];
}; };
}>; }>;
} }
@@ -185,6 +198,17 @@ class Asksage extends BaseLLM {
}; };
} }
/**
 * Translate a standard `Tool` definition into the shape the Ask Sage
 * API expects. The mapping is field-for-field: the tool type plus the
 * function's name, description, and parameter schema are carried over
 * unchanged.
 */
protected _convertToolToAskSageTool(tool: Tool): AskSageTool {
  const { name, description, parameters } = tool.function;
  return {
    type: tool.type,
    function: { name, description, parameters },
  };
}
protected _convertArgs( protected _convertArgs(
options: AskSageCompletionOptions, options: AskSageCompletionOptions,
messages: ChatMessage[], messages: ChatMessage[],
@@ -196,6 +220,14 @@ class Asksage extends BaseLLM {
formattedMessage = messages.map(this._convertMessage); formattedMessage = messages.map(this._convertMessage);
} }
// Convert standard tools to AskSage format, or use askSageTools if provided
const tools =
options.tools?.map((tool) => this._convertToolToAskSageTool(tool)) ??
options.askSageTools;
// Map standard toolChoice to AskSage format, or use askSageToolChoice if provided
const toolChoice = options.toolChoice ?? options.askSageToolChoice;
const args: AskSageRequestArgs = { const args: AskSageRequestArgs = {
message: formattedMessage, message: formattedMessage,
model: options.model, model: options.model,
@@ -207,10 +239,8 @@ class Asksage extends BaseLLM {
options.systemPrompt ?? options.systemPrompt ??
process.env.ASKSAGE_SYSTEM_PROMPT ?? process.env.ASKSAGE_SYSTEM_PROMPT ??
"You are an expert software developer. You give helpful and concise responses.", "You are an expert software developer. You give helpful and concise responses.",
tools: options.askSageTools, tools,
// enabled_mcp_tools: options.enabledMcpTools as string[] | undefined, tool_choice: toolChoice,
// tools_to_execute: options.toolsToExecute as string[] | undefined,
tool_choice: options.askSageToolChoice,
reasoning_effort: options.reasoningEffort as reasoning_effort: options.reasoningEffort as
| "low" | "low"
| "medium" | "medium"
@@ -391,6 +421,22 @@ class Asksage extends BaseLLM {
const data = (await response.json()) as AskSageResponse; const data = (await response.json()) as AskSageResponse;
// Extract tool calls from response (check both top-level and choices format)
const rawToolCalls =
data.tool_calls || data.choices?.[0]?.message?.tool_calls;
// Convert to ToolCallDelta format if present
const toolCalls: ToolCallDelta[] | undefined = rawToolCalls?.map(
(tc) => ({
id: tc.id,
type: tc.type,
function: {
name: tc.function.name,
arguments: tc.function.arguments,
},
}),
);
const assistantMessage: ChatMessage = { const assistantMessage: ChatMessage = {
role: "assistant", role: "assistant",
content: content:
@@ -399,6 +445,7 @@ class Asksage extends BaseLLM {
data.message || data.message ||
data.choices?.[0]?.message?.content || data.choices?.[0]?.message?.content ||
"", "",
...(toolCalls && toolCalls.length > 0 ? { toolCalls } : {}),
}; };
yield assistantMessage; yield assistantMessage;