Compare commits
7 Commits
@continued ... pr-9099
| Author | SHA1 | Date |
|---|---|---|
| | 752b258c17 | |
| | ff12144269 | |
| | c174c22a86 | |
| | 707dc6be2b | |
| | b4e3c30f76 | |
| | 838fcdb44b | |
| | e6cf787b10 | |
package-lock.json (generated, 4 changes)
    @@ -380,7 +380,6 @@
      "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
      "dev": true,
      "license": "MIT",
      "peer": true,
      "bin": {
        "acorn": "bin/acorn"
      },
    @@ -1260,7 +1259,6 @@
      "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.",
      "dev": true,
      "license": "MIT",
      "peer": true,
      "dependencies": {
        "@eslint-community/eslint-utils": "^4.2.0",
        "@eslint-community/regexpp": "^4.6.1",
    @@ -3472,7 +3470,6 @@
      "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==",
      "dev": true,
      "license": "MIT",
      "peer": true,
      "bin": {
        "prettier": "bin/prettier.cjs"
      },
    @@ -4412,7 +4409,6 @@
      "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
      "dev": true,
      "license": "Apache-2.0",
      "peer": true,
      "bin": {
        "tsc": "bin/tsc",
        "tsserver": "bin/tsserver"
    @@ -339,7 +339,7 @@ export class AnthropicApi implements BaseLlmApi {
         // Vercel SDK cannot handle pre-existing tool call conversations
         const hasToolMessages = body.messages.some((msg) => msg.role === "tool");

    -    if (this.useVercelSDK && this.anthropicProvider && !hasToolMessages) {
    +    if (this.useVercelSDK && !hasToolMessages) {
           return this.chatCompletionNonStreamVercel(body, signal);
         }

    @@ -585,7 +585,7 @@ export class AnthropicApi implements BaseLlmApi {
         // Vercel SDK cannot handle pre-existing tool call conversations
         const hasToolMessages = body.messages.some((msg) => msg.role === "tool");

    -    if (this.useVercelSDK && this.anthropicProvider && !hasToolMessages) {
    +    if (this.useVercelSDK && !hasToolMessages) {
           yield* this.chatCompletionStreamVercel(body, signal);
           return;
         }
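
Both AnthropicApi hunks make the same change: the guard that routes a request onto the Vercel SDK path no longer checks `this.anthropicProvider`. Below is a minimal sketch of the resulting routing logic, under the assumption that the provider is always constructed whenever `useVercelSDK` is true; the class shape, the body/message types, and the non-Vercel fallback are hypothetical stand-ins, since only the guard, `hasToolMessages`, and the two `...Vercel` methods appear in the diff.

```ts
// Hypothetical, stripped-down shape of the routing decision after the change.
type ChatMessage = { role: "system" | "user" | "assistant" | "tool"; content: string };
type ChatBody = { model: string; messages: ChatMessage[] };

class AnthropicRoutingSketch {
  // Assumption: when useVercelSDK is true, the Vercel provider has already been
  // created, so the separate null check on it is redundant and can be dropped.
  constructor(private useVercelSDK: boolean) {}

  async chatCompletionNonStream(body: ChatBody, signal: AbortSignal): Promise<string> {
    // Vercel SDK cannot handle pre-existing tool call conversations, so any
    // history that already contains a tool message skips the Vercel path.
    const hasToolMessages = body.messages.some((msg) => msg.role === "tool");

    if (this.useVercelSDK && !hasToolMessages) {
      return this.chatCompletionNonStreamVercel(body, signal);
    }
    return this.chatCompletionNonStreamAnthropic(body, signal);
  }

  private async chatCompletionNonStreamVercel(_body: ChatBody, _signal: AbortSignal): Promise<string> {
    return "handled by the Vercel AI SDK path"; // placeholder
  }

  private async chatCompletionNonStreamAnthropic(_body: ChatBody, _signal: AbortSignal): Promise<string> {
    return "handled by the direct Anthropic path"; // placeholder
  }
}
```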
    @@ -151,11 +151,7 @@ export class OpenAIApi implements BaseLlmApi {
         body: ChatCompletionCreateParamsNonStreaming,
         signal: AbortSignal,
       ): Promise<ChatCompletion> {
    -    if (
    -      this.useVercelSDK &&
    -      this.openaiProvider &&
    -      !this.shouldUseResponsesEndpoint(body.model)
    -    ) {
    +    if (this.useVercelSDK && !this.shouldUseResponsesEndpoint(body.model)) {
           return this.chatCompletionNonStreamVercel(body, signal);
         }

    @@ -255,11 +251,7 @@ export class OpenAIApi implements BaseLlmApi {
         body: ChatCompletionCreateParamsStreaming,
         signal: AbortSignal,
       ): AsyncGenerator<ChatCompletionChunk, any, unknown> {
    -    if (
    -      this.useVercelSDK &&
    -      this.openaiProvider &&
    -      !this.shouldUseResponsesEndpoint(body.model)
    -    ) {
    +    if (this.useVercelSDK && !this.shouldUseResponsesEndpoint(body.model)) {
           yield* this.chatCompletionStreamVercel(body, signal);
           return;
         }
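
The OpenAIApi hunks collapse the equivalent guard from a multi-line condition to a single line, again dropping the provider null check (`this.openaiProvider`). The streaming variant is the more interesting one: it delegates to another async generator with `yield*` and then returns, so nothing after the guard runs when the Vercel path is taken. A self-contained sketch of that delegation pattern follows; the chunk type and the two inner generators are hypothetical stand-ins, not the real implementations.

```ts
// Hypothetical chunk type and generators illustrating the yield* delegation
// used by the streaming guard above.
type Chunk = { choices: { delta: { content?: string } }[] };

async function* vercelStream(): AsyncGenerator<Chunk> {
  yield { choices: [{ delta: { content: "from the Vercel SDK path" } }] };
}

async function* directStream(): AsyncGenerator<Chunk> {
  yield { choices: [{ delta: { content: "from the direct OpenAI path" } }] };
}

async function* chatCompletionStream(
  useVercelSDK: boolean,
  usesResponsesEndpoint: boolean,
): AsyncGenerator<Chunk> {
  if (useVercelSDK && !usesResponsesEndpoint) {
    // yield* re-emits every chunk produced by the delegate generator; the
    // early return then prevents falling through to the default path.
    yield* vercelStream();
    return;
  }
  yield* directStream();
}

// Usage: callers consume the stream the same way regardless of the branch taken.
(async () => {
  for await (const chunk of chatCompletionStream(true, false)) {
    console.log(chunk.choices[0].delta.content);
  }
})();
```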
    @@ -55,7 +55,7 @@ export function convertOpenAIMessagesToVercel(
           case "user":
             vercelMessages.push({
               role: "user",
    -          content: typeof msg.content === "string" ? msg.content : msg.content,
    +          content: msg.content,
             });
             break;

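
This hunk is a pure cleanup: both branches of the removed ternary evaluate to `msg.content`, so the expression was a no-op and the pushed message is identical before and after. A tiny illustration with a hypothetical `msg`:

```ts
const msg = { content: "hello" as string | { type: "text"; text: string }[] };
// Both branches return the same value, so the ternary added nothing:
const a = typeof msg.content === "string" ? msg.content : msg.content;
const b = msg.content;
console.log(a === b); // true
```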
    @@ -75,12 +75,14 @@ export function testFim(api: BaseLlmApi, model: string) {
     }

     export function testChat(
    -  api: BaseLlmApi,
    +  api: BaseLlmApi | (() => BaseLlmApi),
       model: string,
       options?: TestConfigOptions,
     ) {
    +  const getApi = () => (typeof api === "function" ? api() : api);
    +
       test("should successfully stream chat", async () => {
    -    const stream = api.chatCompletionStream(
    +    const stream = getApi().chatCompletionStream(
           {
             model,
             messages: [{ role: "user", content: "Hello! Who are you?" }],
    @@ -121,7 +123,7 @@ export function testChat(
       });

       test("should successfully stream multi-part chat with empty text", async () => {
    -    const stream = api.chatCompletionStream(
    +    const stream = getApi().chatCompletionStream(
           {
             model,
             messages: [
    @@ -158,7 +160,7 @@ export function testChat(
       });

       test.skip("should successfully stream multi-part chat with image", async () => {
    -    const stream = api.chatCompletionStream(
    +    const stream = getApi().chatCompletionStream(
           {
             model,
             messages: [
    @@ -193,7 +195,7 @@ export function testChat(
       });

       test("should successfully non-stream chat", async () => {
    -    const response = await api.chatCompletionNonStream(
    +    const response = await getApi().chatCompletionNonStream(
           {
             model,
             messages: [{ role: "user", content: "Hello! Who are you?" }],
    @@ -228,7 +230,7 @@ export function testChat(

       test("should acknowledge system message in chat", async () => {
         if (options?.skipSystemMessage !== true) {
    -      const response = await api.chatCompletionNonStream(
    +      const response = await getApi().chatCompletionNonStream(
             {
               model,
               messages: [
    @@ -255,7 +257,7 @@ export function testChat(
       test("Tool Call works", async () => {
         let args = "";
         let isFirstChunk = true;
    -    for await (const chunk of api.chatCompletionStream(
    +    for await (const chunk of getApi().chatCompletionStream(
           {
             messages: [
               {
    @@ -318,7 +320,7 @@ export function testChat(

       test("Tool Call second message works", async () => {
         let response = "";
    -    for await (const chunk of api.chatCompletionStream(
    +    for await (const chunk of getApi().chatCompletionStream(
           {
             messages: [
               {
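
The test-helper change widens `testChat`'s first parameter from an instance to an instance-or-factory union, and every test body now goes through `getApi()` instead of touching `api` directly, so the value is resolved at test run time rather than at registration time. A minimal sketch of the same pattern with hypothetical names (`Thing` and `useThing` stand in for `BaseLlmApi` and `testChat`):

```ts
// Accept either a ready-made value or a factory that produces one on demand.
type Thing = { name: string };

function useThing(thing: Thing | (() => Thing)): () => string {
  // typeof narrowing distinguishes the factory from the plain instance;
  // the factory is only invoked when the value is actually needed.
  const getThing = () => (typeof thing === "function" ? thing() : thing);
  return () => getThing().name;
}

// A variable that is assigned later, like `api` inside a beforeAll hook:
let late: Thing;
const read = useThing(() => late); // () => late defers the read; passing `late` now would capture undefined
late = { name: "ready" };
console.log(read()); // "ready"
```

One caveat of this pattern: it only works because the instance type is not itself callable; if it were a function type, the `typeof` check could not tell it apart from the factory.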
    @@ -29,11 +29,13 @@ function testVercelProvider(config: VercelTestConfig, featureFlag: string) {
       describe(`${config.provider}/${config.model} (Vercel SDK)`, () => {
         vi.setConfig({ testTimeout: 30000 });

    -    let api: any;
    +    let api: ReturnType<typeof getLlmApi>;
    +
    +    // Set feature flag and create API instance AFTER flag is set
         beforeAll(() => {
    -      // Set feature flag before creating API instance
    +      // Note: We don't clean this up as multiple test suites may use the same flag
           process.env[featureFlag] = "true";

           api = getLlmApi({
             provider: config.provider as any,
             apiKey: config.apiKey,
    @@ -42,11 +44,7 @@ function testVercelProvider(config: VercelTestConfig, featureFlag: string) {
           });
         });

    -    afterAll(() => {
    -      delete process.env[featureFlag];
    -    });
    -
    -    testChat(api, model, {
    +    testChat(() => api, model, {
           skipTools: skipTools ?? false,
           expectUsage: expectUsage ?? true,
           skipSystemMessage: skipSystemMessage ?? false,
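
This test-setup change is the consumer side of that factory: `api` is only assigned inside `beforeAll`, after the feature flag has been written to `process.env`, but `testChat(...)` is called in the `describe` body, which runs earlier, at collection time. Passing `() => api` instead of `api` defers the read until the individual tests execute. A small runnable illustration of that ordering, with a hypothetical `registerTests` helper standing in for `testChat`:

```ts
import { beforeAll, describe, expect, test } from "vitest";

type FakeApi = { ping: () => string };

// Stand-in for testChat: registers tests that reach the api through a factory.
function registerTests(getApi: () => FakeApi) {
  test("uses the api created in beforeAll", () => {
    // Resolved here, at test run time, after beforeAll has assigned `api`.
    expect(getApi().ping()).toBe("pong");
  });
}

describe("deferred api construction", () => {
  let api: FakeApi;

  beforeAll(() => {
    // The real suite sets process.env[featureFlag] = "true" here first,
    // then builds the client, so the flag is visible during construction.
    api = { ping: () => "pong" };
  });

  // This line runs during collection, before beforeAll. Passing `api` directly
  // would hand over undefined; the arrow function defers the read.
  registerTests(() => api);
});
```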
    @@ -279,7 +279,8 @@ describe("convertVercelStream", () => {
           for await (const chunk of convertVercelStream(streamParts(), options)) {
             chunks.push(chunk);
           }
    -      expect.fail("Should have thrown error");
    +      // If we get here without throwing, the test should fail
    +      throw new Error("Expected stream to throw error but it didn't");
         } catch (error: any) {
           expect(error.message).toBe("Stream error");
           expect(chunks).toHaveLength(1); // Only the first chunk before error
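
In the convertVercelStream test, the old `expect.fail(...)` and the new `throw new Error(...)` both land in the surrounding `catch`, so the suite still fails either way if the stream never throws; the replacement just makes the resulting assertion mismatch read as "Expected stream to throw error but it didn't" rather than a generic failure message. A sketch of the overall negative-test pattern, with a hypothetical `failingStream` standing in for `convertVercelStream(streamParts(), options)`:

```ts
import { expect, test } from "vitest";

// Hypothetical stand-in for the stream under test: it emits one chunk, then throws.
async function* failingStream(): AsyncGenerator<string> {
  yield "first chunk";
  throw new Error("Stream error");
}

test("propagates stream errors after emitting earlier chunks", async () => {
  const chunks: string[] = [];
  try {
    for await (const chunk of failingStream()) {
      chunks.push(chunk);
    }
    // If we get here the stream never threw; this Error is caught below and
    // makes the message assertion fail with an explicit reason.
    throw new Error("Expected stream to throw error but it didn't");
  } catch (error: any) {
    expect(error.message).toBe("Stream error");
    expect(chunks).toHaveLength(1); // only the chunk emitted before the error
  }
});
```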