@@ -1,125 +1,28 @@
-// Mocks must come first, before imports
-const mockCreate = vi.fn()
-vi.mock("openai", () => {
+// Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
+const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
+	mockStreamText: vi.fn(),
+	mockGenerateText: vi.fn(),
+}))
+
+vi.mock("ai", async (importOriginal) => {
+	const actual = await importOriginal<typeof import("ai")>()
 	return {
-		__esModule: true,
-		default: vi.fn().mockImplementation(() => ({
-			chat: {
-				completions: {
-					create: mockCreate.mockImplementation(async (options) => {
-						if (!options.stream) {
-							return {
-								id: "test-completion",
-								choices: [
-									{
-										message: { role: "assistant", content: "Test response", refusal: null },
-										finish_reason: "stop",
-										index: 0,
-									},
-								],
-								usage: {
-									prompt_tokens: 10,
-									completion_tokens: 5,
-									total_tokens: 15,
-									prompt_tokens_details: {
-										cache_miss_tokens: 8,
-										cached_tokens: 2,
-									},
-								},
-							}
-						}
-
-						// Check if this is a reasoning_content test by looking at model
-						const isReasonerModel = options.model?.includes("deepseek-reasoner")
-						const isToolCallTest = options.tools?.length > 0
-
-						// Return async iterator for streaming
-						return {
-							[Symbol.asyncIterator]: async function* () {
-								// For reasoner models, emit reasoning_content first
-								if (isReasonerModel) {
-									yield {
-										choices: [
-											{
-												delta: { reasoning_content: "Let me think about this..." },
-												index: 0,
-											},
-										],
-										usage: null,
-									}
-									yield {
-										choices: [
-											{
-												delta: { reasoning_content: " I'll analyze step by step." },
-												index: 0,
-											},
-										],
-										usage: null,
-									}
-								}
-
-								// For tool call tests with reasoner, emit tool call
-								if (isReasonerModel && isToolCallTest) {
-									yield {
-										choices: [
-											{
-												delta: {
-													tool_calls: [
-														{
-															index: 0,
-															id: "call_123",
-															function: {
-																name: "get_weather",
-																arguments: '{"location":"SF"}',
-															},
-														},
-													],
-												},
-												index: 0,
-											},
-										],
-										usage: null,
-									}
-								} else {
-									yield {
-										choices: [
-											{
-												delta: { content: "Test response" },
-												index: 0,
-											},
-										],
-										usage: null,
-									}
-								}
-
-								yield {
-									choices: [
-										{
-											delta: {},
-											index: 0,
-											finish_reason: isToolCallTest ? "tool_calls" : "stop",
-										},
-									],
-									usage: {
-										prompt_tokens: 10,
-										completion_tokens: 5,
-										total_tokens: 15,
-										prompt_tokens_details: {
-											cache_miss_tokens: 8,
-											cached_tokens: 2,
-										},
-									},
-								}
-							},
-						}
-					}),
-				},
-			},
-		})),
+		...actual,
+		streamText: mockStreamText,
+		generateText: mockGenerateText,
 	}
 })
 
-import OpenAI from "openai"
+vi.mock("@ai-sdk/deepseek", () => ({
+	createDeepSeek: vi.fn(() => {
+		// Return a function that returns a mock language model
+		return vi.fn(() => ({
+			modelId: "deepseek-chat",
+			provider: "deepseek",
+		}))
+	}),
+}))
+
 import type { Anthropic } from "@anthropic-ai/sdk"
 
 import { deepSeekDefaultModelId, type ModelInfo } from "@roo-code/types"
@@ -148,15 +51,6 @@ describe("DeepSeekHandler", () => {
 			expect(handler.getModel().id).toBe(mockOptions.apiModelId)
 		})
 
-		it.skip("should throw error if API key is missing", () => {
-			expect(() => {
-				new DeepSeekHandler({
-					...mockOptions,
-					deepSeekApiKey: undefined,
-				})
-			}).toThrow("DeepSeek API key is required")
-		})
-
 		it("should use default model ID if not provided", () => {
 			const handlerWithoutModel = new DeepSeekHandler({
 				...mockOptions,
@@ -171,12 +65,6 @@ describe("DeepSeekHandler", () => {
 				deepSeekBaseUrl: undefined,
 			})
 			expect(handlerWithoutBaseUrl).toBeInstanceOf(DeepSeekHandler)
-			// The base URL is passed to OpenAI client internally
-			expect(OpenAI).toHaveBeenCalledWith(
-				expect.objectContaining({
-					baseURL: "https://api.deepseek.com",
-				}),
-			)
 		})
 
 		it("should use custom base URL if provided", () => {
@@ -186,18 +74,6 @@ describe("DeepSeekHandler", () => {
 				deepSeekBaseUrl: customBaseUrl,
 			})
 			expect(handlerWithCustomUrl).toBeInstanceOf(DeepSeekHandler)
-			// The custom base URL is passed to OpenAI client
-			expect(OpenAI).toHaveBeenCalledWith(
-				expect.objectContaining({
-					baseURL: customBaseUrl,
-				}),
-			)
-		})
-
-		it("should set includeMaxTokens to true", () => {
-			// Create a new handler and verify OpenAI client was called with includeMaxTokens
-			const _handler = new DeepSeekHandler(mockOptions)
-			expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: mockOptions.deepSeekApiKey }))
 		})
 	})
 
@@ -296,6 +172,31 @@ describe("DeepSeekHandler", () => {
 		]
 
 		it("should handle streaming responses", async () => {
+			// Mock the fullStream async generator
+			// Note: processAiSdkStreamPart expects 'text' property for text-delta type
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			// Mock usage and providerMetadata promises
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({
+				deepseek: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
 			const stream = handler.createMessage(systemPrompt, messages)
 			const chunks: any[] = []
 			for await (const chunk of stream) {
@@ -309,6 +210,28 @@ describe("DeepSeekHandler", () => {
 		})
 
 		it("should include usage information", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({
+				deepseek: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
 			const stream = handler.createMessage(systemPrompt, messages)
 			const chunks: any[] = []
 			for await (const chunk of stream) {
@@ -321,7 +244,30 @@ describe("DeepSeekHandler", () => {
 			expect(usageChunks[0].outputTokens).toBe(5)
 		})
 
-		it("should include cache metrics in usage information", async () => {
+		it("should include cache metrics in usage information from providerMetadata", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			// DeepSeek provides cache metrics via providerMetadata
+			const mockProviderMetadata = Promise.resolve({
+				deepseek: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
 			const stream = handler.createMessage(systemPrompt, messages)
 			const chunks: any[] = []
 			for await (const chunk of stream) {
@@ -330,29 +276,76 @@ describe("DeepSeekHandler", () => {
 
 			const usageChunks = chunks.filter((chunk) => chunk.type === "usage")
 			expect(usageChunks.length).toBeGreaterThan(0)
-			expect(usageChunks[0].cacheWriteTokens).toBe(8)
-			expect(usageChunks[0].cacheReadTokens).toBe(2)
+			expect(usageChunks[0].cacheWriteTokens).toBe(8) // promptCacheMissTokens
+			expect(usageChunks[0].cacheReadTokens).toBe(2) // promptCacheHitTokens
+		})
+	})
+
+	describe("completePrompt", () => {
+		it("should complete a prompt using generateText", async () => {
+			mockGenerateText.mockResolvedValue({
+				text: "Test completion",
+			})
+
+			const result = await handler.completePrompt("Test prompt")
+
+			expect(result).toBe("Test completion")
+			expect(mockGenerateText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					prompt: "Test prompt",
+				}),
+			)
 		})
 	})
 
 	describe("processUsageMetrics", () => {
-		it("should correctly process usage metrics including cache information", () => {
+		it("should correctly process usage metrics including cache information from providerMetadata", () => {
 			// We need to access the protected method, so we'll create a test subclass
 			class TestDeepSeekHandler extends DeepSeekHandler {
-				public testProcessUsageMetrics(usage: any) {
-					return this.processUsageMetrics(usage)
+				public testProcessUsageMetrics(usage: any, providerMetadata?: any) {
+					return this.processUsageMetrics(usage, providerMetadata)
 				}
 			}
 
 			const testHandler = new TestDeepSeekHandler(mockOptions)
 
 			const usage = {
-				prompt_tokens: 100,
-				completion_tokens: 50,
-				total_tokens: 150,
-				prompt_tokens_details: {
-					cache_miss_tokens: 80,
-					cached_tokens: 20,
+				inputTokens: 100,
+				outputTokens: 50,
+			}
+
+			// DeepSeek provides cache metrics via providerMetadata
+			const providerMetadata = {
+				deepseek: {
+					promptCacheHitTokens: 20,
+					promptCacheMissTokens: 80,
+				},
+			}
+
+			const result = testHandler.testProcessUsageMetrics(usage, providerMetadata)
+
+			expect(result.type).toBe("usage")
+			expect(result.inputTokens).toBe(100)
+			expect(result.outputTokens).toBe(50)
+			expect(result.cacheWriteTokens).toBe(80) // promptCacheMissTokens
+			expect(result.cacheReadTokens).toBe(20) // promptCacheHitTokens
+		})
+
+		it("should handle usage with details.cachedInputTokens when providerMetadata is not available", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testProcessUsageMetrics(usage: any, providerMetadata?: any) {
+					return this.processUsageMetrics(usage, providerMetadata)
+				}
+			}
+
+			const testHandler = new TestDeepSeekHandler(mockOptions)
+
+			const usage = {
+				inputTokens: 100,
+				outputTokens: 50,
+				details: {
+					cachedInputTokens: 25,
+					reasoningTokens: 30,
 				},
 			}
 
@@ -361,24 +354,24 @@ describe("DeepSeekHandler", () => {
 			expect(result.type).toBe("usage")
 			expect(result.inputTokens).toBe(100)
 			expect(result.outputTokens).toBe(50)
-			expect(result.cacheWriteTokens).toBe(80)
-			expect(result.cacheReadTokens).toBe(20)
+			expect(result.cacheReadTokens).toBe(25) // from details.cachedInputTokens
+			expect(result.cacheWriteTokens).toBeUndefined()
+			expect(result.reasoningTokens).toBe(30)
 		})
 
 		it("should handle missing cache metrics gracefully", () => {
 			class TestDeepSeekHandler extends DeepSeekHandler {
-				public testProcessUsageMetrics(usage: any) {
-					return this.processUsageMetrics(usage)
+				public testProcessUsageMetrics(usage: any, providerMetadata?: any) {
+					return this.processUsageMetrics(usage, providerMetadata)
 				}
 			}
 
 			const testHandler = new TestDeepSeekHandler(mockOptions)
 
 			const usage = {
-				prompt_tokens: 100,
-				completion_tokens: 50,
-				total_tokens: 150,
-				// No prompt_tokens_details
+				inputTokens: 100,
+				outputTokens: 50,
+				// No details or providerMetadata
 			}
 
 			const result = testHandler.testProcessUsageMetrics(usage)
@@ -391,7 +384,7 @@ describe("DeepSeekHandler", () => {
 		})
 	})
 
-	describe("interleaved thinking mode", () => {
+	describe("reasoning content with deepseek-reasoner", () => {
 		const systemPrompt = "You are a helpful assistant."
 		const messages: Anthropic.Messages.MessageParam[] = [
 			{
@@ -405,12 +398,41 @@ describe("DeepSeekHandler", () => {
 			},
 		]
 
-		it("should handle reasoning_content in streaming responses for deepseek-reasoner", async () => {
+		it("should handle reasoning content in streaming responses for deepseek-reasoner", async () => {
 			const reasonerHandler = new DeepSeekHandler({
 				...mockOptions,
 				apiModelId: "deepseek-reasoner",
 			})
 
+			// Mock the fullStream async generator with reasoning content
+			// Note: processAiSdkStreamPart expects 'text' property for reasoning type
+			async function* mockFullStream() {
+				yield { type: "reasoning", text: "Let me think about this..." }
+				yield { type: "reasoning", text: " I'll analyze step by step." }
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+				details: {
+					reasoningTokens: 15,
+				},
+			})
+
+			const mockProviderMetadata = Promise.resolve({
+				deepseek: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
 			const stream = reasonerHandler.createMessage(systemPrompt, messages)
 			const chunks: any[] = []
 			for await (const chunk of stream) {
@@ -419,54 +441,91 @@ describe("DeepSeekHandler", () => {
 
 			// Should have reasoning chunks
 			const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning")
-			expect(reasoningChunks.length).toBeGreaterThan(0)
+			expect(reasoningChunks.length).toBe(2)
 			expect(reasoningChunks[0].text).toBe("Let me think about this...")
 			expect(reasoningChunks[1].text).toBe(" I'll analyze step by step.")
+
+			// Should also have text chunks
+			const textChunks = chunks.filter((chunk) => chunk.type === "text")
+			expect(textChunks.length).toBe(1)
+			expect(textChunks[0].text).toBe("Test response")
 		})
 
-		it("should pass thinking parameter for deepseek-reasoner model", async () => {
+		it("should include reasoningTokens in usage for deepseek-reasoner", async () => {
 			const reasonerHandler = new DeepSeekHandler({
 				...mockOptions,
 				apiModelId: "deepseek-reasoner",
 			})
 
-			const stream = reasonerHandler.createMessage(systemPrompt, messages)
-			for await (const _chunk of stream) {
-				// Consume the stream
+			async function* mockFullStream() {
+				yield { type: "reasoning", text: "Thinking..." }
+				yield { type: "text-delta", text: "Answer" }
 			}
 
-			// Verify that the thinking parameter was passed to the API
-			// Note: mockCreate receives two arguments - request options and path options
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.objectContaining({
-					thinking: { type: "enabled" },
-				}),
-				{}, // Empty path options for non-Azure URLs
-			)
-		})
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+				details: {
+					reasoningTokens: 15,
+				},
+			})
 
-		it("should NOT pass thinking parameter for deepseek-chat model", async () => {
-			const chatHandler = new DeepSeekHandler({
-				...mockOptions,
-				apiModelId: "deepseek-chat",
+			const mockProviderMetadata = Promise.resolve({})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
 			})
 
-			const stream = chatHandler.createMessage(systemPrompt, messages)
-			for await (const _chunk of stream) {
-				// Consume the stream
+			const stream = reasonerHandler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
 			}
 
-			// Verify that the thinking parameter was NOT passed to the API
-			const callArgs = mockCreate.mock.calls[0][0]
-			expect(callArgs.thinking).toBeUndefined()
+			const usageChunks = chunks.filter((chunk) => chunk.type === "usage")
+			expect(usageChunks.length).toBe(1)
+			expect(usageChunks[0].reasoningTokens).toBe(15)
 		})
 
-		it("should handle tool calls with reasoning_content", async () => {
+		it("should handle tool calls with reasoning content", async () => {
 			const reasonerHandler = new DeepSeekHandler({
 				...mockOptions,
 				apiModelId: "deepseek-reasoner",
 			})
 
+			// Mock stream with reasoning followed by tool call via streaming events
+			// (tool-input-start/delta/end, NOT tool-call which is ignored to prevent duplicates)
+			async function* mockFullStream() {
+				yield { type: "reasoning", text: "Let me think about this..." }
+				yield { type: "reasoning", text: " I'll analyze step by step." }
+				yield { type: "tool-input-start", id: "call_123", toolName: "get_weather" }
+				yield { type: "tool-input-delta", id: "call_123", delta: '{"location":"SF"}' }
+				yield { type: "tool-input-end", id: "call_123" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+				details: {
+					reasoningTokens: 15,
+				},
+			})
+
+			const mockProviderMetadata = Promise.resolve({
+				deepseek: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
 			const tools: any[] = [
 				{
 					type: "function",
@@ -486,12 +545,239 @@ describe("DeepSeekHandler", () => {
 
 			// Should have reasoning chunks
 			const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning")
-			expect(reasoningChunks.length).toBeGreaterThan(0)
+			expect(reasoningChunks.length).toBe(2)
+
+			// Should have tool call streaming chunks (start/delta/end, NOT tool_call)
+			const toolCallStartChunks = chunks.filter((chunk) => chunk.type === "tool_call_start")
+			expect(toolCallStartChunks.length).toBe(1)
+			expect(toolCallStartChunks[0].name).toBe("get_weather")
+		})
+	})
+
+	describe("tool handling", () => {
+		const systemPrompt = "You are a helpful assistant."
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [{ type: "text" as const, text: "Hello!" }],
+			},
+		]
+
+		it("should handle tool calls in streaming", async () => {
+			async function* mockFullStream() {
+				yield {
+					type: "tool-input-start",
+					id: "tool-call-1",
+					toolName: "read_file",
+				}
+				yield {
+					type: "tool-input-delta",
+					id: "tool-call-1",
+					delta: '{"path":"test.ts"}',
+				}
+				yield {
+					type: "tool-input-end",
+					id: "tool-call-1",
+				}
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages, {
+				taskId: "test-task",
+				tools: [
+					{
+						type: "function",
+						function: {
+							name: "read_file",
+							description: "Read a file",
+							parameters: {
+								type: "object",
+								properties: { path: { type: "string" } },
+								required: ["path"],
+							},
+						},
+					},
+				],
+			})
+
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			const toolCallStartChunks = chunks.filter((c) => c.type === "tool_call_start")
+			const toolCallDeltaChunks = chunks.filter((c) => c.type === "tool_call_delta")
+			const toolCallEndChunks = chunks.filter((c) => c.type === "tool_call_end")
+
+			expect(toolCallStartChunks.length).toBe(1)
+			expect(toolCallStartChunks[0].id).toBe("tool-call-1")
+			expect(toolCallStartChunks[0].name).toBe("read_file")
+
+			expect(toolCallDeltaChunks.length).toBe(1)
+			expect(toolCallDeltaChunks[0].delta).toBe('{"path":"test.ts"}')
+
+			expect(toolCallEndChunks.length).toBe(1)
+			expect(toolCallEndChunks[0].id).toBe("tool-call-1")
+		})
+
+		it("should ignore tool-call events to prevent duplicate tools in UI", async () => {
+			// tool-call events are intentionally ignored because tool-input-start/delta/end
+			// already provide complete tool call information. Emitting tool-call would cause
+			// duplicate tools in the UI for AI SDK providers (e.g., DeepSeek, Moonshot).
+			async function* mockFullStream() {
+				yield {
+					type: "tool-call",
+					toolCallId: "tool-call-1",
+					toolName: "read_file",
+					input: { path: "test.ts" },
+				}
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages, {
+				taskId: "test-task",
+				tools: [
+					{
+						type: "function",
+						function: {
+							name: "read_file",
+							description: "Read a file",
+							parameters: {
+								type: "object",
+								properties: { path: { type: "string" } },
+								required: ["path"],
+							},
+						},
+					},
+				],
+			})
+
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			// tool-call events are ignored, so no tool_call chunks should be emitted
+			const toolCallChunks = chunks.filter((c) => c.type === "tool_call")
+			expect(toolCallChunks.length).toBe(0)
+		})
+	})
+
+	describe("getMaxOutputTokens", () => {
+		it("should return maxTokens from model info", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testGetMaxOutputTokens() {
+					return this.getMaxOutputTokens()
+				}
+			}
+
+			const testHandler = new TestDeepSeekHandler(mockOptions)
+			const result = testHandler.testGetMaxOutputTokens()
+
+			// Default model maxTokens is 8192
+			expect(result).toBe(8192)
+		})
+
+		it("should use modelMaxTokens when provided", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testGetMaxOutputTokens() {
+					return this.getMaxOutputTokens()
+				}
+			}
+
+			const customMaxTokens = 5000
+			const testHandler = new TestDeepSeekHandler({
+				...mockOptions,
+				modelMaxTokens: customMaxTokens,
+			})
+
+			const result = testHandler.testGetMaxOutputTokens()
+			expect(result).toBe(customMaxTokens)
+		})
+
+		it("should fall back to modelInfo.maxTokens when modelMaxTokens is not provided", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testGetMaxOutputTokens() {
+					return this.getMaxOutputTokens()
+				}
+			}
+
+			const testHandler = new TestDeepSeekHandler(mockOptions)
+			const result = testHandler.testGetMaxOutputTokens()
+
+			// deepseek-chat has maxTokens of 8192
+			expect(result).toBe(8192)
+		})
+	})
+
+	describe("mapToolChoice", () => {
+		it("should handle string tool choices", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testMapToolChoice(toolChoice: any) {
+					return this.mapToolChoice(toolChoice)
+				}
+			}
+
+			const testHandler = new TestDeepSeekHandler(mockOptions)
+
+			expect(testHandler.testMapToolChoice("auto")).toBe("auto")
+			expect(testHandler.testMapToolChoice("none")).toBe("none")
+			expect(testHandler.testMapToolChoice("required")).toBe("required")
+			expect(testHandler.testMapToolChoice("unknown")).toBe("auto")
+		})
+
+		it("should handle object tool choice with function name", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testMapToolChoice(toolChoice: any) {
+					return this.mapToolChoice(toolChoice)
+				}
+			}
+
+			const testHandler = new TestDeepSeekHandler(mockOptions)
+
+			const result = testHandler.testMapToolChoice({
+				type: "function",
+				function: { name: "my_tool" },
+			})
+
+			expect(result).toEqual({ type: "tool", toolName: "my_tool" })
+		})
+
+		it("should return undefined for null or undefined", () => {
+			class TestDeepSeekHandler extends DeepSeekHandler {
+				public testMapToolChoice(toolChoice: any) {
+					return this.mapToolChoice(toolChoice)
+				}
+			}
+
+			const testHandler = new TestDeepSeekHandler(mockOptions)
 
-			// Should have tool call chunks
-			const toolCallChunks = chunks.filter((chunk) => chunk.type === "tool_call_partial")
-			expect(toolCallChunks.length).toBeGreaterThan(0)
-			expect(toolCallChunks[0].name).toBe("get_weather")
+			expect(testHandler.testMapToolChoice(null)).toBeUndefined()
+			expect(testHandler.testMapToolChoice(undefined)).toBeUndefined()
 		})
 	})
 })