import type { AssistantModelMessage } from "ai"

export type ApiStream = AsyncGenerator<ApiStreamChunk>

export type ApiStreamChunk =
	| ApiStreamTextChunk
	| ApiStreamUsageChunk
	| ApiStreamReasoningChunk
	| ApiStreamThinkingCompleteChunk
	| ApiStreamGroundingChunk
	| ApiStreamToolCallChunk
	| ApiStreamToolCallStartChunk
	| ApiStreamToolCallDeltaChunk
	| ApiStreamToolCallEndChunk
	| ApiStreamToolCallPartialChunk
	| ApiStreamResponseMessageChunk
	| ApiStreamError
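
/*
 * Illustrative sketch (not part of the original file): how a consumer might
 * iterate an ApiStream and narrow each chunk through the discriminated `type`
 * field. The function name and handling choices below are hypothetical.
 */
async function consumeApiStreamExample(stream: ApiStream): Promise<string> {
	let assistantText = ""
	for await (const chunk of stream) {
		switch (chunk.type) {
			case "text":
				// Narrowed to ApiStreamTextChunk
				assistantText += chunk.text
				break
			case "reasoning":
				// Narrowed to ApiStreamReasoningChunk; a real consumer would surface this separately
				break
			case "error":
				// Narrowed to ApiStreamError
				throw new Error(`${chunk.error}: ${chunk.message}`)
			default:
				// Remaining chunk kinds (usage, tool calls, grounding, ...) elided in this sketch
				break
		}
	}
	return assistantText
}
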
export interface ApiStreamError {
	type: "error"
	error: string
	message: string
}

export interface ApiStreamTextChunk {
	type: "text"
	text: string
}

/**
 * Reasoning/thinking chunk from the API stream.
 * For Anthropic extended thinking, this may include a signature field
 * which is required for passing thinking blocks back to the API during tool use.
 */
export interface ApiStreamReasoningChunk {
	type: "reasoning"
	text: string
	/**
	 * Signature for the thinking block (Anthropic extended thinking).
	 * When present, this indicates a complete thinking block that should be
	 * preserved for tool use continuations. The signature is used to verify
	 * that thinking blocks were generated by Claude.
	 */
	signature?: string
}

/**
 * Signals completion of a thinking block with its verification signature.
 * Used by Anthropic extended thinking to pass the signature needed for
 * tool use continuations and caching.
 */
export interface ApiStreamThinkingCompleteChunk {
	type: "thinking_complete"
	/**
	 * Cryptographic signature that verifies this thinking block was generated by Claude.
	 * Must be preserved and passed back to the API when continuing conversations with tool use.
	 */
	signature: string
}
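
/*
 * Illustrative sketch (an assumption, not taken from the original file): one
 * plausible ordering an Anthropic-style provider could use when streaming
 * extended thinking — reasoning deltas first, then a thinking_complete chunk
 * carrying the signature needed for tool-use continuations. The function name
 * and signature value are made up for illustration.
 */
async function* emitThinkingExample(): ApiStream {
	yield { type: "reasoning", text: "Let me compare the two options..." }
	yield { type: "reasoning", text: " Option B avoids the extra request." }
	// Hypothetical value; real signatures are opaque strings returned by the API.
	yield { type: "thinking_complete", signature: "sig_example_123" }
	yield { type: "text", text: "I'll go with option B." }
}
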
export interface ApiStreamUsageChunk {
	type: "usage"
	inputTokens: number
	outputTokens: number
	cacheWriteTokens?: number
	cacheReadTokens?: number
	reasoningTokens?: number
	totalCost?: number
	/** Total input tokens including cache read/write tokens. Each provider computes this directly. */
	totalInputTokens?: number
	/** Total output tokens. Each provider computes this directly. */
	totalOutputTokens?: number
}
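
/*
 * Illustrative sketch: folding usage chunks into running totals. Preferring
 * the provider-computed totalInputTokens/totalOutputTokens when present is an
 * assumption based on the field comments above; the function name is hypothetical.
 */
function accumulateUsageExample(chunks: ApiStreamUsageChunk[]) {
	return chunks.reduce(
		(acc, chunk) => ({
			inputTokens: acc.inputTokens + (chunk.totalInputTokens ?? chunk.inputTokens),
			outputTokens: acc.outputTokens + (chunk.totalOutputTokens ?? chunk.outputTokens),
			totalCost: acc.totalCost + (chunk.totalCost ?? 0),
		}),
		{ inputTokens: 0, outputTokens: 0, totalCost: 0 },
	)
}
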
export interface ApiStreamGroundingChunk {
	type: "grounding"
	sources: GroundingSource[]
}

export interface ApiStreamToolCallChunk {
	type: "tool_call"
	id: string
	name: string
	arguments: string
}

export interface ApiStreamToolCallStartChunk {
	type: "tool_call_start"
	id: string
	name: string
}

export interface ApiStreamToolCallDeltaChunk {
	type: "tool_call_delta"
	id: string
	delta: string
}

export interface ApiStreamToolCallEndChunk {
	type: "tool_call_end"
	id: string
}

/**
 * Raw tool call chunk from the API stream.
 * Providers emit this simple format; NativeToolCallParser handles all state management
 * (tracking, buffering, emitting start/delta/end events).
 */
export interface ApiStreamToolCallPartialChunk {
	type: "tool_call_partial"
	index: number
	id?: string
	name?: string
	arguments?: string
}
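
/*
 * Illustrative sketch: a heavily simplified stand-in for the state management
 * the comment above attributes to NativeToolCallParser. The buffering rules
 * and function name here are assumptions; only the chunk shapes come from this file.
 */
function* toToolCallEventsExample(
	partials: ApiStreamToolCallPartialChunk[],
): Generator<ApiStreamToolCallStartChunk | ApiStreamToolCallDeltaChunk | ApiStreamToolCallEndChunk> {
	const started = new Map<number, string>() // tool call index -> id
	for (const partial of partials) {
		// Emit a start event the first time an index arrives with both id and name
		if (!started.has(partial.index) && partial.id && partial.name) {
			started.set(partial.index, partial.id)
			yield { type: "tool_call_start", id: partial.id, name: partial.name }
		}
		// Forward argument fragments as deltas for already-started calls
		const id = started.get(partial.index)
		if (id && partial.arguments) {
			yield { type: "tool_call_delta", id, delta: partial.arguments }
		}
	}
	// Close out every call once the raw chunks are exhausted
	for (const id of started.values()) {
		yield { type: "tool_call_end", id }
	}
}
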
/**
 * Carries the fully-formed assistant message from the AI SDK's `result.response.messages`.
 * Yielded after streaming completes so Task.ts can store it directly without manual reconstruction.
 */
export interface ApiStreamResponseMessageChunk {
	type: "response_message"
	message: AssistantModelMessage
}
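
/*
 * Illustrative sketch: wrapping a provider stream so the fully-formed assistant
 * message is yielded last, once streaming has finished. How that message is
 * obtained from the AI SDK result is elided; `getFinalMessage` and the function
 * name are hypothetical.
 */
async function* withResponseMessageExample(
	stream: ApiStream,
	getFinalMessage: () => Promise<AssistantModelMessage>,
): ApiStream {
	yield* stream
	yield { type: "response_message", message: await getFinalMessage() }
}
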
export interface GroundingSource {
	title: string
	url: string
	snippet?: string
}