Browse Source

feat: add adapter to convert the response object or chatCompletion object to the message type required by dialogue

林艳 2 weeks ago
parent
commit
63cb5c7bc4
23 changed files with 1638 additions and 51 deletions
  1. 6 1
      packages/semi-foundation/aiChatDialogue/aiChatDialogue.scss
  2. 13 3
      packages/semi-foundation/aiChatDialogue/constants.ts
  3. 46 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/aiChatInputContentToMessage.ts
  4. 82 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/chatCompletionToMessage.ts
  5. 12 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/index.ts
  6. 215 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/interface.ts
  7. 15 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/responseToMessage.ts
  8. 129 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/streamingChatCompletionToMessage.ts
  9. 512 0
      packages/semi-foundation/aiChatDialogue/dataAdapter/streamingResponseToMessage.ts
  10. 5 4
      packages/semi-foundation/aiChatDialogue/foundation.ts
  11. 51 0
      packages/semi-ui/aiChatDialogue/_story/Data/ChatCompletionData.js
  12. 114 0
      packages/semi-ui/aiChatDialogue/_story/Data/ResponseData.js
  13. 24 0
      packages/semi-ui/aiChatDialogue/_story/Data/StreamingChatCompletion.js
  14. 16 0
      packages/semi-ui/aiChatDialogue/_story/Data/StreamingResponseData.js
  15. 1 0
      packages/semi-ui/aiChatDialogue/_story/DataAdapter/aiChatInputContentToMessage.jsx
  16. 49 0
      packages/semi-ui/aiChatDialogue/_story/DataAdapter/chatCompletionToMessage.jsx
  17. 49 0
      packages/semi-ui/aiChatDialogue/_story/DataAdapter/responseToMessage.jsx
  18. 73 0
      packages/semi-ui/aiChatDialogue/_story/DataAdapter/streamingChatCompletionToMessage.jsx
  19. 143 0
      packages/semi-ui/aiChatDialogue/_story/DataAdapter/streamingResponseToMessage.jsx
  20. 32 20
      packages/semi-ui/aiChatDialogue/_story/aiChatDialogue.stories.jsx
  21. 20 5
      packages/semi-ui/aiChatDialogue/widgets/contentItem/annotation.tsx
  22. 30 18
      packages/semi-ui/aiChatDialogue/widgets/dialogueContent.tsx
  23. 1 0
      packages/semi-ui/index.ts

+ 6 - 1
packages/semi-foundation/aiChatDialogue/aiChatDialogue.scss

@@ -261,7 +261,7 @@ $module: #{$prefix}-ai-chat-dialogue;
         }
 
         &-tool-call {
-            display: inline-flex;
+            display: flex;
             color: $color-aiChatDialogue_tool_call-text;
             padding: $spacing-aiChatDialogue_tool_call-paddingY $spacing-aiChatDialogue_tool_call-paddingX;
             background-color: $color-aiChatDialogue_tool_call-bg;
@@ -466,6 +466,11 @@ $module: #{$prefix}-ai-chat-dialogue;
                 display: flex;
             }
 
+            &-logo-renderMore {
+                width: $width-aiChatDialogue_annotation_logo;
+                height: $height-aiChatDialogue_annotation_logo;
+            }
+
             &-icon {
                 @include all-center;
             }

+ 13 - 3
packages/semi-foundation/aiChatDialogue/constants.ts

@@ -13,7 +13,7 @@ const cssClasses = {
     PREFIX_HINT: `${BASE_CLASS_PREFIX}-ai-chat-dialogue-hint`,
 };
 
-export const ROLE = {
+const ROLE = {
     USER: 'user',
     ASSISTANT: 'assistant',
     SYSTEM: 'system',
@@ -25,7 +25,7 @@ const DIALOGUE_ALIGN = {
 };
 
 const STATUS = {
-    QUEUE: 'queue',
+    QUEUED: 'queued',
     IN_PROGRESS: 'in_progress',
     INCOMPLETE: 'incomplete',
     COMPLETED: 'completed',
@@ -65,13 +65,23 @@ const MESSAGE_ITEM_TYPE = {
     ITEM_REFERENCE: 'item_reference',
 };
 
+const FINISH_REASON = {
+    STOP: 'stop',
+    LENGTH: 'length',
+    CONTENT_FILTER: 'content_filter',
+    TOOL_CALLS: 'tool_calls',
+    FUNCTION_CALL: 'function_call'
+};
+
+
 const strings = {
     STATUS,
     ROLE,
     DIALOGUE_ALIGN,
     MODE,
     SEND_HOT_KEY,
-    MESSAGE_ITEM_TYPE
+    MESSAGE_ITEM_TYPE,
+    FINISH_REASON
 };
 
 export { cssClasses, strings };

+ 46 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/aiChatInputContentToMessage.ts

@@ -0,0 +1,46 @@
+
+
+export default function AiChatInputContentToMessage(inputContent: any) { // todo: 合入 aiChatInput 后补充类型定义
+    const { references, attachments, inputContents, setup } = inputContent;
+
+    let inputs: any[] = [];
+
+    if (attachments?.length) { // todo: attachment 允许传递目录?
+        attachments.forEach((item: any) => {
+            const { name, url } = item;
+            // todo: 如何区分文件是 image 还是 file?
+            if (name.includes('.png') || name.includes('.jpg') || name.includes('.jpeg')) {
+                inputs.push({
+                    type: 'input_image',
+                    image_url: url,
+                    detail: 'auto'
+                });
+            } else {
+                inputs.push({
+                    type: 'input_file',
+                    file_url: url, // todo: blob URL?
+                    name: name,
+                });
+            }
+        });
+    }
+
+    if (inputContents?.length) {
+        inputContents.forEach((item: any) => {
+            inputs.push({
+                type: 'input_text',
+                text: item.text,
+            });
+        });
+    }
+
+    // todo: mcp 
+
+    return {
+        role: "user",
+        content: inputs,
+        // createdAt: created_at, // todo: 产生消息时给 createdat 还是发送时?
+        // model: model, // todo: inputContent 中未包含 model 信息
+        references
+    };
+}

+ 82 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/chatCompletionToMessage.ts

@@ -0,0 +1,82 @@
+import { ChatCompletion, Choice, ToolCalls, FunctionToolCall, CustomToolCall } from './interface';
+/* 
+Chat Completion vs. Response
+- The former only has content, refusal, function_call and tool_calls; 
+- The former's annotations belong to the content;
+- The former's function_call and tool_calls do not have call_id or status;
+*/
+export default function chatCompletionToMessage(chatCompletion: ChatCompletion) {
+    return chatCompletion.choices.map((choice: Choice) => {
+        const message = choice.message;
+        const role = message.role;
+        const id = chatCompletion.id;
+        const status = 'completed'; 
+
+        const outputResult = [];
+        
+        // processing text and refusal
+        if (message.content !== '' || message.refusal !== '') {
+            const annotations = (message.annotations?.length
+                ? message.annotations.map((annotation) => ({
+                    type: annotation.type,
+                    ...(annotation.url_citation || {}),
+                }))
+                : []);
+
+            const outputMessage = [
+                message.content !== '' && {
+                    type: 'output_text',
+                    text: message.content,
+                    annotations,
+                },
+                message.refusal !== '' && {
+                    type: 'refusal',
+                    refusal: message.refusal,
+                },
+            ].filter(Boolean);
+
+            outputResult.push({
+                type: 'message',
+                id: id,
+                role: 'assistant',
+                status: status,
+                content: outputMessage
+            });
+        }
+
+        // processing function call
+        if (message.function_call) {
+            outputResult.push({
+                ...message.function_call,
+                type: 'function_call',
+                status: 'completed',
+            });
+        }
+
+        // processing tool calls
+        if (message?.tool_calls?.length) {
+            const toolCalls = message.tool_calls.map((toolCall: ToolCalls) => {
+                if (toolCall.type === 'function') {
+                    return {
+                        status: 'completed',
+                        ...(toolCall as FunctionToolCall).function,
+                        type: 'function_call',
+                        // todo: call_id?
+                    };
+                }
+                return {
+                    ...(toolCall as CustomToolCall).custom,
+                    type: 'custom_call',
+                };
+            });
+            outputResult.push(...toolCalls);
+        }
+
+        return {
+            id: id,
+            role: role,
+            content: outputResult,
+            status: status,
+        };
+    });
+}

+ 12 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/index.ts

@@ -0,0 +1,12 @@
+import responseToMessage from './responseToMessage';
+import chatCompletionToMessage from './chatCompletionToMessage';
+import streamingChatCompletionToMessage from './streamingChatCompletionToMessage';
+import streamingResponseToMessage from './streamingResponseToMessage';
+
+
+export { 
+    chatCompletionToMessage, 
+    streamingChatCompletionToMessage, 
+    responseToMessage, 
+    streamingResponseToMessage 
+};

+ 215 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/interface.ts

@@ -0,0 +1,215 @@
+import { ContentItem, OutputText, Refusal } from "aiChatDialogue/foundation";
+
+export interface ChatCompletion {
+    id?: string;
+    object?: string;
+    created?: number;
+    model?: string;
+    choices?: Choice[];
+    usage?: Usage;
+    service_tier?: string;
+    system_fingerprint?: string
+}
+
+export interface ChatCompletionChunk {
+    id?: string;
+    object?: string;
+    created?: number;
+    model?: string;
+    choices?: ChoiceChunk[];
+    usage?: Usage;
+    service_tier?: string;
+    system_fingerprint?: string
+}
+
+export interface Choice {
+    index?: number;
+    message?: ChatCompletionMessage;
+    logprobs?: Logprobs;
+    finish_reason?: string
+}
+
+export interface ChoiceChunk {
+    index?: number;
+    delta?: Delta;
+    logprobs?: Logprobs;
+    finish_reason?: string
+}
+
+export interface Delta {
+    role?: string;
+    content?: string;
+    refusal?: string;
+    function_call?: FunctionCall;
+    tool_calls?: FunctionToolCall[]
+}
+
+
+interface ChatCompletionMessage {
+    role?: string;
+    content?: string;
+    refusal?: string;
+    annotations?: ChatCompletionAnnotation[];
+    audio?: Audio;
+    function_call?: FunctionCall;
+    tool_calls?: ToolCalls[]
+}
+
+export type ToolCalls = FunctionToolCall | CustomToolCall 
+
+interface ChatCompletionAnnotation {
+    type?: string;
+    url_citation?: URLCitation
+}
+
+interface URLCitation {
+    end_index?: number;
+    start_index?: number;
+    title?: string;
+    url?: string
+}
+
+interface Audio {
+    data: string;
+    expires_at?: number;
+    id?: string;
+    transcript?: string
+}
+
+interface FunctionCall {
+    name?: string;
+    arguments?: string
+}
+
+interface ToolCall {
+    input?: string;
+    result?: string
+}
+
+export interface FunctionToolCall {
+    id?: string;
+    index?: number;
+    type?: string;
+    function?: FunctionCall
+}
+
+export interface CustomToolCall {
+    id?: string;
+    type?: string;
+    custom?: ToolCall
+}
+
+interface Logprobs {
+    content?: LogprobsContent[];
+    refusal?: LogprobsContent[]
+}
+
+interface LogprobsContent {
+    bytes?: number[];
+    logprob?: number;
+    token?: string;
+    top_logprobs?: LogprobsContent[]
+}
+
+
+interface Usage {
+    prompt_tokens?: number;
+    completion_tokens?: number;
+    total_tokens?: number;
+    completion_tokens_details?: CompletionTokensDetails;
+    prompt_tokens_details?: PromptTokensDetails
+}
+
+interface CompletionTokensDetails {
+    reasoning_tokens?: number;
+    audio_tokens?: number;
+    accepted_prediction_tokens?: number;
+    rejected_prediction_tokens?: number
+}
+
+interface PromptTokensDetails {
+    cached_tokens?: number;
+    audio_tokens?: number
+}
+
+
+export interface Response {
+    id?: string;
+    created_at?: number;
+    error?: ResponseError;
+    incomplete_details?: {
+        reason: string
+    };
+    max_output_tokens?: number;
+    max_tool_calls?: number;
+    model?: string;
+    object?: string;
+    output?: ContentItem;
+    parallel_tool_calls?: boolean;
+    previous_response_id?: string;
+    reasoning?: ResponseReasoning;
+    safety_identifier?: string;
+    status?: string;
+    temperature?: number;
+    top_logprobs?: number;
+    top_p?: number;
+    truncation?: string;
+    [x: string]: any
+}
+
+export interface ResponseError {
+    code?: string;
+    message?: string
+}
+
+interface ResponseReasoning {
+    effort?: string;
+    generate_summary?: string;
+    summary?: string
+}
+
+export interface ResponseChunk {
+    type?: string;
+    response?: Response;
+    sequence_number?: number;
+    output_index?: number;
+    item?: ContentItem;
+    content_index?: number;
+    item_id?: string;
+    part?: OutputText | Refusal | ReasoningText;
+    delta?: string;
+    refusal?: string;
+    name?: string;
+    arguments?: string;
+    summary_index?: number;
+    text?: string;
+    code?: string;
+    message?: string;
+    param?: string;
+    [x: string]: any
+}
+
+interface ReasoningText {
+    text?: string;
+    type?: string
+}
+
+export interface CodeInterpreterCall {
+    code?: string;
+    status?: string;
+    outputs?: {
+        logs?: string;
+        url?: string;
+        type?: string
+    }[];
+    id?: string;
+    container_id?: string;
+    type?: string
+}
+
+export interface ImageGenerationCall {
+    id?: string;
+    result?: string;
+    status?: string;
+    type?: string
+}

+ 15 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/responseToMessage.ts

@@ -0,0 +1,15 @@
+import { Response } from './interface';
+
+export default function responseToMessage(response: Response) {
+    const { id, model, status, output, output_text, created_at } = response;
+
+    return {
+        id: id,
+        role: "assistant",
+        content: output,
+        createdAt: created_at,
+        output_text: output_text,
+        model: model,
+        status: status,
+    };
+}

+ 129 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/streamingChatCompletionToMessage.ts

@@ -0,0 +1,129 @@
+import { Message } from 'aiChatDialogue/foundation';
+import { ChatCompletionChunk, FunctionToolCall } from './interface';
+ 
+
+// 状态对象:记录每个请求 id + choice index 已处理的 chunk 数量
+export interface StreamingChatState {
+    processedCountByIndex?: Record<string, number>
+}
+
+export default function streamingChatCompletionToMessage(chatCompletionChunks: ChatCompletionChunk[], state?: StreamingChatState): { messages: Message[]; state?: StreamingChatState } { // There may be N answers, so the return value is a Message array
+
+    const groupedChunks = groupByIndex(chatCompletionChunks);
+
+    const results = groupedChunks.map((chatCompletionChunks: ChatCompletionChunk[], groupIndex: number) => {
+        const id = chatCompletionChunks[0].id;
+        const status = getStatus(chatCompletionChunks);
+        let textContent = '';
+        let refusal = '';
+        let functionCall = { name: '', arguments: '' };
+        let toolCalls = {};
+
+        // 基于 state 增量处理:仅处理新到达的 chunk 片段
+        const stateKey = `${id}:${chatCompletionChunks[0]?.choices?.[0]?.index ?? groupIndex}`;
+        const processedCount = state?.processedCountByIndex?.[stateKey] ?? 0;
+        const start = processedCount > 0 ? Math.min(processedCount, chatCompletionChunks.length) : 0;
+        const chunksToProcess = state ? chatCompletionChunks.slice(start) : chatCompletionChunks;
+
+        // 若提供了 state 且本次没有新增内容,则跳过该 index
+        if (state && chunksToProcess.length === 0) {
+            return null;
+        }
+
+        chunksToProcess.map((chunk: ChatCompletionChunk) => {
+            const delta = chunk.choices[0].delta;
+            if (delta?.content) {
+                textContent += delta.content;
+            }
+            if (delta?.refusal) {
+                refusal += delta.refusal;
+            }
+            if (delta?.function_call) {
+                functionCall.name += delta.function_call.name;
+                functionCall.arguments += delta.function_call.arguments;
+            }
+            if (delta?.tool_calls) {
+                delta?.tool_calls.forEach((toolCall: FunctionToolCall) => {
+                    if (toolCalls[toolCall.id]) {
+                        toolCalls[toolCall.id].name += toolCall.function.name;
+                        toolCalls[toolCall.id].arguments += toolCall.function.arguments;
+                    } else {
+                        toolCalls[toolCall.id] = {
+                            ...(toolCall as FunctionToolCall).function,
+                            type: 'function_call',
+                            status: status,
+                            id: toolCall.id,
+                        };
+                    }
+                });
+            }
+        });
+
+        const toolCallsArray = Object.values(toolCalls) as FunctionToolCall[];
+
+        const outputMessage = [
+            textContent !== '' && {
+                type: 'output_text',
+                text: textContent,
+            },
+            refusal !== '' && {
+                type: 'refusal',
+                refusal: refusal,
+            },
+        ].filter(Boolean);
+
+
+        const outputResult = [
+            outputMessage.length > 0 && {
+                type: 'message',
+                id: id,
+                role: 'assistant',
+                status: status,
+                content: outputMessage
+            },
+            functionCall.name !== '' && {
+                type: 'function_call',
+                status: status,
+                ...functionCall
+            },
+            ...toolCallsArray,
+        ].filter(Boolean);
+
+        // 更新 state:记录该 index 已处理到的 chunk 数量
+        if (state) {
+            if (!state.processedCountByIndex) {
+                state.processedCountByIndex = {};
+            }
+            state.processedCountByIndex[stateKey] = chatCompletionChunks.length;
+        }
+
+        return {
+            id: id,
+            role: "assistant",
+            content: outputResult,
+            status: status,
+        };
+    }).filter(Boolean) as Message[];
+    
+    return {
+        messages: results,
+        state: state
+    };
+}
+
+const groupByIndex = (chatCompletionChunks: ChatCompletionChunk[]) => {
+    const groupedChunks = [];
+    chatCompletionChunks.forEach((chunk) => {
+        const curIndex = chunk.choices[0].index;
+        if (!groupedChunks[curIndex]) {
+            groupedChunks[curIndex] = [];
+        }
+        groupedChunks[curIndex].push(chunk);
+    });
+    return groupedChunks;
+};
+
+const getStatus = (chatCompletionChunks: ChatCompletionChunk[]) => {
+    const lastChunk = chatCompletionChunks[chatCompletionChunks.length - 1];
+    return lastChunk.choices[0].finish_reason !== null ? 'completed' : 'in_progress';
+};

+ 512 - 0
packages/semi-foundation/aiChatDialogue/dataAdapter/streamingResponseToMessage.ts

@@ -0,0 +1,512 @@
+import { ContentItem, OutputMessage, OutputText, Reasoning, Refusal, FunctionToolCall, CustomToolCall, MCPToolCall, Message } from "aiChatDialogue/foundation";
+import { CodeInterpreterCall, ImageGenerationCall, ResponseChunk } from "./interface";
+
+
+
+/**
+ * Incremental reducer for streaming Response chunks with out-of-order handling.
+ * 增量处理流式响应块的归约器,支持无序处理。
+ * 
+ * ## Features / 特性
+ * - Only applies chunks that were not processed before (by sequence_number)
+ *   只处理之前未处理过的块(根据 sequence_number)
+ * 
+ * - Maintains reusable state across calls for incremental processing
+ *   在多次调用之间保持可复用的状态,支持增量处理
+ * 
+ * - Always returns a best-effort Message regardless of missing chunks
+ *   即使有缺失的块,也总是返回尽力而为的消息
+ * 
+ * - Handles out-of-order chunks by buffering and processing in sequence
+ *   通过缓冲机制处理无序到达的块,确保按顺序处理
+ * 
+ * ## Out-of-Order Handling / 无序处理机制
+ * 
+ * 1. **Buffering / 缓冲**
+ *    - All incoming chunks are first added to a buffer
+ *      所有传入的块首先被添加到缓冲区
+ *    - Chunks are stored by their sequence_number as keys
+ *      块按其 sequence_number 作为键存储
+ * 
+ * 2. **Sequential Processing / 顺序处理**
+ *    - Only processes chunks with consecutive sequence numbers
+ *      仅处理具有连续序列号的块
+ *    - Maintains lastProcessedSeq to track the last successfully processed sequence
+ *      维护 lastProcessedSeq 来跟踪最后成功处理的序列号
+ *    - Uses a do-while loop to process all available consecutive chunks
+ *      使用 do-while 循环处理所有可用的连续块
+ * 
+ * 3. **Tolerance Mechanism / 容错机制**
+ *    - If gap between buffered chunks and lastProcessedSeq exceeds MAX_GAP (10)
+ *      如果缓冲块与 lastProcessedSeq 之间的间隙超过 MAX_GAP (10)
+ *    - Assumes missing chunks won't arrive and processes remaining chunks
+ *      则假设缺失的块不会到达,继续处理剩余的块
+ *    - Prevents permanent blocking due to lost chunks
+ *      防止因丢失块而永久阻塞
+ * 
+ * ## Example Scenarios / 示例场景
+ * 
+ * - **Scenario 1 / 场景 1**: In-order arrival / 按序到达
+ *   ```
+ *   Input: [1, 2, 3] → Process: 1, 2, 3 immediately
+ *   输入: [1, 2, 3] → 处理: 立即处理 1, 2, 3
+ *   ```
+ * 
+ * - **Scenario 2 / 场景 2**: Out-of-order arrival / 无序到达
+ *   ```
+ *   Call 1: [1, 3] → Process: 1, Buffer: 3
+ *   Call 2: [2, 4] → Process: 2, 3, 4
+ *   调用 1: [1, 3] → 处理: 1,缓冲: 3
+ *   调用 2: [2, 4] → 处理: 2, 3, 4
+ *   ```
+ * 
+ * - **Scenario 3 / 场景 3**: Missing chunk with recovery / 缺失块并恢复
+ *   ```
+ *   Input: [1, 2, 4, 5, ..., 15] → Process: 1, 2, buffer others
+ *   After gap > 10 → Skip 3, process 4-15
+ *   输入: [1, 2, 4, 5, ..., 15] → 处理: 1, 2,缓冲其他
+ *   间隙 > 10 后 → 跳过 3,处理 4-15
+ *   ```
+ * 
+ * @param chunks - Array of incoming response chunks / 传入的响应块数组
+ * @param prevState - Previous state from last call (for incremental processing) / 上次调用的状态(用于增量处理)
+ * @returns Object containing the accumulated message and next state, or null if no chunks / 包含累积消息和下一个状态的对象,如果没有块则返回 null
+ */
+export default function streamingResponseToMessage(
+    chunks?: any,
+    prevState?: any
+) {
+    if (!chunks?.length) return null;
+
+    // Fast path: If last chunk is response.completed, return the complete response directly
+    // 快速路径:如果最后一个块是 response.completed,直接返回完整响应
+    const tail = chunks[chunks.length - 1];
+    if (tail.type === 'response.completed') {
+        const { response } = tail;
+        const { id, model, status, output, output_text, created_at } = response;
+        const message = {
+            id: id,
+            role: "assistant",
+            content: output,
+            createdAt: created_at,
+            output_text: output_text,
+            model: model,
+            status: status,
+        };
+        return { message, nextState: null };
+    }
+
+    // Initialize or restore state from previous call
+    // 初始化或从上次调用恢复状态
+    const state = prevState
+        ? {
+            // Restore existing state for incremental processing / 恢复现有状态以进行增量处理
+            processedSeq: new Set(prevState.processedSeq),  // Track processed sequence numbers / 跟踪已处理的序列号
+            outputs: new Map(prevState.outputs),            // Accumulated output items / 累积的输出项
+            meta: { ...prevState.meta },                    // Metadata (id, model, status, etc.) / 元数据(id、模型、状态等)
+            error: prevState.error ?? null,                 // Error information if any / 错误信息(如果有)
+            buffer: new Map(prevState.buffer),              // Buffer for out-of-order chunks / 无序块的缓冲区
+            lastProcessedSeq: prevState.lastProcessedSeq ?? -1,  // Last successfully processed sequence / 最后成功处理的序列号
+        }
+        : {
+            // Initialize fresh state / 初始化全新状态
+            processedSeq: new Set<number>(),
+            outputs: new Map<number, ContentItem | null>(),
+            meta: {} as any,
+            error: null as any,
+            buffer: new Map<number, ResponseChunk>(),
+            lastProcessedSeq: -1,  // Start with -1, so first expected sequence is 0 / 从 -1 开始,因此第一个预期序列是 0
+        };
+
+    // Filter out chunks already processed in previous calls / 过滤掉之前调用中已处理的块
+    const incoming: ResponseChunk[] = Array.isArray(chunks) ? chunks : [];
+    const unprocessed = incoming.filter(c => {
+        const seq = c?.sequence_number;
+        return typeof seq !== 'number' || !state.processedSeq.has(seq);
+    });
+
+    // Add unprocessed chunks to buffer / 将未处理的块添加到缓冲区
+    for (const chunk of unprocessed) {
+        const seq = chunk?.sequence_number;
+        if (typeof seq === 'number') {
+            // Store chunk with its sequence number as key / 使用序列号作为键存储块
+            state.buffer.set(seq, chunk);
+        } else {
+            // Handle chunks without sequence_number by assigning a decimal key
+            // 通过分配小数键来处理没有 sequence_number 的块
+            // 
+            // Using +0.5 provides a temporary unique "sequence" for chunks without sequence_number.
+            // This avoids key conflicts with existing integer sequence numbers.
+            // 使用 +0.5 为没有 sequence_number 的块提供一个临时且唯一的"顺序"。
+            // 这样可以避免与现有的整数序列号发生键冲突。
+            // 
+            // Why +0.5 instead of +1?
+            // 为什么使用 +0.5 而不是 +1?
+            // - Using lastProcessedSeq+1 could conflict with future sequence numbers
+            //   使用 lastProcessedSeq+1 可能与未来的序列号冲突
+            // - +0.5 allows insertion between sequences while guaranteeing uniqueness and monotonic increase
+            //   +0.5 可以插入到序列之间,同时保证唯一性和单调递增
+            state.buffer.set(state.lastProcessedSeq + 0.5, chunk);
+        }
+    }
+
+    // Define the chunk processing function that handles different chunk types
+    // 定义处理不同块类型的函数
+    const processChunk = (chunk: ResponseChunk) => {
+        switch (chunk.type) {
+            // ========== Response Metadata / 响应元数据 ==========
+            case 'response.created': {
+                // Initialize response metadata (id, model, status, timestamp)
+                // 初始化响应元数据(id、模型、状态、时间戳)
+                const { response } = chunk;
+                if (response) {
+                    state.meta.id = response.id ?? state.meta.id;
+                    state.meta.model = response.model ?? state.meta.model;
+                    state.meta.status = response.status ?? state.meta.status;
+                    state.meta.created_at = response.created_at ?? state.meta.created_at;
+                }
+                break;
+            }
+            
+            // ========== Output Items / 输出项 ==========
+            case 'response.output_item.added': {
+                // Add a new output item placeholder / 添加新的输出项占位符
+                // Deep clone to avoid modifying original chunk.item / 深拷贝以避免修改原始 chunk.item
+                const outIdx = typeof chunk.output_index === 'number' ? chunk.output_index : 0;
+                if (!state.outputs.has(outIdx)) {
+                    state.outputs.set(outIdx, deepClone(chunk.item ?? {}));
+                }
+                break;
+            }
+            case 'response.output_item.done': {
+                // Finalize an output item / 完成一个输出项
+                // Deep clone to avoid modifying original item / 深拷贝以避免修改原始 item
+                const { output_index, item } = chunk;
+                state.outputs.set(output_index, deepClone(item));
+                break;
+            }
+            
+            // ========== Output Text / 输出文本 ==========
+            case 'response.content_part.added':
+            case 'response.content_part.done': {
+                const { output_index, content_index, part } = chunk;
+                const item = state.outputs.get(output_index) as OutputMessage;
+                (item as OutputMessage).content = (item as OutputMessage).content ?? [];
+                (item as OutputMessage).content[content_index] = deepClone(part);
+                break;
+            }
+            case 'response.output_text.delta': {
+                // Incrementally append text delta to the output / 增量追加文本增量到输出
+                const { output_index, content_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as OutputMessage;
+                (item as OutputMessage).content = (item as OutputMessage).content ?? [];
+                (item.content[content_index] as OutputText) = (item.content[content_index] as OutputText) ?? { type: 'output_text', text: '' };
+                (item.content[content_index] as OutputText).text = ((item.content[content_index] as OutputText).text ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.output_text.done': {
+                // Set final text content / 设置最终文本内容
+                const { output_index, content_index, text } = chunk;
+                const item = state.outputs.get(output_index) as OutputMessage;
+                (item as OutputMessage).content = (item as OutputMessage).content ?? [];
+                (item.content[content_index] as OutputText) = (item.content[content_index] as OutputText) ?? { type: 'output_text', text: '' };
+                (item.content[content_index] as OutputText).text = text;
+                break;
+            }
+            case 'response.output_text.annotation.added': {
+                // Add annotation to text output / 向文本输出添加注释
+                const { output_index, content_index, annotation_index, annotation } = chunk;
+                const item = state.outputs.get(output_index) as OutputMessage;
+                (item as OutputMessage).content = (item as OutputMessage).content ?? [];
+                (item.content[content_index] as OutputText) = (item.content[content_index] as OutputText) ?? { type: 'output_text', text: '', annotations: [] };
+                (item.content[content_index] as OutputText).annotations = (item.content[content_index] as OutputText).annotations ?? [];
+                (item.content[content_index] as OutputText).annotations[annotation_index] = deepClone(annotation);
+                break;
+            }
+            
+            // ========== Refusal / 拒绝响应 ==========
+            case 'response.refusal.delta': {
+                const { output_index, content_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as OutputMessage;
+                (item as OutputMessage).content = (item as OutputMessage).content ?? [];
+                (item.content[content_index] as Refusal) = (item.content[content_index] as Refusal) ?? { type: 'refusal', refusal: '' };
+                (item.content[content_index] as Refusal).refusal = ((item.content[content_index] as Refusal).refusal ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.refusal.done': {
+                const { output_index, content_index, refusal } = chunk;
+                const item = state.outputs.get(output_index) as OutputMessage;
+                (item as OutputMessage).content = (item as OutputMessage).content ?? [];
+                (item.content[content_index] as Refusal) = (item.content[content_index] as Refusal) ?? { type: 'refusal', refusal: '' };
+                (item.content[content_index] as Refusal).refusal = refusal;
+                break;
+            }
+            // reasoning
+            case 'response.reasoning_summary_part.added':
+            case 'response.reasoning_summary_part.done': {
+                const { output_index, summary_index, part } = chunk;
+                const item = state.outputs.get(output_index) as Reasoning;
+                (item as Reasoning).summary = (item as Reasoning).summary ?? [];
+                (item as Reasoning).summary[summary_index] = deepClone(part) as any;
+                break;
+            }
+            case 'response.reasoning_summary_text.delta': {
+                const { output_index, summary_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as Reasoning;
+                (item as Reasoning).summary = (item as Reasoning).summary ?? [];
+                (item as Reasoning).summary[summary_index] = (item as Reasoning).summary[summary_index] ?? { type: 'reasoning', text: '' };
+                (item as Reasoning).summary[summary_index].text = ((item as Reasoning).summary[summary_index].text ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.reasoning_summary_text.done': {
+                const { output_index, summary_index, text } = chunk;
+                const item = state.outputs.get(output_index) as Reasoning;
+                (item as Reasoning).summary = (item as Reasoning).summary ?? [];
+                (item as Reasoning).summary[summary_index] = (item as Reasoning).summary[summary_index] ?? { type: 'reasoning', text: '' };
+                (item as Reasoning).summary[summary_index].text = text;
+                break;
+            }
+            case 'response.reasoning_text.delta': {
+                const { output_index, content_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as Reasoning;
+                (item as Reasoning).content = (item as Reasoning).content ?? [];
+                (item as Reasoning).content[content_index] = (item as Reasoning).content[content_index] ?? { type: 'reasoning', text: '' };
+                (item as Reasoning).content[content_index].text = ((item as Reasoning).content[content_index].text ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.reasoning_text.done': {
+                const { output_index, content_index, text } = chunk;
+                const item = state.outputs.get(output_index) as Reasoning;
+                (item as Reasoning).content = (item as Reasoning).content ?? [];
+                (item as Reasoning).content[content_index] = (item as Reasoning).content[content_index] ?? { type: 'reasoning', text: '' };
+                (item as Reasoning).content[content_index].text = text;
+                break;
+            }
+            // function call
+            case 'response.function_call_arguments.delta': {
+                const { output_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as FunctionToolCall;
+                (item as FunctionToolCall).arguments = ((item as FunctionToolCall).arguments ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.function_call_arguments.done': {
+                const { output_index, name } = chunk;
+                const item = state.outputs.get(output_index) as FunctionToolCall;
+                (item as FunctionToolCall).arguments = chunk.arguments;
+                (item as FunctionToolCall).name = name;
+                break;
+            }
+            // custom_tool_call
+            case 'response.custom_tool_call_input.delta': {
+                const { output_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as CustomToolCall;
+                (item as CustomToolCall).input = ((item as CustomToolCall).input ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.custom_tool_call_input.done': {
+                const { output_index, input } = chunk;
+                const item = state.outputs.get(output_index) as CustomToolCall;
+                (item as CustomToolCall).input = input;
+                break;
+            }
+            // mcp
+            case 'response.mcp_call_arguments.delta': {
+                const { output_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as MCPToolCall;
+                (item as MCPToolCall).arguments = ((item as MCPToolCall).arguments ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.mcp_call_arguments.done': {
+                const { output_index } = chunk;
+                const item = state.outputs.get(output_index) as MCPToolCall;
+                (item as MCPToolCall).arguments = chunk.arguments;
+                break;
+            }
+            case 'response.file_search_call.in_progress':
+            case 'response.web_search_call.in_progress':
+            case 'response.image_generation_call.in_progress':
+            case 'response.mcp_call.in_progress':
+            case 'response.mcp_list_tools.in_progress':
+            case 'response.code_interpreter_call.in_progress': {
+                const out = state.outputs.get(chunk.output_index) as any;
+                if (out) out.status = 'in_progress';
+                break;
+            }
+            case 'response.mcp_call.failed':
+            case 'response.mcp_list_tools.failed': {
+                const out = state.outputs.get(chunk.output_index) as any;
+                if (out) out.status = 'failed';
+                break;
+            }
+            case 'response.file_search_call.completed':
+            case 'response.web_search_call.completed':
+            case 'response.image_generation_call.completed':
+            case 'response.mcp_call.completed':
+            case 'response.mcp_list_tools.completed':
+            case 'response.code_interpreter_call.completed': {
+                const out = state.outputs.get(chunk.output_index) as any;
+                if (out) out.status = 'completed';
+                break;
+            }
+            case 'response.code_interpreter_call_code.delta': {
+                const { output_index, delta } = chunk;
+                const item = state.outputs.get(output_index) as CodeInterpreterCall;
+                item.code = (item.code ?? '') + (delta ?? '');
+                break;
+            }
+            case 'response.code_interpreter_call_code.done': {
+                const { output_index, code } = chunk;
+                const item = state.outputs.get(output_index) as CodeInterpreterCall;
+                item.code = code;
+                break;
+            }
+            case 'response.image_generation_call.partial_image': {
+                const item = state.outputs.get(chunk.output_index) as ImageGenerationCall;
+                if (item) item.result = (chunk as any).partial_image_b64;
+                break;
+            }
+            case 'error': {
+                state.error = {
+                    code: chunk.code,
+                    message: chunk.message,
+                };
+                break;
+            }
+            case 'response.completed': {
+                if ((chunk as any).response) {
+                    state.meta.status = (chunk as any).response.status ?? 'completed';
+                } else {
+                    state.meta.status = 'completed';
+                }
+                break;
+            }
+            default: {
+                // Ignore unsupported chunk types / 忽略不支持的块类型
+                break;
+            }
+        }
+    };
+
+    // ==================== Sequential Processing / 顺序处理 ====================
+    // Process chunks in sequential order from the buffer
+    // Only chunks with consecutive sequence numbers are processed
+    // 从缓冲区按顺序处理块
+    // 只处理具有连续序列号的块
+    
+    let nextExpected = state.lastProcessedSeq + 1;  // Next sequence number we're waiting for / 我们等待的下一个序列号
+    let processed = false;  // Flag to track if any chunk was processed in this iteration / 标记此次迭代中是否处理了任何块
+    
+    do {
+        processed = false;
+        const chunk = state.buffer.get(nextExpected);
+        
+        if (chunk) {
+            // Found the next expected chunk, process it / 找到下一个预期的块,处理它
+            processChunk(chunk);
+            state.processedSeq.add(nextExpected);  // Mark as processed / 标记为已处理
+            state.buffer.delete(nextExpected);     // Remove from buffer / 从缓冲区移除
+            state.lastProcessedSeq = nextExpected; // Update last processed sequence / 更新最后处理的序列号
+            nextExpected++;                        // Move to next expected sequence / 移动到下一个预期序列
+            processed = true;                      // Continue the loop to check for more consecutive chunks / 继续循环检查更多连续块
+        } else {
+            // Check for chunks without sequence numbers (stored with decimal keys like N.5)
+            // 检查没有序列号的块(使用小数键存储,如 N.5)
+            const decimalKey = state.lastProcessedSeq + 0.5;
+            const noSeqChunk = state.buffer.get(decimalKey);
+            if (noSeqChunk) {
+                processChunk(noSeqChunk);
+                state.buffer.delete(decimalKey);
+                processed = true;  // Continue to check for next integer sequence / 继续检查下一个整数序列
+            }
+        }
+    } while (processed);  // Keep processing as long as we find consecutive chunks / 只要找到连续的块就继续处理
+
+    // ==================== Tolerance Mechanism / 容错机制 ====================
+    // Handle permanent missing chunks to prevent infinite waiting
+    // If the gap between buffered chunks and last processed sequence exceeds MAX_GAP,
+    // assume missing chunks won't arrive and continue processing remaining chunks
+    // 处理永久缺失的块以防止无限等待
+    // 如果缓冲块与最后处理序列之间的间隙超过 MAX_GAP,
+    // 则假设缺失的块不会到达,继续处理剩余的块
+    
+    const MAX_GAP = 10;  // Maximum allowed gap before assuming chunks are permanently lost / 在假设块永久丢失之前允许的最大间隙
+    
+    // Extract all integer sequence numbers from buffer and sort them
+    // 从缓冲区提取所有整数序列号并排序
+    const bufferedSeqs = Array.from(state.buffer.keys())
+        .filter((k): k is number => typeof k === 'number' && k === Math.floor(k))  // Only integer keys / 只要整数键
+        .sort((a, b) => a - b);  // Sort in ascending order / 升序排序
+    
+    if (bufferedSeqs.length > 0) {
+        const minBuffered = bufferedSeqs[0] as number;  // Smallest buffered sequence number / 最小的缓冲序列号
+        const gap = (minBuffered as number) - (state.lastProcessedSeq as number);  // Calculate the gap / 计算间隙
+        
+        if (gap > MAX_GAP) {
+            // Gap is too large, assume intermediate chunks are lost
+            // Process all remaining buffered chunks in order
+            // 间隙太大,假设中间的块已丢失
+            // 按顺序处理所有剩余的缓冲块
+            for (const seq of bufferedSeqs) {
+                const chunk = state.buffer.get(seq);
+                if (chunk) {
+                    processChunk(chunk);
+                    state.processedSeq.add(seq);
+                    state.buffer.delete(seq);
+                    state.lastProcessedSeq = seq;
+                }
+            }
+        }
+    }
+
+    // ==================== Build Final Message / 构建最终消息 ====================
+    const content = Array.from(state.outputs.values()).filter((item) => item !== null) as ContentItem[];
+    
+    // Extract and concatenate all text content for convenience
+    // 提取并连接所有文本内容以便使用
+    const output_text = content
+        .filter((p: any) => p?.type === 'output_text')  // Only text items / 只要文本项
+        .map((p: any) => p?.text ?? '')                 // Extract text / 提取文本
+        .join('');                                       // Join into single string / 连接成单个字符串
+
+    // Build the message object if we have any content or metadata
+    // 如果有任何内容或元数据,则构建消息对象
+    const message: Message | null = (content?.length || state.meta.id)
+        ? {
+            id: state.meta.id,
+            role: "assistant",
+            content,                                    // Array of content items / 内容项数组
+            createdAt: state.meta.created_at,
+            output_text,                                // Concatenated text for convenience / 连接的文本以便使用
+            model: state.meta.model,
+            status: state.meta.status ?? 'in_progress', // Default to in_progress if not set / 如果未设置则默认为 in_progress
+            error: state.error ?? null,                 // Include error if any / 包含错误(如果有)
+        }
+        : null;
+
+    return { message, nextState: state };
+}
+
+/**
+ * Deep clone an object to avoid modifying original data
+ * 深拷贝对象以避免修改原始数据
+ * 
+ * @param obj - Object to clone / 要克隆的对象
+ * @returns Cloned object / 克隆的对象
+ */
+function deepClone<T>(obj: T): T {
+    if (obj === null || typeof obj !== 'object') {
+        return obj;
+    }
+    
+    if (Array.isArray(obj)) {
+        return obj.map(item => deepClone(item)) as unknown as T;
+    }
+    
+    const cloned = {} as T;
+    for (const key in obj) {
+        if (Object.prototype.hasOwnProperty.call(obj, key)) {
+            cloned[key] = deepClone(obj[key]);
+        }
+    }
+    return cloned;
+}

+ 5 - 4
packages/semi-foundation/aiChatDialogue/foundation.ts

@@ -1,6 +1,6 @@
 import BaseFoundation, { DefaultAdapter } from "../base/foundation";
 import { getUuidv4 } from "../utils/uuid";
-import { ROLE } from "./constants";
+import { strings } from "./constants";
 
 export interface DialogueAdapter<P = Record<string, any>, S = Record<string, any>> extends DefaultAdapter<P, S> {
     updateSelected: (selectedIds: string[]) => void;
@@ -120,7 +120,7 @@ export default class DialogueFoundation <P = Record<string, any>, S = Record<str
     onHintClick = (hint: string) => {
         const { chats } = this.getStates();
         const newMessage = {
-            role: ROLE.USER,
+            role: strings.ROLE.USER,
             id: getUuidv4(),
             createAt: Date.now(),
             content: hint,
@@ -133,13 +133,14 @@ export default class DialogueFoundation <P = Record<string, any>, S = Record<str
 
 }
 
+
 export interface Message {
     id: string;
     content?: string | ContentItem[];
     output_text?: string;
     role: string;
-    createdAt?: string;
-    updatedAt?: string;
+    createdAt?: number;
+    updatedAt?: number;
     model?: string;
     status?: string;
     type?: string;

+ 51 - 0
packages/semi-ui/aiChatDialogue/_story/Data/ChatCompletionData.js

@@ -0,0 +1,51 @@
+export const CHAT_COMPLETION_DATA = {
+    "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT",
+    "object": "chat.completion",
+    "created": 1741569952,
+    "model": "gpt-4.1-2025-04-14",
+    "choices": [
+        {
+            "index": 0,
+            "message": {
+                "role": "assistant",
+                "content": "Hello! How can I assist you today?",
+                // "refusal": " sorry! I can't assist you today!",
+                "refusal": null,
+                "annotations": [],
+                "function_call": {
+                    "name": "get_current_weather",
+                    "arguments": "{\n\"location\": \"Boston, MA\"\n}"
+                },
+                "tool_calls": [
+                    {
+                        "id": "call_abc123",
+                        "type": "function",
+                        "function": {
+                            "name": "get_current_weather",
+                            "arguments": "{\n\"location\": \"Boston, MA\"\n}"
+                        }
+                    }
+                ]
+            },
+            "logprobs": null,
+            "finish_reason": "stop"
+        }
+    ],
+    "usage": {
+        "prompt_tokens": 19,
+        "completion_tokens": 10,
+        "total_tokens": 29,
+        "prompt_tokens_details": {
+            "cached_tokens": 0,
+            "audio_tokens": 0
+        },
+        "completion_tokens_details": {
+            "reasoning_tokens": 0,
+            "audio_tokens": 0,
+            "accepted_prediction_tokens": 0,
+            "rejected_prediction_tokens": 0
+        }
+    },
+    "service_tier": "default"
+};
+  

+ 114 - 0
packages/semi-ui/aiChatDialogue/_story/Data/ResponseData.js

@@ -0,0 +1,114 @@
+export const RESPONSE_DATA = {
+    "id": "resp_67ccd3a9da748190baa7f1570fe91ac604becb25c45c1d41",
+    "object": "response",
+    "created_at": 1741476777,
+    "status": "completed",
+    "error": null,
+    "incomplete_details": null,
+    "instructions": null,
+    "max_output_tokens": null,
+    "model": "gpt-4o-2024-08-06",
+    "output": [
+        {
+            "id": "rs_6876cf02e0bc8192b74af0fb64b715ff06fa2fcced15a5ac",
+            "type": "reasoning",
+            "status": "completed",
+            "summary": [
+                {
+                    "type": "summary_text",
+                    "text": "**Answering a simple question**\n\nI\u2019m looking at a straightforward question: the capital of France is Paris. It\u2019s a well-known fact, and I want to keep it brief and to the point. Paris is known for its history, art, and culture, so it might be nice to add just a hint of that charm. But mostly, I\u2019ll aim to focus on delivering a clear and direct answer, ensuring the user gets what they\u2019re looking for without any extra fluff."
+                }
+            ]
+        },
+        {
+            "type": "message",
+            "id": "msg_67ccd3acc8d48190a77525dc6de64b4104becb25c45c1d41",
+            "status": "completed",
+            "role": "assistant",
+            "content": [
+                {
+                    "type": "output_text",
+                    "text": "The image depicts a scenic landscape with a wooden boardwalk or pathway leading through lush, green grass under a blue sky with some clouds. The setting suggests a peaceful natural area, possibly a park or nature reserve. There are trees and shrubs in the background.",
+                    "annotations": [
+                        {
+                            "title": '快乐星球',
+                            "url": 'https://semi.design/zh-CN/start/getting-started',
+                            "detail": '快乐星球是一个快乐的地方',
+                            "logo": 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+                        },
+                        {
+                            "title": '快乐星球',
+                            "url": 'https://semi.design/zh-CN/start/getting-started',
+                            "detail": '快乐星球是一个快乐的地方',
+                            "logo": 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+                        },
+                        {
+                            "title": '快乐星球',
+                            "url": 'https://semi.design/zh-CN/start/getting-started',
+                            "detail": '快乐星球是一个快乐的地方',
+                            "logo": 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+                        },
+                        {
+                            "title": '快乐星球',
+                            "url": 'https://semi.design/zh-CN/start/getting-started',
+                            "detail": '快乐星球是一个快乐的地方',
+                            "logo": 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+                        },
+                        {
+                            "title": '快乐星球',
+                            "url": 'https://semi.design/zh-CN/start/getting-started',
+                            "detail": '快乐星球是一个快乐的地方',
+                            "logo": 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+                        }
+                    ]
+                }
+            ]
+        },
+        {
+            "id": "fc_12345xyz",
+            "call_id": "call_12345xyz",
+            "type": "function_call",
+            "name": "get_weather",
+            "status": "completed",
+            "arguments": "{\"location\":\"Paris, France\"}"
+        },
+        {
+            "id": "ctc_6890e975e86c819c9338825b3e1994810694874912ae0ea6",
+            "type": "custom_tool_call",
+            "status": "completed",
+            "call_id": "call_aGiFQkRWSWAIsMQ19fKqxUgb",
+            "input": "print(\"hello world\")",
+            "name": "code_exec"
+        }
+    ],
+    "parallel_tool_calls": true,
+    "previous_response_id": null,
+    "reasoning": {
+        "effort": null,
+        "summary": null
+    },
+    "store": true,
+    "temperature": 1,
+    "text": {
+        "format": {
+            "type": "text"
+        }
+    },
+    "tool_choice": "auto",
+    "tools": [],
+    "top_p": 1,
+    "truncation": "disabled",
+    "usage": {
+        "input_tokens": 328,
+        "input_tokens_details": {
+            "cached_tokens": 0
+        },
+        "output_tokens": 52,
+        "output_tokens_details": {
+            "reasoning_tokens": 0
+        },
+        "total_tokens": 380
+    },
+    "user": null,
+    "metadata": {}
+};

+ 24 - 0
packages/semi-ui/aiChatDialogue/_story/Data/StreamingChatCompletion.js

@@ -0,0 +1,24 @@
+export const STREAMING_CHAT_COMPLETION_DATA = [
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011843, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": { "role": "assistant", "content": "", "refusal": null }, "finish_reason": null }], "obfuscation": "ahPqlzj6DD" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011843, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": { "content": "" }, "finish_reason": null }], "obfuscation": "i2PXRIwvc3D" },
+    // index 0: 输出文本增量
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011843, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": { "content": "快" }, "finish_reason": null }], "obfuscation": "3sslO5QylW" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011843, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": { "content": "乐" }, "finish_reason": null }], "obfuscation": "3sslO5QylW" },
+
+    // index 1: 工具调用增量(function_call / tool_calls)
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011845, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 1, "delta": { "tool_calls": [{ "id": "call_1", "function": { "name": "searchWeather", "arguments": "{\"city\":\"北京\",\"day\":\"today\"}" } }] }, "finish_reason": null }], "obfuscation": "T1" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011846, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 1, "delta": { "tool_calls": [{ "id": "call_1", "function": { "name": "searchWeather", "arguments": ",\"unit\":\"C\"}" } }] }, "finish_reason": null }], "obfuscation": "T2" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011847, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 1, "delta": { "tool_calls": [{ "id": "call_2", "function": { "name": "recommendClothes", "arguments": "{\"temp\":\"5\"}" } }] }, "finish_reason": null }], "obfuscation": "T3" },
+
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011844, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": { "content": "哈哈哈哈哈" }, "finish_reason": null }], "obfuscation": "X1" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011844, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": { "content": " 🥳" }, "finish_reason": null }], "obfuscation": "X2" },
+
+    // index 2: function_call 增量
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011848, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 2, "delta": { "function_call": { "name": "summarize", "arguments": "{\"topic\":\"news\"}" } }, "finish_reason": null }], "obfuscation": "F1" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011849, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 2, "delta": { "function_call": { "name": "summarize", "arguments": ",\"length\":\"short\"}" } }, "finish_reason": null }], "obfuscation": "F2" },
+
+    // 终止信号
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011843, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 0, "delta": {}, "finish_reason": "stop" }], "obfuscation": "n13SLf" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011843, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 1, "delta": {}, "finish_reason": "stop" }], "obfuscation": "jt9rDb" },
+    { "id": "chatcmpl-COjljxurV5GKrRUsg1wd7mIyQCiiT", "object": "chat.completion.chunk", "created": 1760011850, "model": "o3-mini-2025-01-31", "service_tier": "default", "system_fingerprint": "fp_6c43dcef8c", "choices": [{ "index": 2, "delta": {}, "finish_reason": "stop" }], "obfuscation": "F_END" }
+];

File diff suppressed because it is too large
+ 16 - 0
packages/semi-ui/aiChatDialogue/_story/Data/StreamingResponseData.js


+ 1 - 0
packages/semi-ui/aiChatDialogue/_story/DataAdapter/aiChatInputContentToMessage.jsx

@@ -0,0 +1 @@
+// TODO: add a demo for the aiChatInputContentToMessage adapter (convert AIChatInput content into dialogue messages)

+ 49 - 0
packages/semi-ui/aiChatDialogue/_story/DataAdapter/chatCompletionToMessage.jsx

@@ -0,0 +1,49 @@
+import React, { useState, useCallback, useEffect } from 'react';
+import { AIChatDialogue, chatCompletionToMessage } from '../../../index';
+import { CHAT_COMPLETION_DATA } from '../Data/ChatCompletionData';
+
+const defaultMessages = [{
+    id: '1',
+    role: 'user',
+    content: '此处是用户的输入',
+    status: 'completed',
+}];
+
+const roleConfig = {
+    user: {
+        name: 'User',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+    },
+    assistant: {
+        name: 'Assistant',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    },
+    system: {
+        name: 'System',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    }
+};
+
+export default function ChatCompletionToMessageDemo() {
+    const [messages, setMessage] = useState(defaultMessages);
+
+    const onChatsChange = useCallback((chats) => {
+        setMessage(chats);
+    }, []);
+
+    // only need to test text, refusal, function_call, tool_calls
+    useEffect(() => {
+        const message = chatCompletionToMessage(CHAT_COMPLETION_DATA);
+        setMessage([...defaultMessages, ...message]);
+    }, []);
+  
+    return (
+        <AIChatDialogue 
+            align="leftRight"
+            mode="bubble"
+            chats={messages}
+            roleConfig={roleConfig}
+            onChatsChange={onChatsChange}
+        />
+    );
+};

+ 49 - 0
packages/semi-ui/aiChatDialogue/_story/DataAdapter/responseToMessage.jsx

@@ -0,0 +1,49 @@
+import React, { useState, useCallback, useEffect } from 'react';
+import { AIChatDialogue, responseToMessage } from '../../../index';
+import { RESPONSE_DATA } from '../Data/ResponseData';
+
+const defaultMessages = [{
+    id: '1',
+    role: 'user',
+    content: '此处是用户的输入',
+    status: 'completed',
+}];
+
+const roleConfig = {
+    user: {
+        name: 'User',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+    },
+    assistant: {
+        name: 'Assistant',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    },
+    system: {
+        name: 'System',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    }
+};
+
+export default function ResponseToMessageDemo() {
+    const [messages, setMessage] = useState(defaultMessages);
+
+    const onChatsChange = useCallback((chats) => {
+        setMessage(chats);
+    }, []);
+
+    useEffect(() => {
+        const responseMessage = responseToMessage(RESPONSE_DATA);
+
+        setMessage([...defaultMessages, responseMessage]);
+    }, []);
+  
+    return (
+        <AIChatDialogue 
+            align="leftRight"
+            mode="bubble"
+            chats={messages}
+            roleConfig={roleConfig}
+            onChatsChange={onChatsChange}
+        />
+    );
+};

+ 73 - 0
packages/semi-ui/aiChatDialogue/_story/DataAdapter/streamingChatCompletionToMessage.jsx

@@ -0,0 +1,73 @@
+import React, { useState, useCallback, useEffect } from 'react';
+import { AIChatDialogue, streamingChatCompletionToMessage } from '../../../index';
+import { STREAMING_CHAT_COMPLETION_DATA } from '../Data/StreamingChatCompletion';
+
+const defaultMessages = [{
+    id: '1',
+    role: 'user',
+    content: '此处是用户的输入',
+    status: 'completed',
+}];
+
+const roleConfig = {
+    user: {
+        name: 'User',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+    },
+    assistant: {
+        name: 'Assistant',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    },
+    system: {
+        name: 'System',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    }
+};
+
+export default function StreamingChatCompletionToMessageDemo() {
+    const [messages, setMessage] = useState(defaultMessages);
+    const [state, setState] = useState();
+
+    const onChatsChange = useCallback((chats) => {
+        setMessage(chats);
+    }, []);
+
+    useEffect(() => {
+        // 以 100ms 为间隔,从 1 块逐步增加到全部块,模拟流逝输出
+        const total = STREAMING_CHAT_COMPLETION_DATA.length;
+        const accByIndex = new Map();
+        let i = 1;
+
+        const timer = setInterval(() => {
+            if (i > total) {
+                clearInterval(timer);
+                return;
+            }
+
+            const slice = STREAMING_CHAT_COMPLETION_DATA.slice(0, i);
+            const { messages: partial, state: nextState } = streamingChatCompletionToMessage(slice, state);
+            setState(nextState);
+
+            // 基于当前新增的最后一块,确定其 choice index 并合并
+            const lastChunk = slice[slice.length - 1];
+
+            // 将合并后的消息映射转换为数组,并加上默认的用户消息
+            const merged = [defaultMessages[0], ...partial];
+            setMessage(merged);
+
+            i += 1;
+        }, 100);
+
+        return () => clearInterval(timer);
+    }, []);
+  
+    return (
+        <AIChatDialogue 
+            align="leftRight"
+            mode="bubble"
+            chats={messages}
+            roleConfig={roleConfig}
+            onChatsChange={onChatsChange}
+        />
+    );
+};

+ 143 - 0
packages/semi-ui/aiChatDialogue/_story/DataAdapter/streamingResponseToMessage.jsx

@@ -0,0 +1,143 @@
+import React, { useState, useCallback, useEffect } from 'react';
+import { AIChatDialogue, streamingResponseToMessage } from '../../../index';
+import { REASONING_CHUNKS } from '../Data/StreamingResponseData';
+
+// Seed conversation: a single completed user message shown before the
+// streamed assistant response is appended.
+const defaultMessages = [{
+    id: '1',
+    role: 'user',
+    content: '此处是用户的输入',
+    status: 'completed',
+}];
+
+// Per-role display config: `name` and `avatar` are rendered beside each
+// dialogue bubble by AIChatDialogue (keys match the message `role` field).
+const roleConfig = {
+    user: {
+        name: 'User',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/docs-icon.png'
+    },
+    assistant: {
+        name: 'Assistant',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    },
+    system: {
+        name: 'System',
+        avatar: 'https://lf3-static.bytednsdoc.com/obj/eden-cn/ptlz_zlp/ljhwZthlaukjlkulzlp/other/logo.png'
+    }
+};
+
+// Fixed shuffled index order into REASONING_CHUNKS, used to simulate
+// out-of-order network delivery: chunk 5 arrives late and chunk 6 is
+// delivered twice, so the adapter's sequence_number buffering can be observed.
+const FIXED_SHUFFLED_INDICES = [
+    0,  // sequence_number: 0
+    1,  // sequence_number: 1
+    2,  // sequence_number: 2
+    3,  // sequence_number: 3
+    4,  // sequence_number: 4
+    // 5 is intentionally withheld here — it is delivered later, out of order
+    6,  // sequence_number: 6 (arrives while chunk 5 is still missing)
+    6,  // sequence_number: 6 (duplicate delivery of chunk 6)
+    7,  // sequence_number: 7
+    5,  // sequence_number: 5 (chunk 5 finally arrives, late)
+    8,  // sequence_number: 8
+    9,  // sequence_number: 9
+    10, // sequence_number: 10
+    11, // sequence_number: 11
+    12, // sequence_number: 12
+    13, // sequence_number: 13
+    14, // sequence_number: 14
+    15, // sequence_number: 15
+    16, // sequence_number: 16
+];
+
+export default function StreamingResponseToMessageDemo() {
+    const [messages, setMessage] = useState(defaultMessages);
+    const [currentState, setCurrentState] = useState(null);
+    const [currentLength, setCurrentLength] = useState(0);
+
+    const onChatsChange = useCallback((chats) => {
+        setMessage(chats);
+    }, []);
+
+
+    useEffect(() => {
+
+        // 如果已经处理完所有的 chunks,停止 / Stop if all chunks are processed
+        if (currentLength > FIXED_SHUFFLED_INDICES.length) {
+
+            console.log('=== All chunks processed ===');
+            return;
+        }
+
+        // 设置定时器,每 1 秒处理一次 / Set timer to process every 1 second
+        const timer = setTimeout(() => {
+            if (currentLength === 0) {
+                // 初始化,不处理任何块 / Initialize, don't process any chunks
+                setCurrentLength(1);
+                return;
+            }
+
+            // 获取前 currentLength 个乱序索引对应的块 / Get chunks corresponding to first currentLength shuffled indices
+            const currentIndices = FIXED_SHUFFLED_INDICES.slice(0, currentLength);
+            const currentChunks = currentIndices.map(index => REASONING_CHUNKS[index]);
+
+
+            // 调用累积函数 / Call accumulation function
+            const result = streamingResponseToMessage(currentChunks, currentState);
+            
+            if (result) {
+                const { message: responseMessage, nextState } = result;
+
+                console.log('currentChunks', REASONING_CHUNKS[2], nextState);
+
+                // 更新消息列表 / Update message list
+                if (responseMessage) {
+                    console.log('responseMessage', responseMessage);
+                    setMessage([...defaultMessages, responseMessage]);
+                }
+                
+                // 更新状态供下次使用 / Update state for next iteration
+                setCurrentState(nextState);
+            }
+
+            // 增加长度 / Increase length
+            setCurrentLength(prev => prev + 1);
+        }, 200);
+
+        // 清理定时器 / Cleanup timer
+        return () => clearTimeout(timer);
+    }, [currentLength, currentState, REASONING_CHUNKS]);
+  
+    return (
+        <div>
+            <div style={{ 
+                padding: '12px', 
+                background: 'linear-gradient(135deg, #667eea 0%, #764ba2 100%)',
+                color: 'white',
+                marginBottom: '10px',
+                borderRadius: '8px',
+                boxShadow: '0 2px 8px rgba(0,0,0,0.1)'
+            }}>
+                <div style={{ marginBottom: '8px' }}>
+                    <strong>流式输出模拟 / Streaming Simulation</strong>
+                </div>
+                <div style={{ fontSize: '14px', opacity: 0.9 }}>
+                    📊 当前进度 / Progress: {Math.max(0, currentLength - 1)} / {FIXED_SHUFFLED_INDICES.length} chunks
+                </div>
+                <div style={{ fontSize: '12px', opacity: 0.8, marginTop: '4px' }}>
+                    🔀 乱序传输 / Out-of-order transmission
+                </div>
+                {currentState?.buffer?.size > 0 && (
+                    <div style={{ fontSize: '12px', opacity: 0.8, marginTop: '4px' }}>
+                        📦 缓冲区 / Buffer: {currentState.buffer.size} 个块等待处理 / chunks waiting
+                    </div>
+                )}
+            </div>
+            <AIChatDialogue 
+                align="leftRight"
+                mode="bubble"
+                chats={messages}
+                roleConfig={roleConfig}
+                onChatsChange={onChatsChange}
+            />
+        </div>
+    );
+};

+ 32 - 20
packages/semi-ui/aiChatDialogue/_story/aiChatDialogue.stories.jsx

@@ -1,8 +1,13 @@
 import React, { useState, useCallback, useRef, useEffect } from 'react';
-import { AIChatDialogue, RadioGroup, Radio, Button, Input, Toast } from '../../index';
+import { AIChatDialogue, RadioGroup, Radio, Button, Input, Toast, ChatCompletionToMessage, StreamingChatCompletionToMessage, StreamingResponseToMessage } from '../../index';
 import { IconSearch, IconFile, IconCode, IconTerminal } from '@douyinfe/semi-icons';
 import CustomRenderContentItem from './CustomRenderContentItem';
 import RenderConfigContentItem from './RenderConfig';
+import ResponseToMessageDemo from './DataAdapter/responseToMessage';
+import StreamingChatCompletionToMessageDemo from './DataAdapter/streamingChatCompletionToMessage';
+import StreamingResponseToMessageDemo from './DataAdapter/streamingResponseToMessage';
+import ChatCompletionToMessageDemo from './DataAdapter/chatCompletionToMessage';
+// import { RESPONSE_CHUNKS, REFUSAL_CHUNKS, REASONING_CHUNKS, FUNCTION_CALL_CHUNKS, CUSTOM_TOOL_CALL_CHUNKS, MCP_CHUNKS } from './Data/StreamingResponseData'
 
 export default {
   title: 'AIChatDialogue',
@@ -147,9 +152,12 @@ const toolCallMessage = [{
   content: [{
     type: 'function_call',
     name: 'create_travel_guide',
-    arguments: {
-      city: '北京'
-    },
+    arguments: "{\"city\":\"北京\"}",
+    status: 'completed',
+  },{
+    type: 'function_call',
+    name: 'create_travel_guide1',
+    arguments: "{\"city\":\"北京\"}",
     status: 'completed',
   }], 
   status: 'completed',
@@ -712,6 +720,26 @@ export const Hints = () => {
     )
 }
 
+// export const Response2Message = ResponseToMessageDemo;
+// export const ChatCompletion2Message = ChatCompletionToMessageDemo;
+// export const StreamingResponse2Message = StreamingResponseToMessageDemo;
+
+// ResponseToMessageDemo.story = {
+//   name: 'response to message',
+// };
+
+export const ResponseToMessageStory = () => <ResponseToMessageDemo />;
+ResponseToMessageStory.storyName =  'adapter: response';
+
+export const ChatCompletionToMessageStory = () => <ChatCompletionToMessageDemo />;
+ChatCompletionToMessageStory.storyName =  'adapter: chatCompletion';
+
+export const StreamingResponseToMessageStory = () => <StreamingResponseToMessageDemo />;
+StreamingResponseToMessageStory.storyName =  'adapter: streaming response';
+
+export const StreamingChatCompletionToMessageStory = () => <StreamingChatCompletionToMessageDemo />;
+StreamingChatCompletionToMessageStory.storyName =  'adapter: streaming chatCompletion';
+
 // export const Streaming = () => {
 //     const [messages, setMessage] = useState(defaultMessages);
 
@@ -730,20 +758,4 @@ export const Hints = () => {
 //     )
 // }
 
-// export const DataAdapter = () => {
-//     const [messages, setMessage] = useState(defaultMessages);
 
-//     const onChatsChange = useCallback((chats) => {
-//         setMessage(chats);
-//     }, []);
-  
-//     return (
-//       <AIChatDialogue 
-//           align="leftRight"
-//           mode="bubble"
-//           chats={messages}
-//           roleConfig={roleConfig}
-//           onChatsChange={onChatsChange}
-//       />
-//     )
-// }

+ 20 - 5
packages/semi-ui/aiChatDialogue/widgets/contentItem/annotation.tsx

@@ -1,4 +1,4 @@
-import React, { useCallback } from 'react';
+import React, { useCallback, useEffect, useState } from 'react';
 import { cssClasses } from '@douyinfe/semi-foundation/aiChatDialogue/constants';
 import { IconChevronRight } from '@douyinfe/semi-icons';
 import { Annotation } from '@douyinfe/semi-foundation/aiChatDialogue/foundation';
@@ -25,10 +25,25 @@ export const AnnotationWidget = (props: AnnotationWidgetProps) => {
 
     const { annotation, description, maxCount, onClick } = props;
 
+    const [filteredAnnotation, setFilteredAnnotation] = useState<AnnotationItemProps[]>([]);
+
+    useEffect(() => {
+        const filteredAnnotation = annotation.filter((item: Annotation) => (item.type !== 'file_citation' && item.type !== 'container_file_citation'));
+        setFilteredAnnotation(filteredAnnotation);
+    }, [annotation]);
+
     const handleClick = useCallback((e?: React.MouseEvent<HTMLDivElement>) => {
         onClick?.(e, annotation as AnnotationItemProps[]);
     }, [annotation, onClick]);
 
+    const renderMore = useCallback((restNumber: number, restAvatars: React.ReactNode[]) => {
+        return <Avatar className={`${prefixCls}-content-logo-renderMore`} size="extra-extra-small" alt={'more'}>
+            {`+${restNumber}`}
+        </Avatar>;
+    }, []);
+
+    if (filteredAnnotation.length === 0) { return null; }
+
     return (
         <div 
             role="button"
@@ -43,12 +58,12 @@ export const AnnotationWidget = (props: AnnotationWidgetProps) => {
             }}
         >
             <div className={`${prefixCls}-content`}>
-                <AvatarGroup maxCount={maxCount} size="extra-extra-small" overlapFrom={'end'}>
-                    {annotation.map((item: AnnotationItemProps, index: number) => {
-                        return <Avatar className={`${prefixCls}-content-logo`} key={index} src={item.logo} alt={item.title} />;
+                <AvatarGroup maxCount={maxCount} size="extra-extra-small" overlapFrom={'end'} renderMore={renderMore}>
+                    {filteredAnnotation.map((item: AnnotationItemProps, index: number) => {
+                        return item.logo && <Avatar className={`${prefixCls}-content-logo`} key={index} src={item.logo} alt={item.title}/>;
                     })}
                 </AvatarGroup>
-                <div className={`${prefixCls}-content-description`}>{description || `${annotation.length} 篇资料`}</div>
+                <div className={`${prefixCls}-content-description`}>{description || `${filteredAnnotation.length} 篇资料`}</div>
                 <div className={`${prefixCls}-content-icon`}><IconChevronRight /></div>
             </div>
         </div>

+ 30 - 18
packages/semi-ui/aiChatDialogue/widgets/dialogueContent.tsx

@@ -4,7 +4,7 @@ import { DialogueContentProps } from '../interface';
 import MarkdownRender from '../../markdownRender';
 import { cssClasses, strings } from '@douyinfe/semi-foundation/aiChatDialogue/constants';
 import { Image } from '../../index';
-import { FunctionToolCall, InputFile, ContentItem, InputMessage, InputText, InputImage, OutputText, Reasoning, CustomToolCall, OutputMessage } from '@douyinfe/semi-foundation/aiChatDialogue/foundation';
+import { FunctionToolCall, InputFile, ContentItem, InputMessage, InputText, InputImage, OutputText, Reasoning, CustomToolCall, OutputMessage, Refusal } from '@douyinfe/semi-foundation/aiChatDialogue/foundation';
 import { DialogueContentItemRenderer } from '../interface';
 import { IconAlertCircle, IconCode, IconExcel, IconFile, IconImage, IconPdf, IconSpin, IconWord, IconWrench } from '@douyinfe/semi-icons';
 import { ReasoningWidget } from './contentItem/reasoning';
@@ -20,7 +20,7 @@ const DialogueContent = (props: DialogueContentProps) => {
         onFileClick, onImageClick, disabledFileItemClick, renderDialogueContentItem, onAnnotationClick } = props;
     const { content, role, status, references } = message;
 
-    const TEXT_TYPES = useMemo(() => [MESSAGE_ITEM_TYPE.INPUT_TEXT, MESSAGE_ITEM_TYPE.OUTPUT_TEXT], []);
+    const TEXT_TYPES = useMemo(() => [MESSAGE_ITEM_TYPE.INPUT_TEXT, MESSAGE_ITEM_TYPE.OUTPUT_TEXT, MESSAGE_ITEM_TYPE.REFUSAL], []);
     const TOOL_CALL_TYPES = useMemo(() => [MESSAGE_ITEM_TYPE.FUNCTION_CALL, MESSAGE_ITEM_TYPE.CUSTOM_TOOL_CALL, MESSAGE_ITEM_TYPE.MCP_CALL], []);
 
     const markdownComponents = useMemo(() => ({
@@ -121,7 +121,7 @@ const DialogueContent = (props: DialogueContentProps) => {
         const { name, status } = props;
         return <div className={`${PREFIX_CONTENT}-tool-call`}>
             {status !== STATUS.COMPLETED ? <IconSpin /> : <IconWrench />}
-            {name} {JSON.stringify(props.arguments)}
+            {name}  {props.arguments}
         </div>;
     });
 
@@ -158,16 +158,19 @@ const DialogueContent = (props: DialogueContentProps) => {
         return null;
     }, [customRenderMap, TOOL_CALL_TYPES]);
 
-    const renderMarkdown = useCallback((text: string, key: React.Key) => (
-        <div className={wrapCls} key={key}>
-            <MarkdownRender
-                format='md'
-                raw={text}
-                components={markdownComponents as any}
-                {...markdownRenderProps}
-            />
-        </div>
-    ), [wrapCls, markdownComponents, markdownRenderProps]);
+    const renderMarkdown = useCallback((text: string, key: React.Key) => {
+        if (text !== '') {
+            return <div className={wrapCls} key={key}>
+                <MarkdownRender
+                    format='md'
+                    raw={text}
+                    components={markdownComponents as any}
+                    {...markdownRenderProps}
+                />
+            </div>;
+        }
+        return null;
+    }, [wrapCls, markdownComponents, markdownRenderProps]);
 
     const renderMessage = useCallback((msg: InputMessage | OutputMessage, index: number) => {
         if (typeof msg.content === 'string') {
@@ -188,11 +191,13 @@ const DialogueContent = (props: DialogueContentProps) => {
                         {annotation && annotation.length > 0 &&
                             <AnnotationWidget 
                                 annotation={annotation} 
-                                maxCount={3}
+                                // todo: 需要支持动态配置
+                                maxCount={15}
                                 onClick={onAnnotationClick}
                             />
                         }
                         {renderMarkdown((i as InputText | OutputText).text || '', `msg-${index}-${innerIdx}`)}
+                        {renderMarkdown((i as Refusal).refusal || '', `msg-${index}-${innerIdx}-refusal`)}
                     </React.Fragment>
                 );
             }
@@ -227,15 +232,21 @@ const DialogueContent = (props: DialogueContentProps) => {
         [MESSAGE_ITEM_TYPE.CUSTOM_TOOL_CALL]: renderToolCall,
     } as Record<string, (item: ContentItem, index: number) => React.ReactNode>), [renderMessage, markdownRenderProps, customMarkDownComponents, renderToolCall]);
 
-    const node = useMemo(() => {
-        const isLoading = [STATUS.QUEUE, STATUS.IN_PROGRESS, STATUS.INCOMPLETE].includes(status);
+    const loadingNode = useMemo(() => {
+        // todo: 支持彩色 loading
+        const isLoading = [STATUS.QUEUED, STATUS.IN_PROGRESS, STATUS.INCOMPLETE].includes(status);
         if (isLoading) {
-            // todo: 支持彩色 loading
             return <span className={`${PREFIX_CONTENT}-loading`} >
                 <span className={`${PREFIX_CONTENT}-loading-item`} /> 
                 <span className={`${PREFIX_CONTENT}-loading-text`}>请稍候...</span>
             </span>;
-        } else if (editing) {
+        } else {
+            return null;
+        }
+    }, [status]);
+
+    const node = useMemo(() => {
+        if (editing) {
             // todo: 两种行为,内置 + 自定义传入
             // 内置只编辑纯消息,自定义支持编辑多模态消息
             return messageEditRender?.(message);
@@ -296,6 +307,7 @@ const DialogueContent = (props: DialogueContentProps) => {
         })}>
             {references && references.length > 0 && <ReferenceWidget references={references} />}
             {node}
+            {loadingNode}
         </div>; 
     } 
 };

+ 1 - 0
packages/semi-ui/index.ts

@@ -130,3 +130,4 @@ export { default as AudioPlayer } from './audioPlayer';
 export { default as UserGuide } from './userGuide';
 export { default as VideoPlayer } from './videoPlayer';
 export { default as AIChatDialogue } from './aiChatDialogue';
+export { chatCompletionToMessage, streamingChatCompletionToMessage, streamingResponseToMessage, responseToMessage } from '@douyinfe/semi-foundation/aiChatDialogue/dataAdapter';

Some files were not shown because too many files changed in this diff