fh.ai.js

import EncodeUtils from '../en-decode/endecode-lib.js';

/**
 * Streams chat-completion answers from the 01.AI (Lingyiwanwu) large model.
 */
let AI = (() => {
  const defaultKey = 'MWFhZWE0M2Y3ZDBkNDJhNmJhNjMzOTZkOGJlNTA4ZmY=';

  async function streamChatCompletions(prompt, receivingCallback, apiKey) {
    const url = 'https://api.lingyiwanwu.com/v1/chat/completions';
    const options = {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey || EncodeUtils.base64Decode(defaultKey)}`
      },
      body: JSON.stringify({
        model: "yi-large",
        messages: [{ role: "user", content: prompt }],
        temperature: 0.3,
        stream: true
      })
    };
    try {
      const response = await fetch(url, options);
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }
      // Read the streamed response body chunk by chunk.
      const reader = response.body.getReader();
      const decoder = new TextDecoder('utf-8');
      let done = false;
      while (!done) {
        const { value, done: readerDone } = await reader.read();
        done = readerDone;
        if (value) {
          // Decode the chunk and treat each non-empty line as a server-sent event.
          const lines = decoder.decode(value, { stream: true }).split('\n').filter(line => line.trim() !== '');
          for (const line of lines) {
            try {
              // Strip the "data:" prefix and parse the line as a standalone JSON object.
              const message = JSON.parse(line.replace(/^data:\s+/, ''));
              receivingCallback && receivingCallback(message);
            } catch (jsonError) {
              // The terminating "[DONE]" sentinel is not JSON; signal completion instead.
              if (line === 'data: [DONE]') {
                receivingCallback && receivingCallback(null, true);
              }
            }
          }
        }
      }
    } catch (error) {
      console.error('Error fetching chat completions:', error);
    }
  }

  return { askYiLarge: streamChatCompletions };
})();

export default AI;
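
A minimal usage sketch follows, assuming a browser context where `fetch` is available, an element with id `answer` exists in the page, and the response follows the OpenAI-compatible streaming format in which incremental text arrives in `choices[0].delta.content`. The prompt text and element id are illustrative only.

// usage-sketch.js (hypothetical caller of fh.ai.js)
import AI from './fh.ai.js';

let answerText = '';
AI.askYiLarge('Briefly explain what a Promise is in JavaScript.', (message, isDone) => {
  if (isDone) {
    // The stream sent its "[DONE]" sentinel; rendering is complete.
    console.log('Stream finished.');
    return;
  }
  // Assumption: each streamed chunk carries an incremental delta, OpenAI-style.
  const delta = message?.choices?.[0]?.delta?.content || '';
  answerText += delta;
  document.getElementById('answer').textContent = answerText;
});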