// transform.ts
  1. import type { ModelMessage } from "ai"
  2. import { unique } from "remeda"
  3. export namespace ProviderTransform {
  4. function normalizeToolCallIds(msgs: ModelMessage[]): ModelMessage[] {
  5. return msgs.map((msg) => {
  6. if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
  7. msg.content = msg.content.map((part) => {
  8. if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
  9. return {
  10. ...part,
  11. toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
  12. }
  13. }
  14. return part
  15. })
  16. }
  17. return msg
  18. })
  19. }
  20. function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
  21. const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
  22. const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
  23. const providerOptions = {
  24. anthropic: {
  25. cacheControl: { type: "ephemeral" },
  26. },
  27. openrouter: {
  28. cache_control: { type: "ephemeral" },
  29. },
  30. bedrock: {
  31. cachePoint: { type: "ephemeral" },
  32. },
  33. openaiCompatible: {
  34. cache_control: { type: "ephemeral" },
  35. },
  36. }
  37. for (const msg of unique([...system, ...final])) {
  38. const shouldUseContentOptions = providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0
  39. if (shouldUseContentOptions) {
  40. const lastContent = msg.content[msg.content.length - 1]
  41. if (lastContent && typeof lastContent === "object") {
  42. lastContent.providerOptions = {
  43. ...lastContent.providerOptions,
  44. ...providerOptions,
  45. }
  46. continue
  47. }
  48. }
  49. msg.providerOptions = {
  50. ...msg.providerOptions,
  51. ...providerOptions,
  52. }
  53. }
  54. return msgs
  55. }
  56. export function message(msgs: ModelMessage[], providerID: string, modelID: string) {
  57. if (modelID.includes("claude")) {
  58. msgs = normalizeToolCallIds(msgs)
  59. }
  60. if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) {
  61. msgs = applyCaching(msgs, providerID)
  62. }
  63. return msgs
  64. }
  65. export function temperature(_providerID: string, modelID: string) {
  66. if (modelID.toLowerCase().includes("qwen")) return 0.55
  67. return 0
  68. }
  69. export function topP(_providerID: string, modelID: string) {
  70. if (modelID.toLowerCase().includes("qwen")) return 1
  71. return undefined
  72. }
  73. export function options(providerID: string, modelID: string): Record<string, any> | undefined {
  74. if (modelID.includes("gpt-5")) {
  75. if (providerID === "azure") {
  76. return {
  77. reasoning_effort: "minimal",
  78. text_verbosity: "verbose",
  79. }
  80. }
  81. return {
  82. reasoningEffort: "minimal",
  83. textVerbosity: "low",
  84. // reasoningSummary: "auto",
  85. // include: ["reasoning.encrypted_content"],
  86. }
  87. }
  88. // if (modelID.includes("claude")) {
  89. // return {
  90. // thinking: {
  91. // type: "enabled",
  92. // budgetTokens: 32000,
  93. // },
  94. // }
  95. // }
  96. // if (_providerID === "bedrock") {
  97. // return {
  98. // reasoningConfig: { type: "enabled", budgetTokens: 32000 },
  99. // }
  100. // }
  101. }
  102. }