// transform.ts
  1. import type { ModelMessage } from "ai"
  2. import { unique } from "remeda"
  3. import type { JSONSchema } from "zod/v4/core"
  4. export namespace ProviderTransform {
  5. function normalizeToolCallIds(msgs: ModelMessage[]): ModelMessage[] {
  6. return msgs.map((msg) => {
  7. if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
  8. msg.content = msg.content.map((part) => {
  9. if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
  10. return {
  11. ...part,
  12. toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
  13. }
  14. }
  15. return part
  16. })
  17. }
  18. return msg
  19. })
  20. }
  21. function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
  22. const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
  23. const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
  24. const providerOptions = {
  25. anthropic: {
  26. cacheControl: { type: "ephemeral" },
  27. },
  28. openrouter: {
  29. cache_control: { type: "ephemeral" },
  30. },
  31. bedrock: {
  32. cachePoint: { type: "ephemeral" },
  33. },
  34. openaiCompatible: {
  35. cache_control: { type: "ephemeral" },
  36. },
  37. }
  38. for (const msg of unique([...system, ...final])) {
  39. const shouldUseContentOptions = providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0
  40. if (shouldUseContentOptions) {
  41. const lastContent = msg.content[msg.content.length - 1]
  42. if (lastContent && typeof lastContent === "object") {
  43. lastContent.providerOptions = {
  44. ...lastContent.providerOptions,
  45. ...providerOptions,
  46. }
  47. continue
  48. }
  49. }
  50. msg.providerOptions = {
  51. ...msg.providerOptions,
  52. ...providerOptions,
  53. }
  54. }
  55. return msgs
  56. }
  57. export function message(msgs: ModelMessage[], providerID: string, modelID: string) {
  58. if (modelID.includes("claude")) {
  59. msgs = normalizeToolCallIds(msgs)
  60. }
  61. if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) {
  62. msgs = applyCaching(msgs, providerID)
  63. }
  64. return msgs
  65. }
  66. export function temperature(_providerID: string, modelID: string) {
  67. if (modelID.toLowerCase().includes("qwen")) return 0.55
  68. if (modelID.toLowerCase().includes("claude")) return 1
  69. return 0
  70. }
  71. export function topP(_providerID: string, modelID: string) {
  72. if (modelID.toLowerCase().includes("qwen")) return 1
  73. return undefined
  74. }
  75. export function options(providerID: string, modelID: string, sessionID: string): Record<string, any> | undefined {
  76. const result: Record<string, any> = {}
  77. if (providerID === "openai") {
  78. result["promptCacheKey"] = sessionID
  79. }
  80. if (modelID.includes("gpt-5") && !modelID.includes("gpt-5-chat")) {
  81. result["reasoningEffort"] = "medium"
  82. if (providerID !== "azure") {
  83. result["textVerbosity"] = modelID.includes("codex") ? "medium" : "low"
  84. }
  85. if (providerID === "opencode") {
  86. result["promptCacheKey"] = sessionID
  87. result["include"] = ["reasoning.encrypted_content"]
  88. result["reasoningSummary"] = "auto"
  89. }
  90. }
  91. return result
  92. }
  93. export function maxOutputTokens(providerID: string, outputLimit: number, options: Record<string, any>): number {
  94. if (providerID === "anthropic") {
  95. const thinking = options["thinking"]
  96. if (typeof thinking === "object" && thinking !== null) {
  97. const type = thinking["type"]
  98. const budgetTokens = thinking["budgetTokens"]
  99. if (type === "enabled" && typeof budgetTokens === "number" && budgetTokens > 0) {
  100. return outputLimit - budgetTokens
  101. }
  102. }
  103. }
  104. return outputLimit
  105. }
  106. export function schema(_providerID: string, _modelID: string, schema: JSONSchema.BaseSchema) {
  107. /*
  108. if (["openai", "azure"].includes(providerID)) {
  109. if (schema.type === "object" && schema.properties) {
  110. for (const [key, value] of Object.entries(schema.properties)) {
  111. if (schema.required?.includes(key)) continue
  112. schema.properties[key] = {
  113. anyOf: [
  114. value as JSONSchema.JSONSchema,
  115. {
  116. type: "null",
  117. },
  118. ],
  119. }
  120. }
  121. }
  122. }
  123. if (providerID === "google") {
  124. }
  125. */
  126. return schema
  127. }
  128. }