// transform.ts
  1. import type { APICallError, ModelMessage } from "ai"
  2. import { unique } from "remeda"
  3. import type { JSONSchema } from "zod/v4/core"
  4. export namespace ProviderTransform {
  5. function normalizeMessages(msgs: ModelMessage[], providerID: string, modelID: string): ModelMessage[] {
  6. if (modelID.includes("claude")) {
  7. return msgs.map((msg) => {
  8. if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
  9. msg.content = msg.content.map((part) => {
  10. if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
  11. return {
  12. ...part,
  13. toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
  14. }
  15. }
  16. return part
  17. })
  18. }
  19. return msg
  20. })
  21. }
  22. if (providerID === "mistral" || modelID.toLowerCase().includes("mistral")) {
  23. const result: ModelMessage[] = []
  24. for (let i = 0; i < msgs.length; i++) {
  25. const msg = msgs[i]
  26. const nextMsg = msgs[i + 1]
  27. if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
  28. msg.content = msg.content.map((part) => {
  29. if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
  30. // Mistral requires alphanumeric tool call IDs with exactly 9 characters
  31. const normalizedId = part.toolCallId
  32. .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
  33. .substring(0, 9) // Take first 9 characters
  34. .padEnd(9, "0") // Pad with zeros if less than 9 characters
  35. return {
  36. ...part,
  37. toolCallId: normalizedId,
  38. }
  39. }
  40. return part
  41. })
  42. }
  43. result.push(msg)
  44. // Fix message sequence: tool messages cannot be followed by user messages
  45. if (msg.role === "tool" && nextMsg?.role === "user") {
  46. result.push({
  47. role: "assistant",
  48. content: [
  49. {
  50. type: "text",
  51. text: "Done.",
  52. },
  53. ],
  54. })
  55. }
  56. }
  57. return result
  58. }
  59. return msgs
  60. }
  61. function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
  62. const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
  63. const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
  64. const providerOptions = {
  65. anthropic: {
  66. cacheControl: { type: "ephemeral" },
  67. },
  68. openrouter: {
  69. cache_control: { type: "ephemeral" },
  70. },
  71. bedrock: {
  72. cachePoint: { type: "ephemeral" },
  73. },
  74. openaiCompatible: {
  75. cache_control: { type: "ephemeral" },
  76. },
  77. }
  78. for (const msg of unique([...system, ...final])) {
  79. const shouldUseContentOptions = providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0
  80. if (shouldUseContentOptions) {
  81. const lastContent = msg.content[msg.content.length - 1]
  82. if (lastContent && typeof lastContent === "object") {
  83. lastContent.providerOptions = {
  84. ...lastContent.providerOptions,
  85. ...providerOptions,
  86. }
  87. continue
  88. }
  89. }
  90. msg.providerOptions = {
  91. ...msg.providerOptions,
  92. ...providerOptions,
  93. }
  94. }
  95. return msgs
  96. }
  97. export function message(msgs: ModelMessage[], providerID: string, modelID: string) {
  98. msgs = normalizeMessages(msgs, providerID, modelID)
  99. if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) {
  100. msgs = applyCaching(msgs, providerID)
  101. }
  102. return msgs
  103. }
  104. export function temperature(_providerID: string, modelID: string) {
  105. if (modelID.toLowerCase().includes("qwen")) return 0.55
  106. if (modelID.toLowerCase().includes("claude")) return undefined
  107. if (modelID.toLowerCase().includes("gemini-3-pro")) return 1.0
  108. return 0
  109. }
  110. export function topP(_providerID: string, modelID: string) {
  111. if (modelID.toLowerCase().includes("qwen")) return 1
  112. return undefined
  113. }
  114. export function options(
  115. providerID: string,
  116. modelID: string,
  117. npm: string,
  118. sessionID: string,
  119. providerOptions?: Record<string, any>,
  120. ): Record<string, any> {
  121. const result: Record<string, any> = {}
  122. // switch to providerID later, for now use this
  123. if (npm === "@openrouter/ai-sdk-provider") {
  124. result["usage"] = {
  125. include: true,
  126. }
  127. }
  128. if (providerID === "openai" || providerOptions?.setCacheKey) {
  129. result["promptCacheKey"] = sessionID
  130. }
  131. if (providerID === "google" || (providerID.startsWith("opencode") && modelID.includes("gemini-3"))) {
  132. result["thinkingConfig"] = {
  133. includeThoughts: true,
  134. }
  135. }
  136. if (modelID.includes("gpt-5") && !modelID.includes("gpt-5-chat")) {
  137. if (modelID.includes("codex")) {
  138. result["store"] = false
  139. }
  140. if (!modelID.includes("codex") && !modelID.includes("gpt-5-pro")) {
  141. result["reasoningEffort"] = "medium"
  142. }
  143. if (modelID.endsWith("gpt-5.1") && providerID !== "azure") {
  144. result["textVerbosity"] = "low"
  145. }
  146. if (providerID.startsWith("opencode")) {
  147. result["promptCacheKey"] = sessionID
  148. result["include"] = ["reasoning.encrypted_content"]
  149. result["reasoningSummary"] = "auto"
  150. }
  151. }
  152. return result
  153. }
  154. export function smallOptions(input: { providerID: string; modelID: string }) {
  155. const options: Record<string, any> = {}
  156. if (input.providerID === "openai" || input.modelID.includes("gpt-5")) {
  157. if (input.modelID.includes("5.1")) {
  158. options["reasoningEffort"] = "low"
  159. } else {
  160. options["reasoningEffort"] = "minimal"
  161. }
  162. }
  163. if (input.providerID === "google") {
  164. options["thinkingConfig"] = {
  165. thinkingBudget: 0,
  166. }
  167. }
  168. return options
  169. }
  170. export function providerOptions(npm: string | undefined, providerID: string, options: { [x: string]: any }) {
  171. switch (npm) {
  172. case "@ai-sdk/openai":
  173. case "@ai-sdk/azure":
  174. return {
  175. ["openai" as string]: options,
  176. }
  177. case "@ai-sdk/amazon-bedrock":
  178. return {
  179. ["bedrock" as string]: options,
  180. }
  181. case "@ai-sdk/anthropic":
  182. return {
  183. ["anthropic" as string]: options,
  184. }
  185. case "@ai-sdk/google":
  186. return {
  187. ["google" as string]: options,
  188. }
  189. case "@ai-sdk/gateway":
  190. return {
  191. ["gateway" as string]: options,
  192. }
  193. case "@openrouter/ai-sdk-provider":
  194. return {
  195. ["openrouter" as string]: options,
  196. }
  197. default:
  198. return {
  199. [providerID]: options,
  200. }
  201. }
  202. }
  203. export function maxOutputTokens(
  204. npm: string,
  205. options: Record<string, any>,
  206. modelLimit: number,
  207. globalLimit: number,
  208. ): number {
  209. const modelCap = modelLimit || globalLimit
  210. const standardLimit = Math.min(modelCap, globalLimit)
  211. if (npm === "@ai-sdk/anthropic") {
  212. const thinking = options?.["thinking"]
  213. const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
  214. const enabled = thinking?.["type"] === "enabled"
  215. if (enabled && budgetTokens > 0) {
  216. // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
  217. if (budgetTokens + standardLimit <= modelCap) {
  218. return standardLimit
  219. }
  220. return modelCap - budgetTokens
  221. }
  222. }
  223. return standardLimit
  224. }
  225. export function schema(providerID: string, modelID: string, schema: JSONSchema.BaseSchema) {
  226. /*
  227. if (["openai", "azure"].includes(providerID)) {
  228. if (schema.type === "object" && schema.properties) {
  229. for (const [key, value] of Object.entries(schema.properties)) {
  230. if (schema.required?.includes(key)) continue
  231. schema.properties[key] = {
  232. anyOf: [
  233. value as JSONSchema.JSONSchema,
  234. {
  235. type: "null",
  236. },
  237. ],
  238. }
  239. }
  240. }
  241. }
  242. */
  243. // Convert integer enums to string enums for Google/Gemini
  244. if (providerID === "google" || modelID.includes("gemini")) {
  245. const sanitizeGemini = (obj: any): any => {
  246. if (obj === null || typeof obj !== "object") {
  247. return obj
  248. }
  249. if (Array.isArray(obj)) {
  250. return obj.map(sanitizeGemini)
  251. }
  252. const result: any = {}
  253. for (const [key, value] of Object.entries(obj)) {
  254. if (key === "enum" && Array.isArray(value)) {
  255. // Convert all enum values to strings
  256. result[key] = value.map((v) => String(v))
  257. // If we have integer type with enum, change type to string
  258. if (result.type === "integer" || result.type === "number") {
  259. result.type = "string"
  260. }
  261. } else if (typeof value === "object" && value !== null) {
  262. result[key] = sanitizeGemini(value)
  263. } else {
  264. result[key] = value
  265. }
  266. }
  267. // Filter required array to only include fields that exist in properties
  268. if (result.type === "object" && result.properties && Array.isArray(result.required)) {
  269. result.required = result.required.filter((field: any) => field in result.properties)
  270. }
  271. return result
  272. }
  273. schema = sanitizeGemini(schema)
  274. }
  275. return schema
  276. }
  277. export function error(providerID: string, error: APICallError) {
  278. let message = error.message
  279. if (providerID === "github-copilot" && message.includes("The requested model is not supported")) {
  280. return (
  281. message +
  282. "\n\nMake sure the model is enabled in your copilot settings: https://github.com/settings/copilot/features"
  283. )
  284. }
  285. return message
  286. }
  287. }