// transform.test.ts
  1. import { describe, expect, test } from "bun:test"
  2. import { ProviderTransform } from "../../src/provider/transform"
  3. import { ModelID, ProviderID } from "../../src/provider/schema"
// Output-token cap shared by tests later in this file.
// NOTE(review): not referenced anywhere in this visible section — presumably
// used further down; confirm before removing.
const OUTPUT_TOKEN_MAX = 32000
  5. describe("ProviderTransform.options - setCacheKey", () => {
  6. const sessionID = "test-session-123"
  7. const mockModel = {
  8. id: "anthropic/claude-3-5-sonnet",
  9. providerID: "anthropic",
  10. api: {
  11. id: "claude-3-5-sonnet-20241022",
  12. url: "https://api.anthropic.com",
  13. npm: "@ai-sdk/anthropic",
  14. },
  15. name: "Claude 3.5 Sonnet",
  16. capabilities: {
  17. temperature: true,
  18. reasoning: false,
  19. attachment: true,
  20. toolcall: true,
  21. input: { text: true, audio: false, image: true, video: false, pdf: true },
  22. output: { text: true, audio: false, image: false, video: false, pdf: false },
  23. interleaved: false,
  24. },
  25. cost: {
  26. input: 0.003,
  27. output: 0.015,
  28. cache: { read: 0.0003, write: 0.00375 },
  29. },
  30. limit: {
  31. context: 200000,
  32. output: 8192,
  33. },
  34. status: "active",
  35. options: {},
  36. headers: {},
  37. } as any
  38. test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
  39. const result = ProviderTransform.options({
  40. model: mockModel,
  41. sessionID,
  42. providerOptions: { setCacheKey: true },
  43. })
  44. expect(result.promptCacheKey).toBe(sessionID)
  45. })
  46. test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
  47. const result = ProviderTransform.options({
  48. model: mockModel,
  49. sessionID,
  50. providerOptions: { setCacheKey: false },
  51. })
  52. expect(result.promptCacheKey).toBeUndefined()
  53. })
  54. test("should not set promptCacheKey when providerOptions is undefined", () => {
  55. const result = ProviderTransform.options({
  56. model: mockModel,
  57. sessionID,
  58. providerOptions: undefined,
  59. })
  60. expect(result.promptCacheKey).toBeUndefined()
  61. })
  62. test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
  63. const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
  64. expect(result.promptCacheKey).toBeUndefined()
  65. })
  66. test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
  67. const openaiModel = {
  68. ...mockModel,
  69. providerID: "openai",
  70. api: {
  71. id: "gpt-4",
  72. url: "https://api.openai.com",
  73. npm: "@ai-sdk/openai",
  74. },
  75. }
  76. const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
  77. expect(result.promptCacheKey).toBe(sessionID)
  78. })
  79. test("should set store=false for openai provider", () => {
  80. const openaiModel = {
  81. ...mockModel,
  82. providerID: "openai",
  83. api: {
  84. id: "gpt-4",
  85. url: "https://api.openai.com",
  86. npm: "@ai-sdk/openai",
  87. },
  88. }
  89. const result = ProviderTransform.options({
  90. model: openaiModel,
  91. sessionID,
  92. providerOptions: {},
  93. })
  94. expect(result.store).toBe(false)
  95. })
  96. })
  97. describe("ProviderTransform.options - google thinkingConfig gating", () => {
  98. const sessionID = "test-session-123"
  99. const createGoogleModel = (reasoning: boolean, npm: "@ai-sdk/google" | "@ai-sdk/google-vertex") =>
  100. ({
  101. id: `${npm === "@ai-sdk/google" ? "google" : "google-vertex"}/gemini-2.0-flash`,
  102. providerID: npm === "@ai-sdk/google" ? "google" : "google-vertex",
  103. api: {
  104. id: "gemini-2.0-flash",
  105. url: npm === "@ai-sdk/google" ? "https://generativelanguage.googleapis.com" : "https://vertexai.googleapis.com",
  106. npm,
  107. },
  108. name: "Gemini 2.0 Flash",
  109. capabilities: {
  110. temperature: true,
  111. reasoning,
  112. attachment: true,
  113. toolcall: true,
  114. input: { text: true, audio: false, image: true, video: false, pdf: true },
  115. output: { text: true, audio: false, image: false, video: false, pdf: false },
  116. interleaved: false,
  117. },
  118. cost: {
  119. input: 0.001,
  120. output: 0.002,
  121. cache: { read: 0.0001, write: 0.0002 },
  122. },
  123. limit: {
  124. context: 1_000_000,
  125. output: 8192,
  126. },
  127. status: "active",
  128. options: {},
  129. headers: {},
  130. }) as any
  131. test("does not set thinkingConfig for google models without reasoning capability", () => {
  132. const result = ProviderTransform.options({
  133. model: createGoogleModel(false, "@ai-sdk/google"),
  134. sessionID,
  135. providerOptions: {},
  136. })
  137. expect(result.thinkingConfig).toBeUndefined()
  138. })
  139. test("sets thinkingConfig for google models with reasoning capability", () => {
  140. const result = ProviderTransform.options({
  141. model: createGoogleModel(true, "@ai-sdk/google"),
  142. sessionID,
  143. providerOptions: {},
  144. })
  145. expect(result.thinkingConfig).toEqual({
  146. includeThoughts: true,
  147. })
  148. })
  149. test("does not set thinkingConfig for vertex models without reasoning capability", () => {
  150. const result = ProviderTransform.options({
  151. model: createGoogleModel(false, "@ai-sdk/google-vertex"),
  152. sessionID,
  153. providerOptions: {},
  154. })
  155. expect(result.thinkingConfig).toBeUndefined()
  156. })
  157. })
  158. describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  159. const sessionID = "test-session-123"
  160. const createGpt5Model = (apiId: string) =>
  161. ({
  162. id: `openai/${apiId}`,
  163. providerID: "openai",
  164. api: {
  165. id: apiId,
  166. url: "https://api.openai.com",
  167. npm: "@ai-sdk/openai",
  168. },
  169. name: apiId,
  170. capabilities: {
  171. temperature: true,
  172. reasoning: true,
  173. attachment: true,
  174. toolcall: true,
  175. input: { text: true, audio: false, image: true, video: false, pdf: false },
  176. output: { text: true, audio: false, image: false, video: false, pdf: false },
  177. interleaved: false,
  178. },
  179. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  180. limit: { context: 128000, output: 4096 },
  181. status: "active",
  182. options: {},
  183. headers: {},
  184. }) as any
  185. test("gpt-5.2 should have textVerbosity set to low", () => {
  186. const model = createGpt5Model("gpt-5.2")
  187. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  188. expect(result.textVerbosity).toBe("low")
  189. })
  190. test("gpt-5.1 should have textVerbosity set to low", () => {
  191. const model = createGpt5Model("gpt-5.1")
  192. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  193. expect(result.textVerbosity).toBe("low")
  194. })
  195. test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  196. const model = createGpt5Model("gpt-5.2-chat-latest")
  197. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  198. expect(result.textVerbosity).toBeUndefined()
  199. })
  200. test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  201. const model = createGpt5Model("gpt-5.1-chat-latest")
  202. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  203. expect(result.textVerbosity).toBeUndefined()
  204. })
  205. test("gpt-5.2-chat should NOT have textVerbosity set", () => {
  206. const model = createGpt5Model("gpt-5.2-chat")
  207. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  208. expect(result.textVerbosity).toBeUndefined()
  209. })
  210. test("gpt-5-chat should NOT have textVerbosity set", () => {
  211. const model = createGpt5Model("gpt-5-chat")
  212. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  213. expect(result.textVerbosity).toBeUndefined()
  214. })
  215. test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
  216. const model = createGpt5Model("gpt-5.2-codex")
  217. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  218. expect(result.textVerbosity).toBeUndefined()
  219. })
  220. })
  221. describe("ProviderTransform.options - gateway", () => {
  222. const sessionID = "test-session-123"
  223. const createModel = (id: string) =>
  224. ({
  225. id,
  226. providerID: "vercel",
  227. api: {
  228. id,
  229. url: "https://ai-gateway.vercel.sh/v3/ai",
  230. npm: "@ai-sdk/gateway",
  231. },
  232. name: id,
  233. capabilities: {
  234. temperature: true,
  235. reasoning: true,
  236. attachment: true,
  237. toolcall: true,
  238. input: { text: true, audio: false, image: true, video: false, pdf: true },
  239. output: { text: true, audio: false, image: false, video: false, pdf: false },
  240. interleaved: false,
  241. },
  242. cost: {
  243. input: 0.001,
  244. output: 0.002,
  245. cache: { read: 0.0001, write: 0.0002 },
  246. },
  247. limit: {
  248. context: 200_000,
  249. output: 8192,
  250. },
  251. status: "active",
  252. options: {},
  253. headers: {},
  254. release_date: "2024-01-01",
  255. }) as any
  256. test("puts gateway defaults under gateway key", () => {
  257. const model = createModel("anthropic/claude-sonnet-4")
  258. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  259. expect(result).toEqual({
  260. gateway: {
  261. caching: "auto",
  262. },
  263. })
  264. })
  265. })
  266. describe("ProviderTransform.providerOptions", () => {
  267. const createModel = (overrides: Partial<any> = {}) =>
  268. ({
  269. id: "test/test-model",
  270. providerID: "test",
  271. api: {
  272. id: "test-model",
  273. url: "https://api.test.com",
  274. npm: "@ai-sdk/openai",
  275. },
  276. name: "Test Model",
  277. capabilities: {
  278. temperature: true,
  279. reasoning: true,
  280. attachment: true,
  281. toolcall: true,
  282. input: { text: true, audio: false, image: true, video: false, pdf: false },
  283. output: { text: true, audio: false, image: false, video: false, pdf: false },
  284. interleaved: false,
  285. },
  286. cost: {
  287. input: 0.001,
  288. output: 0.002,
  289. cache: { read: 0.0001, write: 0.0002 },
  290. },
  291. limit: {
  292. context: 200_000,
  293. output: 64_000,
  294. },
  295. status: "active",
  296. options: {},
  297. headers: {},
  298. release_date: "2024-01-01",
  299. ...overrides,
  300. }) as any
  301. test("uses sdk key for non-gateway models", () => {
  302. const model = createModel({
  303. providerID: "my-bedrock",
  304. api: {
  305. id: "anthropic.claude-sonnet-4",
  306. url: "https://bedrock.aws",
  307. npm: "@ai-sdk/amazon-bedrock",
  308. },
  309. })
  310. expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
  311. bedrock: { cachePoint: { type: "default" } },
  312. })
  313. })
  314. test("uses gateway model provider slug for gateway models", () => {
  315. const model = createModel({
  316. providerID: "vercel",
  317. api: {
  318. id: "anthropic/claude-sonnet-4",
  319. url: "https://ai-gateway.vercel.sh/v3/ai",
  320. npm: "@ai-sdk/gateway",
  321. },
  322. })
  323. expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
  324. anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  325. })
  326. })
  327. test("falls back to gateway key when gateway api id is unscoped", () => {
  328. const model = createModel({
  329. id: "anthropic/claude-sonnet-4",
  330. providerID: "vercel",
  331. api: {
  332. id: "claude-sonnet-4",
  333. url: "https://ai-gateway.vercel.sh/v3/ai",
  334. npm: "@ai-sdk/gateway",
  335. },
  336. })
  337. expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
  338. gateway: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  339. })
  340. })
  341. test("splits gateway routing options from provider-specific options", () => {
  342. const model = createModel({
  343. providerID: "vercel",
  344. api: {
  345. id: "anthropic/claude-sonnet-4",
  346. url: "https://ai-gateway.vercel.sh/v3/ai",
  347. npm: "@ai-sdk/gateway",
  348. },
  349. })
  350. expect(
  351. ProviderTransform.providerOptions(model, {
  352. gateway: { order: ["vertex", "anthropic"] },
  353. thinking: { type: "enabled", budgetTokens: 12_000 },
  354. }),
  355. ).toEqual({
  356. gateway: { order: ["vertex", "anthropic"] },
  357. anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  358. } as any)
  359. })
  360. test("falls back to gateway key when model id has no provider slug", () => {
  361. const model = createModel({
  362. id: "claude-sonnet-4",
  363. providerID: "vercel",
  364. api: {
  365. id: "claude-sonnet-4",
  366. url: "https://ai-gateway.vercel.sh/v3/ai",
  367. npm: "@ai-sdk/gateway",
  368. },
  369. })
  370. expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
  371. gateway: { reasoningEffort: "high" },
  372. })
  373. })
  374. test("maps amazon slug to bedrock for provider options", () => {
  375. const model = createModel({
  376. providerID: "vercel",
  377. api: {
  378. id: "amazon/nova-2-lite",
  379. url: "https://ai-gateway.vercel.sh/v3/ai",
  380. npm: "@ai-sdk/gateway",
  381. },
  382. })
  383. expect(ProviderTransform.providerOptions(model, { reasoningConfig: { type: "enabled" } })).toEqual({
  384. bedrock: { reasoningConfig: { type: "enabled" } },
  385. })
  386. })
  387. test("uses groq slug for groq models", () => {
  388. const model = createModel({
  389. providerID: "vercel",
  390. api: {
  391. id: "groq/llama-3.3-70b-versatile",
  392. url: "https://ai-gateway.vercel.sh/v3/ai",
  393. npm: "@ai-sdk/gateway",
  394. },
  395. })
  396. expect(ProviderTransform.providerOptions(model, { reasoningFormat: "parsed" })).toEqual({
  397. groq: { reasoningFormat: "parsed" },
  398. })
  399. })
  400. })
  401. describe("ProviderTransform.schema - gemini array items", () => {
  402. test("adds missing items for array properties", () => {
  403. const geminiModel = {
  404. providerID: "google",
  405. api: {
  406. id: "gemini-3-pro",
  407. },
  408. } as any
  409. const schema = {
  410. type: "object",
  411. properties: {
  412. nodes: { type: "array" },
  413. edges: { type: "array", items: { type: "string" } },
  414. },
  415. } as any
  416. const result = ProviderTransform.schema(geminiModel, schema) as any
  417. expect(result.properties.nodes.items).toBeDefined()
  418. expect(result.properties.edges.items.type).toBe("string")
  419. })
  420. })
  421. describe("ProviderTransform.schema - gemini nested array items", () => {
  422. const geminiModel = {
  423. providerID: "google",
  424. api: {
  425. id: "gemini-3-pro",
  426. },
  427. } as any
  428. test("adds type to 2D array with empty inner items", () => {
  429. const schema = {
  430. type: "object",
  431. properties: {
  432. values: {
  433. type: "array",
  434. items: {
  435. type: "array",
  436. items: {}, // Empty items object
  437. },
  438. },
  439. },
  440. } as any
  441. const result = ProviderTransform.schema(geminiModel, schema) as any
  442. // Inner items should have a default type
  443. expect(result.properties.values.items.items.type).toBe("string")
  444. })
  445. test("adds items and type to 2D array with missing inner items", () => {
  446. const schema = {
  447. type: "object",
  448. properties: {
  449. data: {
  450. type: "array",
  451. items: { type: "array" }, // No items at all
  452. },
  453. },
  454. } as any
  455. const result = ProviderTransform.schema(geminiModel, schema) as any
  456. expect(result.properties.data.items.items).toBeDefined()
  457. expect(result.properties.data.items.items.type).toBe("string")
  458. })
  459. test("handles deeply nested arrays (3D)", () => {
  460. const schema = {
  461. type: "object",
  462. properties: {
  463. matrix: {
  464. type: "array",
  465. items: {
  466. type: "array",
  467. items: {
  468. type: "array",
  469. // No items
  470. },
  471. },
  472. },
  473. },
  474. } as any
  475. const result = ProviderTransform.schema(geminiModel, schema) as any
  476. expect(result.properties.matrix.items.items.items).toBeDefined()
  477. expect(result.properties.matrix.items.items.items.type).toBe("string")
  478. })
  479. test("preserves existing item types in nested arrays", () => {
  480. const schema = {
  481. type: "object",
  482. properties: {
  483. numbers: {
  484. type: "array",
  485. items: {
  486. type: "array",
  487. items: { type: "number" }, // Has explicit type
  488. },
  489. },
  490. },
  491. } as any
  492. const result = ProviderTransform.schema(geminiModel, schema) as any
  493. // Should preserve the explicit type
  494. expect(result.properties.numbers.items.items.type).toBe("number")
  495. })
  496. test("handles mixed nested structures with objects and arrays", () => {
  497. const schema = {
  498. type: "object",
  499. properties: {
  500. spreadsheetData: {
  501. type: "object",
  502. properties: {
  503. rows: {
  504. type: "array",
  505. items: {
  506. type: "array",
  507. items: {}, // Empty items
  508. },
  509. },
  510. },
  511. },
  512. },
  513. } as any
  514. const result = ProviderTransform.schema(geminiModel, schema) as any
  515. expect(result.properties.spreadsheetData.properties.rows.items.items.type).toBe("string")
  516. })
  517. })
  518. describe("ProviderTransform.schema - gemini combiner nodes", () => {
  519. const geminiModel = {
  520. providerID: "google",
  521. api: {
  522. id: "gemini-3-pro",
  523. },
  524. } as any
  525. const walk = (node: any, cb: (node: any, path: (string | number)[]) => void, path: (string | number)[] = []) => {
  526. if (node === null || typeof node !== "object") {
  527. return
  528. }
  529. if (Array.isArray(node)) {
  530. node.forEach((item, i) => walk(item, cb, [...path, i]))
  531. return
  532. }
  533. cb(node, path)
  534. Object.entries(node).forEach(([key, value]) => walk(value, cb, [...path, key]))
  535. }
  536. test("keeps edits.items.anyOf without adding type", () => {
  537. const schema = {
  538. type: "object",
  539. properties: {
  540. edits: {
  541. type: "array",
  542. items: {
  543. anyOf: [
  544. {
  545. type: "object",
  546. properties: {
  547. old_string: { type: "string" },
  548. new_string: { type: "string" },
  549. },
  550. required: ["old_string", "new_string"],
  551. },
  552. {
  553. type: "object",
  554. properties: {
  555. old_string: { type: "string" },
  556. new_string: { type: "string" },
  557. replace_all: { type: "boolean" },
  558. },
  559. required: ["old_string", "new_string"],
  560. },
  561. ],
  562. },
  563. },
  564. },
  565. required: ["edits"],
  566. } as any
  567. const result = ProviderTransform.schema(geminiModel, schema) as any
  568. expect(Array.isArray(result.properties.edits.items.anyOf)).toBe(true)
  569. expect(result.properties.edits.items.type).toBeUndefined()
  570. })
  571. test("does not add sibling keys to combiner nodes during sanitize", () => {
  572. const schema = {
  573. type: "object",
  574. properties: {
  575. edits: {
  576. type: "array",
  577. items: {
  578. anyOf: [{ type: "string" }, { type: "number" }],
  579. },
  580. },
  581. value: {
  582. oneOf: [{ type: "string" }, { type: "boolean" }],
  583. },
  584. meta: {
  585. allOf: [
  586. {
  587. type: "object",
  588. properties: { a: { type: "string" } },
  589. },
  590. {
  591. type: "object",
  592. properties: { b: { type: "string" } },
  593. },
  594. ],
  595. },
  596. },
  597. } as any
  598. const input = JSON.parse(JSON.stringify(schema))
  599. const result = ProviderTransform.schema(geminiModel, schema) as any
  600. walk(result, (node, path) => {
  601. const hasCombiner = Array.isArray(node.anyOf) || Array.isArray(node.oneOf) || Array.isArray(node.allOf)
  602. if (!hasCombiner) {
  603. return
  604. }
  605. const before = path.reduce((acc: any, key) => acc?.[key], input)
  606. const added = Object.keys(node).filter((key) => !(key in before))
  607. expect(added).toEqual([])
  608. })
  609. })
  610. })
  611. describe("ProviderTransform.schema - gemini non-object properties removal", () => {
  612. const geminiModel = {
  613. providerID: "google",
  614. api: {
  615. id: "gemini-3-pro",
  616. },
  617. } as any
  618. test("removes properties from non-object types", () => {
  619. const schema = {
  620. type: "object",
  621. properties: {
  622. data: {
  623. type: "string",
  624. properties: { invalid: { type: "string" } },
  625. },
  626. },
  627. } as any
  628. const result = ProviderTransform.schema(geminiModel, schema) as any
  629. expect(result.properties.data.type).toBe("string")
  630. expect(result.properties.data.properties).toBeUndefined()
  631. })
  632. test("removes required from non-object types", () => {
  633. const schema = {
  634. type: "object",
  635. properties: {
  636. data: {
  637. type: "array",
  638. items: { type: "string" },
  639. required: ["invalid"],
  640. },
  641. },
  642. } as any
  643. const result = ProviderTransform.schema(geminiModel, schema) as any
  644. expect(result.properties.data.type).toBe("array")
  645. expect(result.properties.data.required).toBeUndefined()
  646. })
  647. test("removes properties and required from nested non-object types", () => {
  648. const schema = {
  649. type: "object",
  650. properties: {
  651. outer: {
  652. type: "object",
  653. properties: {
  654. inner: {
  655. type: "number",
  656. properties: { bad: { type: "string" } },
  657. required: ["bad"],
  658. },
  659. },
  660. },
  661. },
  662. } as any
  663. const result = ProviderTransform.schema(geminiModel, schema) as any
  664. expect(result.properties.outer.properties.inner.type).toBe("number")
  665. expect(result.properties.outer.properties.inner.properties).toBeUndefined()
  666. expect(result.properties.outer.properties.inner.required).toBeUndefined()
  667. })
  668. test("keeps properties and required on object types", () => {
  669. const schema = {
  670. type: "object",
  671. properties: {
  672. data: {
  673. type: "object",
  674. properties: { name: { type: "string" } },
  675. required: ["name"],
  676. },
  677. },
  678. } as any
  679. const result = ProviderTransform.schema(geminiModel, schema) as any
  680. expect(result.properties.data.type).toBe("object")
  681. expect(result.properties.data.properties).toBeDefined()
  682. expect(result.properties.data.required).toEqual(["name"])
  683. })
  684. test("does not affect non-gemini providers", () => {
  685. const openaiModel = {
  686. providerID: "openai",
  687. api: {
  688. id: "gpt-4",
  689. },
  690. } as any
  691. const schema = {
  692. type: "object",
  693. properties: {
  694. data: {
  695. type: "string",
  696. properties: { invalid: { type: "string" } },
  697. },
  698. },
  699. } as any
  700. const result = ProviderTransform.schema(openaiModel, schema) as any
  701. expect(result.properties.data.properties).toBeDefined()
  702. })
  703. })
  704. describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  705. test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
  706. const msgs = [
  707. {
  708. role: "assistant",
  709. content: [
  710. { type: "reasoning", text: "Let me think about this..." },
  711. {
  712. type: "tool-call",
  713. toolCallId: "test",
  714. toolName: "bash",
  715. input: { command: "echo hello" },
  716. },
  717. ],
  718. },
  719. ] as any[]
  720. const result = ProviderTransform.message(
  721. msgs,
  722. {
  723. id: ModelID.make("deepseek/deepseek-chat"),
  724. providerID: ProviderID.make("deepseek"),
  725. api: {
  726. id: "deepseek-chat",
  727. url: "https://api.deepseek.com",
  728. npm: "@ai-sdk/openai-compatible",
  729. },
  730. name: "DeepSeek Chat",
  731. capabilities: {
  732. temperature: true,
  733. reasoning: true,
  734. attachment: false,
  735. toolcall: true,
  736. input: { text: true, audio: false, image: false, video: false, pdf: false },
  737. output: { text: true, audio: false, image: false, video: false, pdf: false },
  738. interleaved: {
  739. field: "reasoning_content",
  740. },
  741. },
  742. cost: {
  743. input: 0.001,
  744. output: 0.002,
  745. cache: { read: 0.0001, write: 0.0002 },
  746. },
  747. limit: {
  748. context: 128000,
  749. output: 8192,
  750. },
  751. status: "active",
  752. options: {},
  753. headers: {},
  754. release_date: "2023-04-01",
  755. },
  756. {},
  757. )
  758. expect(result).toHaveLength(1)
  759. expect(result[0].content).toEqual([
  760. {
  761. type: "tool-call",
  762. toolCallId: "test",
  763. toolName: "bash",
  764. input: { command: "echo hello" },
  765. },
  766. ])
  767. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  768. })
  769. test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  770. const msgs = [
  771. {
  772. role: "assistant",
  773. content: [
  774. { type: "reasoning", text: "Should not be processed" },
  775. { type: "text", text: "Answer" },
  776. ],
  777. },
  778. ] as any[]
  779. const result = ProviderTransform.message(
  780. msgs,
  781. {
  782. id: ModelID.make("openai/gpt-4"),
  783. providerID: ProviderID.make("openai"),
  784. api: {
  785. id: "gpt-4",
  786. url: "https://api.openai.com",
  787. npm: "@ai-sdk/openai",
  788. },
  789. name: "GPT-4",
  790. capabilities: {
  791. temperature: true,
  792. reasoning: false,
  793. attachment: true,
  794. toolcall: true,
  795. input: { text: true, audio: false, image: true, video: false, pdf: false },
  796. output: { text: true, audio: false, image: false, video: false, pdf: false },
  797. interleaved: false,
  798. },
  799. cost: {
  800. input: 0.03,
  801. output: 0.06,
  802. cache: { read: 0.001, write: 0.002 },
  803. },
  804. limit: {
  805. context: 128000,
  806. output: 4096,
  807. },
  808. status: "active",
  809. options: {},
  810. headers: {},
  811. release_date: "2023-04-01",
  812. },
  813. {},
  814. )
  815. expect(result[0].content).toEqual([
  816. { type: "reasoning", text: "Should not be processed" },
  817. { type: "text", text: "Answer" },
  818. ])
  819. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  820. })
  821. })
  822. describe("ProviderTransform.message - empty image handling", () => {
  823. const mockModel = {
  824. id: "anthropic/claude-3-5-sonnet",
  825. providerID: "anthropic",
  826. api: {
  827. id: "claude-3-5-sonnet-20241022",
  828. url: "https://api.anthropic.com",
  829. npm: "@ai-sdk/anthropic",
  830. },
  831. name: "Claude 3.5 Sonnet",
  832. capabilities: {
  833. temperature: true,
  834. reasoning: false,
  835. attachment: true,
  836. toolcall: true,
  837. input: { text: true, audio: false, image: true, video: false, pdf: true },
  838. output: { text: true, audio: false, image: false, video: false, pdf: false },
  839. interleaved: false,
  840. },
  841. cost: {
  842. input: 0.003,
  843. output: 0.015,
  844. cache: { read: 0.0003, write: 0.00375 },
  845. },
  846. limit: {
  847. context: 200000,
  848. output: 8192,
  849. },
  850. status: "active",
  851. options: {},
  852. headers: {},
  853. } as any
  854. test("should replace empty base64 image with error text", () => {
  855. const msgs = [
  856. {
  857. role: "user",
  858. content: [
  859. { type: "text", text: "What is in this image?" },
  860. { type: "image", image: "data:image/png;base64," },
  861. ],
  862. },
  863. ] as any[]
  864. const result = ProviderTransform.message(msgs, mockModel, {})
  865. expect(result).toHaveLength(1)
  866. expect(result[0].content).toHaveLength(2)
  867. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  868. expect(result[0].content[1]).toEqual({
  869. type: "text",
  870. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  871. })
  872. })
  873. test("should keep valid base64 images unchanged", () => {
  874. const validBase64 =
  875. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  876. const msgs = [
  877. {
  878. role: "user",
  879. content: [
  880. { type: "text", text: "What is in this image?" },
  881. { type: "image", image: `data:image/png;base64,${validBase64}` },
  882. ],
  883. },
  884. ] as any[]
  885. const result = ProviderTransform.message(msgs, mockModel, {})
  886. expect(result).toHaveLength(1)
  887. expect(result[0].content).toHaveLength(2)
  888. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  889. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  890. })
  891. test("should handle mixed valid and empty images", () => {
  892. const validBase64 =
  893. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  894. const msgs = [
  895. {
  896. role: "user",
  897. content: [
  898. { type: "text", text: "Compare these images" },
  899. { type: "image", image: `data:image/png;base64,${validBase64}` },
  900. { type: "image", image: "data:image/jpeg;base64," },
  901. ],
  902. },
  903. ] as any[]
  904. const result = ProviderTransform.message(msgs, mockModel, {})
  905. expect(result).toHaveLength(1)
  906. expect(result[0].content).toHaveLength(3)
  907. expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
  908. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  909. expect(result[0].content[2]).toEqual({
  910. type: "text",
  911. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  912. })
  913. })
  914. })
  915. describe("ProviderTransform.message - anthropic empty content filtering", () => {
  916. const anthropicModel = {
  917. id: "anthropic/claude-3-5-sonnet",
  918. providerID: "anthropic",
  919. api: {
  920. id: "claude-3-5-sonnet-20241022",
  921. url: "https://api.anthropic.com",
  922. npm: "@ai-sdk/anthropic",
  923. },
  924. name: "Claude 3.5 Sonnet",
  925. capabilities: {
  926. temperature: true,
  927. reasoning: false,
  928. attachment: true,
  929. toolcall: true,
  930. input: { text: true, audio: false, image: true, video: false, pdf: true },
  931. output: { text: true, audio: false, image: false, video: false, pdf: false },
  932. interleaved: false,
  933. },
  934. cost: {
  935. input: 0.003,
  936. output: 0.015,
  937. cache: { read: 0.0003, write: 0.00375 },
  938. },
  939. limit: {
  940. context: 200000,
  941. output: 8192,
  942. },
  943. status: "active",
  944. options: {},
  945. headers: {},
  946. } as any
  947. test("filters out messages with empty string content", () => {
  948. const msgs = [
  949. { role: "user", content: "Hello" },
  950. { role: "assistant", content: "" },
  951. { role: "user", content: "World" },
  952. ] as any[]
  953. const result = ProviderTransform.message(msgs, anthropicModel, {})
  954. expect(result).toHaveLength(2)
  955. expect(result[0].content).toBe("Hello")
  956. expect(result[1].content).toBe("World")
  957. })
  958. test("filters out empty text parts from array content", () => {
  959. const msgs = [
  960. {
  961. role: "assistant",
  962. content: [
  963. { type: "text", text: "" },
  964. { type: "text", text: "Hello" },
  965. { type: "text", text: "" },
  966. ],
  967. },
  968. ] as any[]
  969. const result = ProviderTransform.message(msgs, anthropicModel, {})
  970. expect(result).toHaveLength(1)
  971. expect(result[0].content).toHaveLength(1)
  972. expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  973. })
  974. test("filters out empty reasoning parts from array content", () => {
  975. const msgs = [
  976. {
  977. role: "assistant",
  978. content: [
  979. { type: "reasoning", text: "" },
  980. { type: "text", text: "Answer" },
  981. { type: "reasoning", text: "" },
  982. ],
  983. },
  984. ] as any[]
  985. const result = ProviderTransform.message(msgs, anthropicModel, {})
  986. expect(result).toHaveLength(1)
  987. expect(result[0].content).toHaveLength(1)
  988. expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  989. })
  990. test("removes entire message when all parts are empty", () => {
  991. const msgs = [
  992. { role: "user", content: "Hello" },
  993. {
  994. role: "assistant",
  995. content: [
  996. { type: "text", text: "" },
  997. { type: "reasoning", text: "" },
  998. ],
  999. },
  1000. { role: "user", content: "World" },
  1001. ] as any[]
  1002. const result = ProviderTransform.message(msgs, anthropicModel, {})
  1003. expect(result).toHaveLength(2)
  1004. expect(result[0].content).toBe("Hello")
  1005. expect(result[1].content).toBe("World")
  1006. })
  1007. test("keeps non-text/reasoning parts even if text parts are empty", () => {
  1008. const msgs = [
  1009. {
  1010. role: "assistant",
  1011. content: [
  1012. { type: "text", text: "" },
  1013. { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
  1014. ],
  1015. },
  1016. ] as any[]
  1017. const result = ProviderTransform.message(msgs, anthropicModel, {})
  1018. expect(result).toHaveLength(1)
  1019. expect(result[0].content).toHaveLength(1)
  1020. expect(result[0].content[0]).toEqual({
  1021. type: "tool-call",
  1022. toolCallId: "123",
  1023. toolName: "bash",
  1024. input: { command: "ls" },
  1025. })
  1026. })
  1027. test("keeps messages with valid text alongside empty parts", () => {
  1028. const msgs = [
  1029. {
  1030. role: "assistant",
  1031. content: [
  1032. { type: "reasoning", text: "Thinking..." },
  1033. { type: "text", text: "" },
  1034. { type: "text", text: "Result" },
  1035. ],
  1036. },
  1037. ] as any[]
  1038. const result = ProviderTransform.message(msgs, anthropicModel, {})
  1039. expect(result).toHaveLength(1)
  1040. expect(result[0].content).toHaveLength(2)
  1041. expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
  1042. expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  1043. })
  1044. test("filters empty content for bedrock provider", () => {
  1045. const bedrockModel = {
  1046. ...anthropicModel,
  1047. id: "amazon-bedrock/anthropic.claude-opus-4-6",
  1048. providerID: "amazon-bedrock",
  1049. api: {
  1050. id: "anthropic.claude-opus-4-6",
  1051. url: "https://bedrock-runtime.us-east-1.amazonaws.com",
  1052. npm: "@ai-sdk/amazon-bedrock",
  1053. },
  1054. }
  1055. const msgs = [
  1056. { role: "user", content: "Hello" },
  1057. { role: "assistant", content: "" },
  1058. {
  1059. role: "assistant",
  1060. content: [
  1061. { type: "text", text: "" },
  1062. { type: "text", text: "Answer" },
  1063. ],
  1064. },
  1065. ] as any[]
  1066. const result = ProviderTransform.message(msgs, bedrockModel, {})
  1067. expect(result).toHaveLength(2)
  1068. expect(result[0].content).toBe("Hello")
  1069. expect(result[1].content).toHaveLength(1)
  1070. expect(result[1].content[0]).toEqual({ type: "text", text: "Answer" })
  1071. })
  1072. test("does not filter for non-anthropic providers", () => {
  1073. const openaiModel = {
  1074. ...anthropicModel,
  1075. providerID: "openai",
  1076. api: {
  1077. id: "gpt-4",
  1078. url: "https://api.openai.com",
  1079. npm: "@ai-sdk/openai",
  1080. },
  1081. }
  1082. const msgs = [
  1083. { role: "assistant", content: "" },
  1084. {
  1085. role: "assistant",
  1086. content: [{ type: "text", text: "" }],
  1087. },
  1088. ] as any[]
  1089. const result = ProviderTransform.message(msgs, openaiModel, {})
  1090. expect(result).toHaveLength(2)
  1091. expect(result[0].content).toBe("")
  1092. expect(result[1].content).toHaveLength(1)
  1093. })
  1094. })
  1095. describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  1096. const openaiModel = {
  1097. id: "openai/gpt-5",
  1098. providerID: "openai",
  1099. api: {
  1100. id: "gpt-5",
  1101. url: "https://api.openai.com",
  1102. npm: "@ai-sdk/openai",
  1103. },
  1104. name: "GPT-5",
  1105. capabilities: {
  1106. temperature: true,
  1107. reasoning: true,
  1108. attachment: true,
  1109. toolcall: true,
  1110. input: { text: true, audio: false, image: true, video: false, pdf: false },
  1111. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1112. interleaved: false,
  1113. },
  1114. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  1115. limit: { context: 128000, output: 4096 },
  1116. status: "active",
  1117. options: {},
  1118. headers: {},
  1119. } as any
  1120. test("preserves itemId and reasoningEncryptedContent when store=false", () => {
  1121. const msgs = [
  1122. {
  1123. role: "assistant",
  1124. content: [
  1125. {
  1126. type: "reasoning",
  1127. text: "thinking...",
  1128. providerOptions: {
  1129. openai: {
  1130. itemId: "rs_123",
  1131. reasoningEncryptedContent: "encrypted",
  1132. },
  1133. },
  1134. },
  1135. {
  1136. type: "text",
  1137. text: "Hello",
  1138. providerOptions: {
  1139. openai: {
  1140. itemId: "msg_456",
  1141. },
  1142. },
  1143. },
  1144. ],
  1145. },
  1146. ] as any[]
  1147. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  1148. expect(result).toHaveLength(1)
  1149. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  1150. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  1151. })
  1152. test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
  1153. const zenModel = {
  1154. ...openaiModel,
  1155. providerID: "zen",
  1156. }
  1157. const msgs = [
  1158. {
  1159. role: "assistant",
  1160. content: [
  1161. {
  1162. type: "reasoning",
  1163. text: "thinking...",
  1164. providerOptions: {
  1165. openai: {
  1166. itemId: "rs_123",
  1167. reasoningEncryptedContent: "encrypted",
  1168. },
  1169. },
  1170. },
  1171. {
  1172. type: "text",
  1173. text: "Hello",
  1174. providerOptions: {
  1175. openai: {
  1176. itemId: "msg_456",
  1177. },
  1178. },
  1179. },
  1180. ],
  1181. },
  1182. ] as any[]
  1183. const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
  1184. expect(result).toHaveLength(1)
  1185. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  1186. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  1187. })
  1188. test("preserves other openai options including itemId", () => {
  1189. const msgs = [
  1190. {
  1191. role: "assistant",
  1192. content: [
  1193. {
  1194. type: "text",
  1195. text: "Hello",
  1196. providerOptions: {
  1197. openai: {
  1198. itemId: "msg_123",
  1199. otherOption: "value",
  1200. },
  1201. },
  1202. },
  1203. ],
  1204. },
  1205. ] as any[]
  1206. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  1207. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1208. expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  1209. })
  1210. test("preserves metadata for openai package when store is true", () => {
  1211. const msgs = [
  1212. {
  1213. role: "assistant",
  1214. content: [
  1215. {
  1216. type: "text",
  1217. text: "Hello",
  1218. providerOptions: {
  1219. openai: {
  1220. itemId: "msg_123",
  1221. },
  1222. },
  1223. },
  1224. ],
  1225. },
  1226. ] as any[]
  1227. // openai package preserves itemId regardless of store value
  1228. const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
  1229. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1230. })
  1231. test("preserves metadata for non-openai packages when store is false", () => {
  1232. const anthropicModel = {
  1233. ...openaiModel,
  1234. providerID: "anthropic",
  1235. api: {
  1236. id: "claude-3",
  1237. url: "https://api.anthropic.com",
  1238. npm: "@ai-sdk/anthropic",
  1239. },
  1240. }
  1241. const msgs = [
  1242. {
  1243. role: "assistant",
  1244. content: [
  1245. {
  1246. type: "text",
  1247. text: "Hello",
  1248. providerOptions: {
  1249. openai: {
  1250. itemId: "msg_123",
  1251. },
  1252. },
  1253. },
  1254. ],
  1255. },
  1256. ] as any[]
  1257. // store=false preserves metadata for non-openai packages
  1258. const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
  1259. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1260. })
  1261. test("preserves metadata using providerID key when store is false", () => {
  1262. const opencodeModel = {
  1263. ...openaiModel,
  1264. providerID: "opencode",
  1265. api: {
  1266. id: "opencode-test",
  1267. url: "https://api.opencode.ai",
  1268. npm: "@ai-sdk/openai-compatible",
  1269. },
  1270. }
  1271. const msgs = [
  1272. {
  1273. role: "assistant",
  1274. content: [
  1275. {
  1276. type: "text",
  1277. text: "Hello",
  1278. providerOptions: {
  1279. opencode: {
  1280. itemId: "msg_123",
  1281. otherOption: "value",
  1282. },
  1283. },
  1284. },
  1285. ],
  1286. },
  1287. ] as any[]
  1288. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  1289. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
  1290. expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
  1291. })
  1292. test("preserves itemId across all providerOptions keys", () => {
  1293. const opencodeModel = {
  1294. ...openaiModel,
  1295. providerID: "opencode",
  1296. api: {
  1297. id: "opencode-test",
  1298. url: "https://api.opencode.ai",
  1299. npm: "@ai-sdk/openai-compatible",
  1300. },
  1301. }
  1302. const msgs = [
  1303. {
  1304. role: "assistant",
  1305. providerOptions: {
  1306. openai: { itemId: "msg_root" },
  1307. opencode: { itemId: "msg_opencode" },
  1308. extra: { itemId: "msg_extra" },
  1309. },
  1310. content: [
  1311. {
  1312. type: "text",
  1313. text: "Hello",
  1314. providerOptions: {
  1315. openai: { itemId: "msg_openai_part" },
  1316. opencode: { itemId: "msg_opencode_part" },
  1317. extra: { itemId: "msg_extra_part" },
  1318. },
  1319. },
  1320. ],
  1321. },
  1322. ] as any[]
  1323. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  1324. expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
  1325. expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
  1326. expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
  1327. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
  1328. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
  1329. expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  1330. })
  1331. test("does not strip metadata for non-openai packages when store is not false", () => {
  1332. const anthropicModel = {
  1333. ...openaiModel,
  1334. providerID: "anthropic",
  1335. api: {
  1336. id: "claude-3",
  1337. url: "https://api.anthropic.com",
  1338. npm: "@ai-sdk/anthropic",
  1339. },
  1340. }
  1341. const msgs = [
  1342. {
  1343. role: "assistant",
  1344. content: [
  1345. {
  1346. type: "text",
  1347. text: "Hello",
  1348. providerOptions: {
  1349. openai: {
  1350. itemId: "msg_123",
  1351. },
  1352. },
  1353. },
  1354. ],
  1355. },
  1356. ] as any[]
  1357. const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
  1358. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1359. })
  1360. })
  1361. describe("ProviderTransform.message - providerOptions key remapping", () => {
  1362. const createModel = (providerID: string, npm: string) =>
  1363. ({
  1364. id: `${providerID}/test-model`,
  1365. providerID,
  1366. api: {
  1367. id: "test-model",
  1368. url: "https://api.test.com",
  1369. npm,
  1370. },
  1371. name: "Test Model",
  1372. capabilities: {
  1373. temperature: true,
  1374. reasoning: false,
  1375. attachment: true,
  1376. toolcall: true,
  1377. input: { text: true, audio: false, image: true, video: false, pdf: true },
  1378. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1379. interleaved: false,
  1380. },
  1381. cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
  1382. limit: { context: 128000, output: 8192 },
  1383. status: "active",
  1384. options: {},
  1385. headers: {},
  1386. }) as any
  1387. test("azure keeps 'azure' key and does not remap to 'openai'", () => {
  1388. const model = createModel("azure", "@ai-sdk/azure")
  1389. const msgs = [
  1390. {
  1391. role: "user",
  1392. content: "Hello",
  1393. providerOptions: {
  1394. azure: { someOption: "value" },
  1395. },
  1396. },
  1397. ] as any[]
  1398. const result = ProviderTransform.message(msgs, model, {})
  1399. expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
  1400. expect(result[0].providerOptions?.openai).toBeUndefined()
  1401. })
  1402. test("azure cognitive services remaps providerID to 'azure' key", () => {
  1403. const model = createModel("azure-cognitive-services", "@ai-sdk/azure")
  1404. const msgs = [
  1405. {
  1406. role: "user",
  1407. content: [
  1408. {
  1409. type: "text",
  1410. text: "Hello",
  1411. providerOptions: {
  1412. "azure-cognitive-services": { part: true },
  1413. },
  1414. },
  1415. ],
  1416. providerOptions: {
  1417. "azure-cognitive-services": { someOption: "value" },
  1418. },
  1419. },
  1420. ] as any[]
  1421. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1422. const part = result[0].content[0] as any
  1423. expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
  1424. expect(result[0].providerOptions?.["azure-cognitive-services"]).toBeUndefined()
  1425. expect(part.providerOptions?.azure).toEqual({ part: true })
  1426. expect(part.providerOptions?.["azure-cognitive-services"]).toBeUndefined()
  1427. })
  1428. test("copilot remaps providerID to 'copilot' key", () => {
  1429. const model = createModel("github-copilot", "@ai-sdk/github-copilot")
  1430. const msgs = [
  1431. {
  1432. role: "user",
  1433. content: "Hello",
  1434. providerOptions: {
  1435. copilot: { someOption: "value" },
  1436. },
  1437. },
  1438. ] as any[]
  1439. const result = ProviderTransform.message(msgs, model, {})
  1440. expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
  1441. expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  1442. })
  1443. test("bedrock remaps providerID to 'bedrock' key", () => {
  1444. const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
  1445. const msgs = [
  1446. {
  1447. role: "user",
  1448. content: "Hello",
  1449. providerOptions: {
  1450. "my-bedrock": { someOption: "value" },
  1451. },
  1452. },
  1453. ] as any[]
  1454. const result = ProviderTransform.message(msgs, model, {})
  1455. expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
  1456. expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  1457. })
  1458. })
  1459. describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  1460. test("adds cachePoint", () => {
  1461. const model = {
  1462. id: "amazon-bedrock/custom-claude-sonnet-4.5",
  1463. providerID: "amazon-bedrock",
  1464. api: {
  1465. id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
  1466. url: "https://api.test.com",
  1467. npm: "@ai-sdk/amazon-bedrock",
  1468. },
  1469. name: "Custom inference profile",
  1470. capabilities: {},
  1471. options: {},
  1472. headers: {},
  1473. } as any
  1474. const msgs = [
  1475. {
  1476. role: "user",
  1477. content: "Hello",
  1478. },
  1479. ] as any[]
  1480. const result = ProviderTransform.message(msgs, model, {})
  1481. expect(result[0].providerOptions?.bedrock).toEqual(
  1482. expect.objectContaining({
  1483. cachePoint: {
  1484. type: "default",
  1485. },
  1486. }),
  1487. )
  1488. })
  1489. })
  1490. describe("ProviderTransform.message - bedrock caching with non-bedrock providerID", () => {
  1491. test("applies cache options at message level when npm package is amazon-bedrock", () => {
  1492. const model = {
  1493. id: "aws/us.anthropic.claude-opus-4-6-v1",
  1494. providerID: "aws",
  1495. api: {
  1496. id: "us.anthropic.claude-opus-4-6-v1",
  1497. url: "https://bedrock-runtime.us-east-1.amazonaws.com",
  1498. npm: "@ai-sdk/amazon-bedrock",
  1499. },
  1500. name: "Claude Opus 4.6",
  1501. capabilities: {},
  1502. options: {},
  1503. headers: {},
  1504. } as any
  1505. const msgs = [
  1506. {
  1507. role: "system",
  1508. content: [{ type: "text", text: "You are a helpful assistant" }],
  1509. },
  1510. {
  1511. role: "user",
  1512. content: [{ type: "text", text: "Hello" }],
  1513. },
  1514. ] as any[]
  1515. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1516. // Cache should be at the message level and not the content-part level
  1517. expect(result[0].providerOptions?.bedrock).toEqual({
  1518. cachePoint: { type: "default" },
  1519. })
  1520. expect(result[0].content[0].providerOptions?.bedrock).toBeUndefined()
  1521. })
  1522. })
  1523. describe("ProviderTransform.message - cache control on gateway", () => {
  1524. const createModel = (overrides: Partial<any> = {}) =>
  1525. ({
  1526. id: "anthropic/claude-sonnet-4",
  1527. providerID: "vercel",
  1528. api: {
  1529. id: "anthropic/claude-sonnet-4",
  1530. url: "https://ai-gateway.vercel.sh/v3/ai",
  1531. npm: "@ai-sdk/gateway",
  1532. },
  1533. name: "Claude Sonnet 4",
  1534. capabilities: {
  1535. temperature: true,
  1536. reasoning: true,
  1537. attachment: true,
  1538. toolcall: true,
  1539. input: { text: true, audio: false, image: true, video: false, pdf: true },
  1540. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1541. interleaved: false,
  1542. },
  1543. cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
  1544. limit: { context: 200_000, output: 8192 },
  1545. status: "active",
  1546. options: {},
  1547. headers: {},
  1548. ...overrides,
  1549. }) as any
  1550. test("gateway does not set cache control for anthropic models", () => {
  1551. const model = createModel()
  1552. const msgs = [
  1553. {
  1554. role: "system",
  1555. content: [{ type: "text", text: "You are a helpful assistant" }],
  1556. },
  1557. {
  1558. role: "user",
  1559. content: "Hello",
  1560. },
  1561. ] as any[]
  1562. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1563. expect(result[0].content[0].providerOptions).toBeUndefined()
  1564. expect(result[0].providerOptions).toBeUndefined()
  1565. })
  1566. test("non-gateway anthropic keeps existing cache control behavior", () => {
  1567. const model = createModel({
  1568. providerID: "anthropic",
  1569. api: {
  1570. id: "claude-sonnet-4",
  1571. url: "https://api.anthropic.com",
  1572. npm: "@ai-sdk/anthropic",
  1573. },
  1574. })
  1575. const msgs = [
  1576. {
  1577. role: "system",
  1578. content: "You are a helpful assistant",
  1579. },
  1580. {
  1581. role: "user",
  1582. content: "Hello",
  1583. },
  1584. ] as any[]
  1585. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1586. expect(result[0].providerOptions).toEqual({
  1587. anthropic: {
  1588. cacheControl: {
  1589. type: "ephemeral",
  1590. },
  1591. },
  1592. openrouter: {
  1593. cacheControl: {
  1594. type: "ephemeral",
  1595. },
  1596. },
  1597. bedrock: {
  1598. cachePoint: {
  1599. type: "default",
  1600. },
  1601. },
  1602. openaiCompatible: {
  1603. cache_control: {
  1604. type: "ephemeral",
  1605. },
  1606. },
  1607. copilot: {
  1608. copilot_cache_control: {
  1609. type: "ephemeral",
  1610. },
  1611. },
  1612. })
  1613. })
  1614. test("google-vertex-anthropic applies cache control", () => {
  1615. const model = createModel({
  1616. providerID: "google-vertex-anthropic",
  1617. api: {
  1618. id: "google-vertex-anthropic",
  1619. url: "https://us-central1-aiplatform.googleapis.com",
  1620. npm: "@ai-sdk/google-vertex/anthropic",
  1621. },
  1622. id: "claude-sonnet-4@20250514",
  1623. })
  1624. const msgs = [
  1625. {
  1626. role: "system",
  1627. content: "You are a helpful assistant",
  1628. },
  1629. {
  1630. role: "user",
  1631. content: "Hello",
  1632. },
  1633. ] as any[]
  1634. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1635. expect(result[0].providerOptions).toEqual({
  1636. anthropic: {
  1637. cacheControl: {
  1638. type: "ephemeral",
  1639. },
  1640. },
  1641. openrouter: {
  1642. cacheControl: {
  1643. type: "ephemeral",
  1644. },
  1645. },
  1646. bedrock: {
  1647. cachePoint: {
  1648. type: "default",
  1649. },
  1650. },
  1651. openaiCompatible: {
  1652. cache_control: {
  1653. type: "ephemeral",
  1654. },
  1655. },
  1656. copilot: {
  1657. copilot_cache_control: {
  1658. type: "ephemeral",
  1659. },
  1660. },
  1661. })
  1662. })
  1663. })
  1664. describe("ProviderTransform.variants", () => {
// Factory for a minimal model fixture accepted by ProviderTransform.variants.
// Tests override id/providerID/api (and occasionally capabilities, variants,
// or release_date) to steer provider-specific branching under test.
const createMockModel = (overrides: Partial<any> = {}): any => ({
  id: "test/test-model",
  providerID: "test",
  api: {
    id: "test-model",
    url: "https://api.test.com",
    npm: "@ai-sdk/openai",
  },
  name: "Test Model",
  capabilities: {
    temperature: true,
    reasoning: true, // defaults to reasoning-capable; variants() returns {} otherwise
    attachment: true,
    toolcall: true,
    input: { text: true, audio: false, image: true, video: false, pdf: false },
    output: { text: true, audio: false, image: false, video: false, pdf: false },
    interleaved: false,
  },
  cost: {
    input: 0.001,
    output: 0.002,
    cache: { read: 0.0001, write: 0.0002 },
  },
  limit: {
    context: 200_000,
    output: 64_000,
  },
  status: "active",
  options: {},
  headers: {},
  release_date: "2024-01-01",
  // Shallow merge: an override like { capabilities: { reasoning: false } }
  // replaces the entire capabilities object, not just the one key.
  ...overrides,
})
  1698. test("returns empty object when model has no reasoning capabilities", () => {
  1699. const model = createMockModel({
  1700. capabilities: { reasoning: false },
  1701. })
  1702. const result = ProviderTransform.variants(model)
  1703. expect(result).toEqual({})
  1704. })
  1705. test("deepseek returns empty object", () => {
  1706. const model = createMockModel({
  1707. id: "deepseek/deepseek-chat",
  1708. providerID: "deepseek",
  1709. api: {
  1710. id: "deepseek-chat",
  1711. url: "https://api.deepseek.com",
  1712. npm: "@ai-sdk/openai-compatible",
  1713. },
  1714. })
  1715. const result = ProviderTransform.variants(model)
  1716. expect(result).toEqual({})
  1717. })
  1718. test("minimax returns empty object", () => {
  1719. const model = createMockModel({
  1720. id: "minimax/minimax-model",
  1721. providerID: "minimax",
  1722. api: {
  1723. id: "minimax-model",
  1724. url: "https://api.minimax.com",
  1725. npm: "@ai-sdk/openai-compatible",
  1726. },
  1727. })
  1728. const result = ProviderTransform.variants(model)
  1729. expect(result).toEqual({})
  1730. })
  1731. test("glm returns empty object", () => {
  1732. const model = createMockModel({
  1733. id: "glm/glm-4",
  1734. providerID: "glm",
  1735. api: {
  1736. id: "glm-4",
  1737. url: "https://api.glm.com",
  1738. npm: "@ai-sdk/openai-compatible",
  1739. },
  1740. })
  1741. const result = ProviderTransform.variants(model)
  1742. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1743. expect(result.low).toEqual({ reasoningEffort: "low" })
  1744. })
  1745. test("mistral returns empty object", () => {
  1746. const model = createMockModel({
  1747. id: "mistral/mistral-large",
  1748. providerID: "mistral",
  1749. api: {
  1750. id: "mistral-large-latest",
  1751. url: "https://api.mistral.com",
  1752. npm: "@ai-sdk/mistral",
  1753. },
  1754. })
  1755. const result = ProviderTransform.variants(model)
  1756. expect(result).toEqual({})
  1757. })
// Variant selection for models routed through the OpenRouter SDK provider:
// effort ladders depend on the underlying model family encoded in api.id.
describe("@openrouter/ai-sdk-provider", () => {
  test("returns empty object for non-qualifying models", () => {
    const model = createMockModel({
      id: "openrouter/test-model",
      providerID: "openrouter",
      api: {
        id: "test-model",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("gpt models return OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/gpt-4",
      providerID: "openrouter",
      api: {
        id: "gpt-4",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    // Full OpenAI-style ladder, wrapped in OpenRouter's { reasoning: { effort } } shape.
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
  test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/gemini-3-5-pro",
      providerID: "openrouter",
      api: {
        id: "gemini-3-5-pro",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  })
  // kilocode_change start
  test("mercury-2 returns OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/inception/mercury-2",
      providerID: "openrouter",
      api: {
        id: "inception/mercury-2",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
  // kilocode_change end
  test("grok-4 returns empty object", () => {
    const model = createMockModel({
      id: "openrouter/grok-4",
      providerID: "openrouter",
      api: {
        id: "grok-4",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("grok-3-mini returns low and high with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/grok-3-mini",
      providerID: "openrouter",
      api: {
        id: "grok-3-mini",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    // grok-3-mini only supports a two-step ladder, unlike grok-4 (none at all).
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
})
  1846. // kilocode_change start
// Kilo gateway routing: api.id carries "vendor/model"; variant ladders follow
// the underlying vendor's conventions.
describe("@kilocode/kilo-gateway", () => {
  test("claude models return empty variants (reasoning disabled)", () => {
    const model = createMockModel({
      id: "kilo/anthropic/claude-sonnet-4",
      providerID: "kilo",
      capabilities: { reasoning: false },
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual([])
  })
  test("anthropic models in api.id return empty variants (reasoning disabled)", () => {
    const model = createMockModel({
      id: "kilo/anthropic/claude-opus-4",
      providerID: "kilo",
      capabilities: { reasoning: false },
      api: {
        id: "anthropic/claude-opus-4",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual([])
  })
  test("gpt models return OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "kilo/openai/gpt-5",
      providerID: "kilo",
      api: {
        id: "openai/gpt-5",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
  })
  test("gemini-3 models return OPENAI_EFFORTS with reasoning and encrypted content", () => {
    // NOTE(review): only the effort key set is asserted here; the "encrypted
    // content" part of the title is not verified by this test.
    const model = createMockModel({
      id: "kilo/google/gemini-3-pro",
      providerID: "kilo",
      api: {
        id: "google/gemini-3-pro",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  })
  test("non-qualifying models return empty object", () => {
    const model = createMockModel({
      id: "kilo/meta/llama-4",
      providerID: "kilo",
      api: {
        id: "meta/llama-4",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("grok-3-mini returns low and high with reasoning", () => {
    const model = createMockModel({
      id: "kilo/x-ai/grok-3-mini",
      providerID: "kilo",
      api: {
        id: "x-ai/grok-3-mini",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
  test("codex models return OPENAI_EFFORTS with object-based reasoning format", () => {
    const model = createMockModel({
      id: "kilo/openai/gpt-5.2-codex",
      providerID: "kilo",
      api: {
        id: "openai/gpt-5.2-codex",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
    expect(result.xhigh).toEqual({ reasoning: { effort: "xhigh" } })
  })
  // kilocode_change start
  test("mercury-2 uses server-provided variants from kilo gateway", () => {
    // When the gateway supplies a variants map on the model, it is returned
    // verbatim instead of any locally computed effort ladder.
    const serverVariants = {
      low: { reasoningEffort: "low" },
      medium: { reasoningEffort: "medium" },
      high: { reasoningEffort: "high" },
    }
    const model = createMockModel({
      id: "kilo/inception/mercury-2",
      providerID: "kilo",
      api: {
        id: "inception/mercury-2",
        url: "https://gateway.kilo.ai",
        npm: "@kilocode/kilo-gateway",
      },
      variants: serverVariants,
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual(serverVariants)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  })
  // kilocode_change end
})
  1970. // kilocode_change end
// Vercel AI gateway: anthropic 4.6-generation models (hyphen or dot version
// format) use adaptive thinking; older anthropic models use fixed thinking
// budgets; everything else falls back to OpenAI-style reasoningEffort.
describe("@ai-sdk/gateway", () => {
  test("anthropic sonnet 4.6 models return adaptive thinking options", () => {
    const model = createMockModel({
      id: "anthropic/claude-sonnet-4-6",
      providerID: "gateway",
      api: {
        id: "anthropic/claude-sonnet-4-6",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
    expect(result.medium).toEqual({
      thinking: {
        type: "adaptive",
      },
      effort: "medium",
    })
  })
  test("anthropic sonnet 4.6 dot-format models return adaptive thinking options", () => {
    // Same model as above, but api.id uses the "4.6" dot notation.
    const model = createMockModel({
      id: "anthropic/claude-sonnet-4-6",
      providerID: "gateway",
      api: {
        id: "anthropic/claude-sonnet-4.6",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
    expect(result.medium).toEqual({
      thinking: {
        type: "adaptive",
      },
      effort: "medium",
    })
  })
  test("anthropic opus 4.6 dot-format models return adaptive thinking options", () => {
    const model = createMockModel({
      id: "anthropic/claude-opus-4-6",
      providerID: "gateway",
      api: {
        id: "anthropic/claude-opus-4.6",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
    expect(result.high).toEqual({
      thinking: {
        type: "adaptive",
      },
      effort: "high",
    })
  })
  test("anthropic models return anthropic thinking options", () => {
    // Pre-4.6 anthropic models: explicit token budgets instead of adaptive mode.
    const model = createMockModel({
      id: "anthropic/claude-sonnet-4",
      providerID: "gateway",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["high", "max"])
    expect(result.high).toEqual({
      thinking: {
        type: "enabled",
        budgetTokens: 16000,
      },
    })
    expect(result.max).toEqual({
      thinking: {
        type: "enabled",
        budgetTokens: 31999,
      },
    })
  })
  test("returns OPENAI_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "gateway/gateway-model",
      providerID: "gateway",
      api: {
        id: "gateway-model",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
// GitHub Copilot: all models get low/medium/high with reasoning summaries and
// encrypted reasoning content; "xhigh" is gated per model family/version.
describe("@ai-sdk/github-copilot", () => {
  test("standard models return low, medium, high", () => {
    const model = createMockModel({
      id: "gpt-4.5",
      providerID: "github-copilot",
      api: {
        id: "gpt-4.5",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({
      reasoningEffort: "low",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })
  test("gpt-5.1-codex-max includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.1-codex-max",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.1-codex-max",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  })
  test("gpt-5.1-codex-mini does not include xhigh", () => {
    // Only the "-max" 5.1 codex variant qualifies for xhigh.
    const model = createMockModel({
      id: "gpt-5.1-codex-mini",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.1-codex-mini",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  })
  test("gpt-5.1-codex does not include xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.1-codex",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.1-codex",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  })
  test("gpt-5.2 includes xhigh", () => {
    // From the 5.2 generation on, xhigh is available across the board.
    const model = createMockModel({
      id: "gpt-5.2",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.2",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
    expect(result.xhigh).toEqual({
      reasoningEffort: "xhigh",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })
  test("gpt-5.2-codex includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.2-codex",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.2-codex",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  })
  test("gpt-5.3-codex includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.3-codex",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.3-codex",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  })
  test("gpt-5.4 includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.4",
      release_date: "2026-03-05",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.4",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  })
})
  2187. // kilocode_change start
  2188. describe("@ai-sdk/azure", () => {
  2189. test("gpt-5.4 includes xhigh", () => {
  2190. const model = createMockModel({
  2191. id: "gpt-5.4",
  2192. release_date: "2026-03-05",
  2193. providerID: "azure",
  2194. api: {
  2195. id: "gpt-5.4",
  2196. url: "https://resource.openai.azure.com/openai",
  2197. npm: "@ai-sdk/azure",
  2198. },
  2199. })
  2200. const result = ProviderTransform.variants(model)
  2201. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  2202. expect(result.xhigh).toEqual({
  2203. reasoningEffort: "xhigh",
  2204. reasoningSummary: "auto",
  2205. include: ["reasoning.encrypted_content"],
  2206. })
  2207. })
  2208. })
  2209. // kilocode_change end
  2210. describe("@ai-sdk/cerebras", () => {
  2211. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  2212. const model = createMockModel({
  2213. id: "cerebras/llama-4",
  2214. providerID: "cerebras",
  2215. api: {
  2216. id: "llama-4-sc",
  2217. url: "https://api.cerebras.ai",
  2218. npm: "@ai-sdk/cerebras",
  2219. },
  2220. })
  2221. const result = ProviderTransform.variants(model)
  2222. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2223. expect(result.low).toEqual({ reasoningEffort: "low" })
  2224. expect(result.high).toEqual({ reasoningEffort: "high" })
  2225. })
  2226. })
  2227. describe("@ai-sdk/togetherai", () => {
  2228. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  2229. const model = createMockModel({
  2230. id: "togetherai/llama-4",
  2231. providerID: "togetherai",
  2232. api: {
  2233. id: "llama-4-sc",
  2234. url: "https://api.togetherai.com",
  2235. npm: "@ai-sdk/togetherai",
  2236. },
  2237. })
  2238. const result = ProviderTransform.variants(model)
  2239. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2240. expect(result.low).toEqual({ reasoningEffort: "low" })
  2241. expect(result.high).toEqual({ reasoningEffort: "high" })
  2242. })
  2243. })
  2244. describe("@ai-sdk/xai", () => {
  2245. test("grok-3 returns empty object", () => {
  2246. const model = createMockModel({
  2247. id: "xai/grok-3",
  2248. providerID: "xai",
  2249. api: {
  2250. id: "grok-3",
  2251. url: "https://api.x.ai",
  2252. npm: "@ai-sdk/xai",
  2253. },
  2254. })
  2255. const result = ProviderTransform.variants(model)
  2256. expect(result).toEqual({})
  2257. })
  2258. test("grok-3-mini returns low and high with reasoningEffort", () => {
  2259. const model = createMockModel({
  2260. id: "xai/grok-3-mini",
  2261. providerID: "xai",
  2262. api: {
  2263. id: "grok-3-mini",
  2264. url: "https://api.x.ai",
  2265. npm: "@ai-sdk/xai",
  2266. },
  2267. })
  2268. const result = ProviderTransform.variants(model)
  2269. expect(Object.keys(result)).toEqual(["low", "high"])
  2270. expect(result.low).toEqual({ reasoningEffort: "low" })
  2271. expect(result.high).toEqual({ reasoningEffort: "high" })
  2272. })
  2273. })
  2274. describe("@ai-sdk/deepinfra", () => {
  2275. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  2276. const model = createMockModel({
  2277. id: "deepinfra/llama-4",
  2278. providerID: "deepinfra",
  2279. api: {
  2280. id: "llama-4-sc",
  2281. url: "https://api.deepinfra.com",
  2282. npm: "@ai-sdk/deepinfra",
  2283. },
  2284. })
  2285. const result = ProviderTransform.variants(model)
  2286. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2287. expect(result.low).toEqual({ reasoningEffort: "low" })
  2288. expect(result.high).toEqual({ reasoningEffort: "high" })
  2289. })
  2290. })
// Generic OpenAI-compatible endpoints fall back to the widely supported
// low/medium/high ladder with plain reasoningEffort values.
describe("@ai-sdk/openai-compatible", () => {
  test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "custom-provider/custom-model",
      providerID: "custom-provider",
      api: {
        id: "custom-model",
        url: "https://api.custom.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
  // kilocode_change start
  test("mercury-2 returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "inception/mercury-2",
      providerID: "inception",
      api: {
        id: "mercury-2",
        url: "https://api.inceptionlabs.ai",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
  // kilocode_change end
})
  2325. describe("@ai-sdk/azure", () => {
  2326. test("o1-mini returns empty object", () => {
  2327. const model = createMockModel({
  2328. id: "o1-mini",
  2329. providerID: "azure",
  2330. api: {
  2331. id: "o1-mini",
  2332. url: "https://azure.com",
  2333. npm: "@ai-sdk/azure",
  2334. },
  2335. })
  2336. const result = ProviderTransform.variants(model)
  2337. expect(result).toEqual({})
  2338. })
  2339. test("standard azure models return custom efforts with reasoningSummary", () => {
  2340. const model = createMockModel({
  2341. id: "o1",
  2342. providerID: "azure",
  2343. api: {
  2344. id: "o1",
  2345. url: "https://azure.com",
  2346. npm: "@ai-sdk/azure",
  2347. },
  2348. })
  2349. const result = ProviderTransform.variants(model)
  2350. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2351. expect(result.low).toEqual({
  2352. reasoningEffort: "low",
  2353. reasoningSummary: "auto",
  2354. include: ["reasoning.encrypted_content"],
  2355. })
  2356. })
  2357. test("gpt-5 adds minimal effort", () => {
  2358. const model = createMockModel({
  2359. id: "gpt-5",
  2360. providerID: "azure",
  2361. api: {
  2362. id: "gpt-5",
  2363. url: "https://azure.com",
  2364. npm: "@ai-sdk/azure",
  2365. },
  2366. })
  2367. const result = ProviderTransform.variants(model)
  2368. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  2369. })
  2370. })
// Direct OpenAI provider: effort ladder grows with the model's release date
// ("none" after 2025-11-13, "xhigh" after 2025-12-04); gpt-5-pro is excluded.
describe("@ai-sdk/openai", () => {
  test("gpt-5-pro returns empty object", () => {
    const model = createMockModel({
      id: "gpt-5-pro",
      providerID: "openai",
      api: {
        id: "gpt-5-pro",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("standard openai models return custom efforts with reasoningSummary", () => {
    const model = createMockModel({
      id: "gpt-5",
      providerID: "openai",
      api: {
        id: "gpt-5",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      release_date: "2024-06-01", // predates both the "none" and "xhigh" cutoffs
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
    expect(result.low).toEqual({
      reasoningEffort: "low",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })
  test("models after 2025-11-13 include 'none' effort", () => {
    const model = createMockModel({
      id: "gpt-5-nano",
      providerID: "openai",
      api: {
        id: "gpt-5-nano",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      release_date: "2025-11-14", // one day past the "none" cutoff
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
  })
  test("models after 2025-12-04 include 'xhigh' effort", () => {
    const model = createMockModel({
      id: "openai/gpt-5-chat",
      providerID: "openai",
      api: {
        id: "gpt-5-chat",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      release_date: "2025-12-05", // one day past the "xhigh" cutoff
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  })
})
  2433. describe("@ai-sdk/anthropic", () => {
  2434. test("sonnet 4.6 returns adaptive thinking options", () => {
  2435. const model = createMockModel({
  2436. id: "anthropic/claude-sonnet-4-6",
  2437. providerID: "anthropic",
  2438. api: {
  2439. id: "claude-sonnet-4-6",
  2440. url: "https://api.anthropic.com",
  2441. npm: "@ai-sdk/anthropic",
  2442. },
  2443. })
  2444. const result = ProviderTransform.variants(model)
  2445. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  2446. expect(result.high).toEqual({
  2447. thinking: {
  2448. type: "adaptive",
  2449. },
  2450. effort: "high",
  2451. })
  2452. })
  2453. test("returns high and max with thinking config", () => {
  2454. const model = createMockModel({
  2455. id: "anthropic/claude-4",
  2456. providerID: "anthropic",
  2457. api: {
  2458. id: "claude-4",
  2459. url: "https://api.anthropic.com",
  2460. npm: "@ai-sdk/anthropic",
  2461. },
  2462. })
  2463. const result = ProviderTransform.variants(model)
  2464. expect(Object.keys(result)).toEqual(["high", "max"])
  2465. expect(result.high).toEqual({
  2466. thinking: {
  2467. type: "enabled",
  2468. budgetTokens: 16000,
  2469. },
  2470. })
  2471. expect(result.max).toEqual({
  2472. thinking: {
  2473. type: "enabled",
  2474. budgetTokens: 31999,
  2475. },
  2476. })
  2477. })
  2478. })
  2479. describe("@ai-sdk/amazon-bedrock", () => {
  2480. test("anthropic sonnet 4.6 returns adaptive reasoning options", () => {
  2481. const model = createMockModel({
  2482. id: "bedrock/anthropic-claude-sonnet-4-6",
  2483. providerID: "bedrock",
  2484. api: {
  2485. id: "anthropic.claude-sonnet-4-6",
  2486. url: "https://bedrock.amazonaws.com",
  2487. npm: "@ai-sdk/amazon-bedrock",
  2488. },
  2489. })
  2490. const result = ProviderTransform.variants(model)
  2491. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  2492. expect(result.max).toEqual({
  2493. reasoningConfig: {
  2494. type: "adaptive",
  2495. maxReasoningEffort: "max",
  2496. },
  2497. })
  2498. })
  2499. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
  2500. const model = createMockModel({
  2501. id: "bedrock/llama-4",
  2502. providerID: "bedrock",
  2503. api: {
  2504. id: "llama-4-sc",
  2505. url: "https://bedrock.amazonaws.com",
  2506. npm: "@ai-sdk/amazon-bedrock",
  2507. },
  2508. })
  2509. const result = ProviderTransform.variants(model)
  2510. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2511. expect(result.low).toEqual({
  2512. reasoningConfig: {
  2513. type: "enabled",
  2514. maxReasoningEffort: "low",
  2515. },
  2516. })
  2517. })
  2518. })
// Google Gemini: the 2.5 generation uses numeric thinkingBudget values, while
// other generations use the discrete thinkingLevel setting.
describe("@ai-sdk/google", () => {
  test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
    const model = createMockModel({
      id: "google/gemini-2.5-pro",
      providerID: "google",
      api: {
        id: "gemini-2.5-pro",
        url: "https://generativelanguage.googleapis.com",
        npm: "@ai-sdk/google",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["high", "max"])
    expect(result.high).toEqual({
      thinkingConfig: {
        includeThoughts: true,
        thinkingBudget: 16000,
      },
    })
    expect(result.max).toEqual({
      thinkingConfig: {
        includeThoughts: true,
        thinkingBudget: 24576,
      },
    })
  })
  test("other gemini models return low and high with thinkingLevel", () => {
    const model = createMockModel({
      id: "google/gemini-2.0-pro",
      providerID: "google",
      api: {
        id: "gemini-2.0-pro",
        url: "https://generativelanguage.googleapis.com",
        npm: "@ai-sdk/google",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({
      thinkingConfig: {
        includeThoughts: true,
        thinkingLevel: "low",
      },
    })
    expect(result.high).toEqual({
      thinkingConfig: {
        includeThoughts: true,
        thinkingLevel: "high",
      },
    })
  })
})
  2571. describe("@ai-sdk/google-vertex", () => {
  2572. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  2573. const model = createMockModel({
  2574. id: "google-vertex/gemini-2.5-pro",
  2575. providerID: "google-vertex",
  2576. api: {
  2577. id: "gemini-2.5-pro",
  2578. url: "https://vertexai.googleapis.com",
  2579. npm: "@ai-sdk/google-vertex",
  2580. },
  2581. })
  2582. const result = ProviderTransform.variants(model)
  2583. expect(Object.keys(result)).toEqual(["high", "max"])
  2584. })
  2585. test("other vertex models return low and high with thinkingLevel", () => {
  2586. const model = createMockModel({
  2587. id: "google-vertex/gemini-2.0-pro",
  2588. providerID: "google-vertex",
  2589. api: {
  2590. id: "gemini-2.0-pro",
  2591. url: "https://vertexai.googleapis.com",
  2592. npm: "@ai-sdk/google-vertex",
  2593. },
  2594. })
  2595. const result = ProviderTransform.variants(model)
  2596. expect(Object.keys(result)).toEqual(["low", "high"])
  2597. })
  2598. })
  2599. describe("@ai-sdk/cohere", () => {
  2600. test("returns empty object", () => {
  2601. const model = createMockModel({
  2602. id: "cohere/command-r",
  2603. providerID: "cohere",
  2604. api: {
  2605. id: "command-r",
  2606. url: "https://api.cohere.com",
  2607. npm: "@ai-sdk/cohere",
  2608. },
  2609. })
  2610. const result = ProviderTransform.variants(model)
  2611. expect(result).toEqual({})
  2612. })
  2613. })
  2614. describe("@ai-sdk/groq", () => {
  2615. test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
  2616. const model = createMockModel({
  2617. id: "groq/llama-4",
  2618. providerID: "groq",
  2619. api: {
  2620. id: "llama-4-sc",
  2621. url: "https://api.groq.com",
  2622. npm: "@ai-sdk/groq",
  2623. },
  2624. })
  2625. const result = ProviderTransform.variants(model)
  2626. expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
  2627. expect(result.none).toEqual({
  2628. reasoningEffort: "none",
  2629. })
  2630. expect(result.low).toEqual({
  2631. reasoningEffort: "low",
  2632. })
  2633. })
  2634. })
  2635. describe("@ai-sdk/perplexity", () => {
  2636. test("returns empty object", () => {
  2637. const model = createMockModel({
  2638. id: "perplexity/sonar-plus",
  2639. providerID: "perplexity",
  2640. api: {
  2641. id: "sonar-plus",
  2642. url: "https://api.perplexity.ai",
  2643. npm: "@ai-sdk/perplexity",
  2644. },
  2645. })
  2646. const result = ProviderTransform.variants(model)
  2647. expect(result).toEqual({})
  2648. })
  2649. })
  2650. describe("@jerome-benoit/sap-ai-provider-v2", () => {
  2651. test("anthropic models return thinking variants", () => {
  2652. const model = createMockModel({
  2653. id: "sap-ai-core/anthropic--claude-sonnet-4",
  2654. providerID: "sap-ai-core",
  2655. api: {
  2656. id: "anthropic--claude-sonnet-4",
  2657. url: "https://api.ai.sap",
  2658. npm: "@jerome-benoit/sap-ai-provider-v2",
  2659. },
  2660. })
  2661. const result = ProviderTransform.variants(model)
  2662. expect(Object.keys(result)).toEqual(["high", "max"])
  2663. expect(result.high).toEqual({
  2664. thinking: {
  2665. type: "enabled",
  2666. budgetTokens: 16000,
  2667. },
  2668. })
  2669. expect(result.max).toEqual({
  2670. thinking: {
  2671. type: "enabled",
  2672. budgetTokens: 31999,
  2673. },
  2674. })
  2675. })
  2676. test("anthropic 4.6 models return adaptive thinking variants", () => {
  2677. const model = createMockModel({
  2678. id: "sap-ai-core/anthropic--claude-sonnet-4-6",
  2679. providerID: "sap-ai-core",
  2680. api: {
  2681. id: "anthropic--claude-sonnet-4-6",
  2682. url: "https://api.ai.sap",
  2683. npm: "@jerome-benoit/sap-ai-provider-v2",
  2684. },
  2685. })
  2686. const result = ProviderTransform.variants(model)
  2687. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  2688. expect(result.low).toEqual({
  2689. thinking: {
  2690. type: "adaptive",
  2691. },
  2692. effort: "low",
  2693. })
  2694. expect(result.max).toEqual({
  2695. thinking: {
  2696. type: "adaptive",
  2697. },
  2698. effort: "max",
  2699. })
  2700. })
  2701. test("gemini 2.5 models return thinkingConfig variants", () => {
  2702. const model = createMockModel({
  2703. id: "sap-ai-core/gcp--gemini-2.5-pro",
  2704. providerID: "sap-ai-core",
  2705. api: {
  2706. id: "gcp--gemini-2.5-pro",
  2707. url: "https://api.ai.sap",
  2708. npm: "@jerome-benoit/sap-ai-provider-v2",
  2709. },
  2710. })
  2711. const result = ProviderTransform.variants(model)
  2712. expect(Object.keys(result)).toEqual(["high", "max"])
  2713. expect(result.high).toEqual({
  2714. thinkingConfig: {
  2715. includeThoughts: true,
  2716. thinkingBudget: 16000,
  2717. },
  2718. })
  2719. expect(result.max).toEqual({
  2720. thinkingConfig: {
  2721. includeThoughts: true,
  2722. thinkingBudget: 24576,
  2723. },
  2724. })
  2725. })
  2726. test("gpt models return reasoningEffort variants", () => {
  2727. const model = createMockModel({
  2728. id: "sap-ai-core/azure-openai--gpt-4o",
  2729. providerID: "sap-ai-core",
  2730. api: {
  2731. id: "azure-openai--gpt-4o",
  2732. url: "https://api.ai.sap",
  2733. npm: "@jerome-benoit/sap-ai-provider-v2",
  2734. },
  2735. })
  2736. const result = ProviderTransform.variants(model)
  2737. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2738. expect(result.low).toEqual({ reasoningEffort: "low" })
  2739. expect(result.high).toEqual({ reasoningEffort: "high" })
  2740. })
  2741. test("o-series models return reasoningEffort variants", () => {
  2742. const model = createMockModel({
  2743. id: "sap-ai-core/azure-openai--o3-mini",
  2744. providerID: "sap-ai-core",
  2745. api: {
  2746. id: "azure-openai--o3-mini",
  2747. url: "https://api.ai.sap",
  2748. npm: "@jerome-benoit/sap-ai-provider-v2",
  2749. },
  2750. })
  2751. const result = ProviderTransform.variants(model)
  2752. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2753. expect(result.low).toEqual({ reasoningEffort: "low" })
  2754. expect(result.high).toEqual({ reasoningEffort: "high" })
  2755. })
  2756. test("sonar models return empty object", () => {
  2757. const model = createMockModel({
  2758. id: "sap-ai-core/perplexity--sonar-pro",
  2759. providerID: "sap-ai-core",
  2760. api: {
  2761. id: "perplexity--sonar-pro",
  2762. url: "https://api.ai.sap",
  2763. npm: "@jerome-benoit/sap-ai-provider-v2",
  2764. },
  2765. })
  2766. const result = ProviderTransform.variants(model)
  2767. expect(result).toEqual({})
  2768. })
  2769. test("mistral models return empty object", () => {
  2770. const model = createMockModel({
  2771. id: "sap-ai-core/mistral--mistral-large",
  2772. providerID: "sap-ai-core",
  2773. api: {
  2774. id: "mistral--mistral-large",
  2775. url: "https://api.ai.sap",
  2776. npm: "@jerome-benoit/sap-ai-provider-v2",
  2777. },
  2778. })
  2779. const result = ProviderTransform.variants(model)
  2780. expect(result).toEqual({})
  2781. })
  2782. })
  2783. // kilocode_change start
  2784. describe("ProviderTransform.smallOptions", () => {
  2785. describe("@kilocode/kilo-gateway", () => {
  2786. test("claude models return reasoningEffort minimal", () => {
  2787. const model = createMockModel({
  2788. id: "kilo/anthropic/claude-sonnet-4",
  2789. providerID: "kilo",
  2790. api: {
  2791. id: "anthropic/claude-sonnet-4",
  2792. url: "https://gateway.kilo.ai",
  2793. npm: "@kilocode/kilo-gateway",
  2794. },
  2795. })
  2796. const result = ProviderTransform.smallOptions(model)
  2797. expect(result).toEqual({ reasoningEffort: "minimal" })
  2798. })
  2799. test("non-claude models use reasoningEffort format", () => {
  2800. const model = createMockModel({
  2801. id: "kilo/openai/gpt-4",
  2802. providerID: "kilo",
  2803. api: {
  2804. id: "openai/gpt-4",
  2805. url: "https://gateway.kilo.ai",
  2806. npm: "@kilocode/kilo-gateway",
  2807. },
  2808. })
  2809. const result = ProviderTransform.smallOptions(model)
  2810. expect(result).toEqual({ reasoningEffort: "minimal" })
  2811. })
  2812. test("google models disable reasoning", () => {
  2813. const model = createMockModel({
  2814. id: "kilo/google/gemini-2.0-flash",
  2815. providerID: "kilo",
  2816. api: {
  2817. id: "google/gemini-2.0-flash",
  2818. url: "https://gateway.kilo.ai",
  2819. npm: "@kilocode/kilo-gateway",
  2820. },
  2821. })
  2822. const result = ProviderTransform.smallOptions(model)
  2823. expect(result).toEqual({ reasoning: { enabled: false } })
  2824. })
  2825. })
  2826. })
// NOTE(review): this describe duplicates the "@ai-sdk/groq" describe earlier
// in this file verbatim (same mock model, same assertions). It sits inside the
// kilocode_change region and looks like a merge/copy artifact — confirm and
// consider removing one copy.
describe("@ai-sdk/groq", () => {
  test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
    const model = createMockModel({
      id: "groq/llama-4",
      providerID: "groq",
      api: {
        id: "llama-4-sc",
        url: "https://api.groq.com",
        npm: "@ai-sdk/groq",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
    expect(result.none).toEqual({
      reasoningEffort: "none",
    })
    expect(result.low).toEqual({
      reasoningEffort: "low",
    })
  })
})
// NOTE(review): this describe duplicates the "@ai-sdk/perplexity" describe
// earlier in this file verbatim. Like the groq duplicate above it, it appears
// to be a merge/copy artifact inside the kilocode_change region — confirm and
// consider removing one copy.
describe("@ai-sdk/perplexity", () => {
  test("returns empty object", () => {
    const model = createMockModel({
      id: "perplexity/sonar-plus",
      providerID: "perplexity",
      api: {
        id: "sonar-plus",
        url: "https://api.perplexity.ai",
        npm: "@ai-sdk/perplexity",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
})
  2863. })
  2864. // kilocode_change end