resolveToolProtocol.spec.ts

import { describe, it, expect } from "vitest"
import { resolveToolProtocol } from "../resolveToolProtocol"
import { TOOL_PROTOCOL, openAiModelInfoSaneDefaults } from "@roo-code/types"
import type { ProviderSettings, ModelInfo } from "@roo-code/types"

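// Resolution order exercised by these tests (a summary inferred from the
// expectations below, not taken from the implementation itself):
//   1. settings.toolProtocol (user profile setting)
//   2. modelInfo.defaultToolProtocol (model default)
//   3. native fallback when modelInfo.supportsNativeTools is true
//   4. XML fallback otherwise (including when modelInfo is undefined)
// A "native" result additionally requires modelInfo.supportsNativeTools;
// without it the resolver falls back to XML.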
describe("resolveToolProtocol", () => {
	describe("Precedence Level 1: User Profile Setting", () => {
		it("should use profile toolProtocol when explicitly set to xml", () => {
			const settings: ProviderSettings = {
				toolProtocol: "xml",
				apiProvider: "anthropic",
			}

			const result = resolveToolProtocol(settings)
			expect(result).toBe(TOOL_PROTOCOL.XML)
		})

		it("should use profile toolProtocol when explicitly set to native", () => {
			const settings: ProviderSettings = {
				toolProtocol: "native",
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: true, // Model supports native tools
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE)
		})

		it("should override model default when profile setting is present", () => {
			const settings: ProviderSettings = {
				toolProtocol: "xml",
				apiProvider: "openai-native",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				defaultToolProtocol: "native",
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Profile setting wins
		})

		it("should override model capability when profile setting is present", () => {
			const settings: ProviderSettings = {
				toolProtocol: "xml",
				apiProvider: "openai-native",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Profile setting wins
		})
	})

	describe("Precedence Level 2: Model Default", () => {
		it("should use model defaultToolProtocol when no profile setting", () => {
			const settings: ProviderSettings = {
				apiProvider: "roo",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				defaultToolProtocol: "native",
				supportsNativeTools: true, // Model must support native tools
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Model default wins when no profile setting is present
		})

		it("should override model capability when model default is present", () => {
			const settings: ProviderSettings = {
				apiProvider: "roo",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				defaultToolProtocol: "xml",
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Model default wins over capability
		})
	})

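	// Support validation: a "native" preference, wherever it comes from, is only honored
	// when modelInfo.supportsNativeTools is true; an undefined flag counts as unsupported.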
	describe("Support Validation", () => {
		it("should fall back to XML when model doesn't support native", () => {
			const settings: ProviderSettings = {
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: false,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML)
		})

		it("should fall back to XML when user prefers native but model doesn't support it", () => {
			const settings: ProviderSettings = {
				toolProtocol: "native", // User wants native
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: false, // But model doesn't support it
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML due to lack of support
		})

		it("should fall back to XML when user prefers native but model support is undefined", () => {
			const settings: ProviderSettings = {
				toolProtocol: "native", // User wants native
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				// supportsNativeTools is undefined (not specified)
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML - undefined treated as unsupported
		})
	})

	describe("Precedence Level 3: Native Fallback", () => {
		it("should use Native fallback when no model default is specified and model supports native", () => {
			const settings: ProviderSettings = {
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native fallback
		})
	})

	describe("Complete Precedence Chain", () => {
		it("should respect full precedence: Profile > Model Default > Native Fallback", () => {
			// Set up a scenario with all levels defined
			const settings: ProviderSettings = {
				toolProtocol: "native", // Level 1: User profile setting
				apiProvider: "roo",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				defaultToolProtocol: "xml", // Level 2: Model default
				supportsNativeTools: true, // Support check
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Profile setting wins
		})

		it("should skip to model default when profile setting is undefined", () => {
			const settings: ProviderSettings = {
				apiProvider: "openai-native",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				defaultToolProtocol: "xml", // Level 2
				supportsNativeTools: true, // Support check
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Model default wins
		})

		it("should skip to Native fallback when profile and model default are undefined", () => {
			const settings: ProviderSettings = {
				apiProvider: "openai-native",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native fallback
		})

		it("should skip to XML fallback when model info is unavailable", () => {
			const settings: ProviderSettings = {
				apiProvider: "anthropic",
			}

			const result = resolveToolProtocol(settings, undefined)
			expect(result).toBe(TOOL_PROTOCOL.XML) // XML fallback (no model info means no native support)
		})
	})

	describe("Edge Cases", () => {
		it("should handle missing provider name gracefully", () => {
			const settings: ProviderSettings = {}

			const result = resolveToolProtocol(settings)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML (no model info)
		})

		it("should handle undefined model info gracefully", () => {
			const settings: ProviderSettings = {
				apiProvider: "openai-native",
			}

			const result = resolveToolProtocol(settings, undefined)
			expect(result).toBe(TOOL_PROTOCOL.XML) // XML fallback (no model info)
		})

		it("should fall back to XML when model doesn't support native", () => {
			const settings: ProviderSettings = {
				apiProvider: "roo",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: false, // Model doesn't support native
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML due to lack of support
		})
	})

	describe("Real-world Scenarios", () => {
		it("should use Native fallback for models without defaultToolProtocol", () => {
			const settings: ProviderSettings = {
				apiProvider: "openai-native",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native fallback
		})

		it("should use XML for Claude models with Anthropic provider", () => {
			const settings: ProviderSettings = {
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 8192,
				contextWindow: 200000,
				supportsPromptCache: true,
				supportsNativeTools: false,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML)
		})

		it("should allow user to force XML on native-supporting model", () => {
			const settings: ProviderSettings = {
				toolProtocol: "xml", // User explicitly wants XML
				apiProvider: "openai-native",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: true, // Model supports native but user wants XML
				defaultToolProtocol: "native",
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // User preference wins
		})

		it("should not allow user to force native when model doesn't support it", () => {
			const settings: ProviderSettings = {
				toolProtocol: "native", // User wants native
				apiProvider: "anthropic",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 4096,
				contextWindow: 128000,
				supportsPromptCache: false,
				supportsNativeTools: false, // Model doesn't support native
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML due to lack of support
		})

		it("should use model default when available", () => {
			const settings: ProviderSettings = {
				apiProvider: "roo",
			}
			const modelInfo: ModelInfo = {
				maxTokens: 8192,
				contextWindow: 200000,
				supportsPromptCache: true,
				defaultToolProtocol: "xml",
				supportsNativeTools: true,
			}

			const result = resolveToolProtocol(settings, modelInfo)
			expect(result).toBe(TOOL_PROTOCOL.XML) // Model default wins
		})

		it("should use native tools for OpenAI compatible provider with default model info", () => {
			const settings: ProviderSettings = {
				apiProvider: "openai",
			}

			// Using the actual openAiModelInfoSaneDefaults to verify the fix
			const result = resolveToolProtocol(settings, openAiModelInfoSaneDefaults)
			expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Should use native tools by default
		})
	})
})