// github-copilot-models.test.ts
  1. import { afterEach, expect, mock, test } from "bun:test"
  2. import { CopilotModels } from "@/plugin/github-copilot/models"
  3. const originalFetch = globalThis.fetch
  4. afterEach(() => {
  5. globalThis.fetch = originalFetch
  6. })
  7. test("preserves temperature support from existing provider models", async () => {
  8. globalThis.fetch = mock(() =>
  9. Promise.resolve(
  10. new Response(
  11. JSON.stringify({
  12. data: [
  13. {
  14. model_picker_enabled: true,
  15. id: "gpt-4o",
  16. name: "GPT-4o",
  17. version: "gpt-4o-2024-05-13",
  18. capabilities: {
  19. family: "gpt",
  20. limits: {
  21. max_context_window_tokens: 64000,
  22. max_output_tokens: 16384,
  23. max_prompt_tokens: 64000,
  24. },
  25. supports: {
  26. streaming: true,
  27. tool_calls: true,
  28. },
  29. },
  30. },
  31. {
  32. model_picker_enabled: true,
  33. id: "brand-new",
  34. name: "Brand New",
  35. version: "brand-new-2026-04-01",
  36. capabilities: {
  37. family: "test",
  38. limits: {
  39. max_context_window_tokens: 32000,
  40. max_output_tokens: 8192,
  41. max_prompt_tokens: 32000,
  42. },
  43. supports: {
  44. streaming: true,
  45. tool_calls: false,
  46. },
  47. },
  48. },
  49. ],
  50. }),
  51. { status: 200 },
  52. ),
  53. ),
  54. ) as unknown as typeof fetch
  55. const models = await CopilotModels.get(
  56. "https://api.githubcopilot.com",
  57. {},
  58. {
  59. "gpt-4o": {
  60. id: "gpt-4o",
  61. providerID: "github-copilot",
  62. api: {
  63. id: "gpt-4o",
  64. url: "https://api.githubcopilot.com",
  65. npm: "@ai-sdk/openai-compatible",
  66. },
  67. name: "GPT-4o",
  68. family: "gpt",
  69. capabilities: {
  70. temperature: true,
  71. reasoning: false,
  72. attachment: true,
  73. toolcall: true,
  74. input: {
  75. text: true,
  76. audio: false,
  77. image: true,
  78. video: false,
  79. pdf: false,
  80. },
  81. output: {
  82. text: true,
  83. audio: false,
  84. image: false,
  85. video: false,
  86. pdf: false,
  87. },
  88. interleaved: false,
  89. },
  90. cost: {
  91. input: 0,
  92. output: 0,
  93. cache: {
  94. read: 0,
  95. write: 0,
  96. },
  97. },
  98. limit: {
  99. context: 64000,
  100. output: 16384,
  101. },
  102. options: {},
  103. headers: {},
  104. release_date: "2024-05-13",
  105. variants: {},
  106. status: "active",
  107. },
  108. },
  109. )
  110. expect(models["gpt-4o"].capabilities.temperature).toBe(true)
  111. expect(models["brand-new"].capabilities.temperature).toBe(true)
  112. })