openai-format.test.ts

import { convertToOpenAiMessages, convertToAnthropicMessage } from '../openai-format';
import { Anthropic } from '@anthropic-ai/sdk';
import OpenAI from 'openai';
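
// Relaxed ChatCompletion shape: lets the fixtures below declare only the choice
// fields these tests exercise (message, finish_reason, index) instead of the
// full SDK Choice type.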
type PartialChatCompletion = Omit<OpenAI.Chat.Completions.ChatCompletion, 'choices'> & {
  choices: Array<Partial<OpenAI.Chat.Completions.ChatCompletion.Choice> & {
    message: OpenAI.Chat.Completions.ChatCompletion.Choice['message'];
    finish_reason: string;
    index: number;
  }>;
};

describe('OpenAI Format Transformations', () => {
  describe('convertToOpenAiMessages', () => {
    it('should convert simple text messages', () => {
      const anthropicMessages: Anthropic.Messages.MessageParam[] = [
        {
          role: 'user',
          content: 'Hello'
        },
        {
          role: 'assistant',
          content: 'Hi there!'
        }
      ];

      const openAiMessages = convertToOpenAiMessages(anthropicMessages);
      expect(openAiMessages).toHaveLength(2);
      expect(openAiMessages[0]).toEqual({
        role: 'user',
        content: 'Hello'
      });
      expect(openAiMessages[1]).toEqual({
        role: 'assistant',
        content: 'Hi there!'
      });
    });

    it('should handle messages with image content', () => {
      const anthropicMessages: Anthropic.Messages.MessageParam[] = [
        {
          role: 'user',
          content: [
            {
              type: 'text',
              text: 'What is in this image?'
            },
            {
              type: 'image',
              source: {
                type: 'base64',
                media_type: 'image/jpeg',
                data: 'base64data'
              }
            }
          ]
        }
      ];

      const openAiMessages = convertToOpenAiMessages(anthropicMessages);
      expect(openAiMessages).toHaveLength(1);
      expect(openAiMessages[0].role).toBe('user');

      const content = openAiMessages[0].content as Array<{
        type: string;
        text?: string;
        image_url?: { url: string };
      }>;
      expect(Array.isArray(content)).toBe(true);
      expect(content).toHaveLength(2);
      expect(content[0]).toEqual({ type: 'text', text: 'What is in this image?' });
      expect(content[1]).toEqual({
        type: 'image_url',
        image_url: { url: 'data:image/jpeg;base64,base64data' }
      });
    });

    it('should handle assistant messages with tool use', () => {
      const anthropicMessages: Anthropic.Messages.MessageParam[] = [
        {
          role: 'assistant',
          content: [
            {
              type: 'text',
              text: 'Let me check the weather.'
            },
            {
              type: 'tool_use',
              id: 'weather-123',
              name: 'get_weather',
              input: { city: 'London' }
            }
          ]
        }
      ];

      const openAiMessages = convertToOpenAiMessages(anthropicMessages);
      expect(openAiMessages).toHaveLength(1);

      const assistantMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionAssistantMessageParam;
      expect(assistantMessage.role).toBe('assistant');
      expect(assistantMessage.content).toBe('Let me check the weather.');
      expect(assistantMessage.tool_calls).toHaveLength(1);
      expect(assistantMessage.tool_calls![0]).toEqual({
        id: 'weather-123',
        type: 'function',
        function: {
          name: 'get_weather',
          arguments: JSON.stringify({ city: 'London' })
        }
      });
    });

    it('should handle user messages with tool results', () => {
      const anthropicMessages: Anthropic.Messages.MessageParam[] = [
        {
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: 'weather-123',
              content: 'Current temperature in London: 20°C'
            }
          ]
        }
      ];

      const openAiMessages = convertToOpenAiMessages(anthropicMessages);
      expect(openAiMessages).toHaveLength(1);

      const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam;
      expect(toolMessage.role).toBe('tool');
      expect(toolMessage.tool_call_id).toBe('weather-123');
      expect(toolMessage.content).toBe('Current temperature in London: 20°C');
    });
  });

  describe('convertToAnthropicMessage', () => {
    it('should convert simple completion', () => {
      const openAiCompletion: PartialChatCompletion = {
        id: 'completion-123',
        model: 'gpt-4',
        choices: [{
          message: {
            role: 'assistant',
            content: 'Hello there!',
            refusal: null
          },
          finish_reason: 'stop',
          index: 0
        }],
        usage: {
          prompt_tokens: 10,
          completion_tokens: 5,
          total_tokens: 15
        },
        created: 123456789,
        object: 'chat.completion'
      };

      const anthropicMessage = convertToAnthropicMessage(openAiCompletion as OpenAI.Chat.Completions.ChatCompletion);
      expect(anthropicMessage.id).toBe('completion-123');
      expect(anthropicMessage.role).toBe('assistant');
      expect(anthropicMessage.content).toHaveLength(1);
      expect(anthropicMessage.content[0]).toEqual({
        type: 'text',
        text: 'Hello there!'
      });
      expect(anthropicMessage.stop_reason).toBe('end_turn');
      expect(anthropicMessage.usage).toEqual({
        input_tokens: 10,
        output_tokens: 5
      });
    });

    it('should handle tool calls in completion', () => {
      const openAiCompletion: PartialChatCompletion = {
        id: 'completion-123',
        model: 'gpt-4',
        choices: [{
          message: {
            role: 'assistant',
            content: 'Let me check the weather.',
            tool_calls: [{
              id: 'weather-123',
              type: 'function',
              function: {
                name: 'get_weather',
                arguments: '{"city":"London"}'
              }
            }],
            refusal: null
          },
          finish_reason: 'tool_calls',
          index: 0
        }],
        usage: {
          prompt_tokens: 15,
          completion_tokens: 8,
          total_tokens: 23
        },
        created: 123456789,
        object: 'chat.completion'
      };

      const anthropicMessage = convertToAnthropicMessage(openAiCompletion as OpenAI.Chat.Completions.ChatCompletion);
      expect(anthropicMessage.content).toHaveLength(2);
      expect(anthropicMessage.content[0]).toEqual({
        type: 'text',
        text: 'Let me check the weather.'
      });
      expect(anthropicMessage.content[1]).toEqual({
        type: 'tool_use',
        id: 'weather-123',
        name: 'get_weather',
        input: { city: 'London' }
      });
      expect(anthropicMessage.stop_reason).toBe('tool_use');
    });

    it('should handle invalid tool call arguments', () => {
      const openAiCompletion: PartialChatCompletion = {
        id: 'completion-123',
        model: 'gpt-4',
        choices: [{
          message: {
            role: 'assistant',
            content: 'Testing invalid arguments',
            tool_calls: [{
              id: 'test-123',
              type: 'function',
              function: {
                name: 'test_function',
                arguments: 'invalid json'
              }
            }],
            refusal: null
          },
          finish_reason: 'tool_calls',
          index: 0
        }],
        created: 123456789,
        object: 'chat.completion'
      };

      const anthropicMessage = convertToAnthropicMessage(openAiCompletion as OpenAI.Chat.Completions.ChatCompletion);
      expect(anthropicMessage.content).toHaveLength(2);
      expect(anthropicMessage.content[1]).toEqual({
        type: 'tool_use',
        id: 'test-123',
        name: 'test_function',
        input: {} // Should default to empty object for invalid JSON
      });
    });
  });
});