// processor-effect.test.ts — integration tests for SessionProcessor driven by a
// scripted in-memory LLM layer.
  1. import { NodeFileSystem } from "@effect/platform-node"
  2. import { expect } from "bun:test"
  3. import { APICallError } from "ai"
  4. import { Cause, Effect, Exit, Fiber, Layer, ServiceMap } from "effect"
  5. import * as Stream from "effect/Stream"
  6. import path from "path"
  7. import type { Agent } from "../../src/agent/agent"
  8. import { Agent as AgentSvc } from "../../src/agent/agent"
  9. import { Bus } from "../../src/bus"
  10. import { Config } from "../../src/config/config"
  11. import { Permission } from "../../src/permission"
  12. import { Plugin } from "../../src/plugin"
  13. import type { Provider } from "../../src/provider/provider"
  14. import { ModelID, ProviderID } from "../../src/provider/schema"
  15. import { Session } from "../../src/session"
  16. import { LLM } from "../../src/session/llm"
  17. import { MessageV2 } from "../../src/session/message-v2"
  18. import { SessionProcessor } from "../../src/session/processor"
  19. import { MessageID, PartID, SessionID } from "../../src/session/schema"
  20. import { SessionStatus } from "../../src/session/status"
  21. import { Snapshot } from "../../src/snapshot"
  22. import { Log } from "../../src/util/log"
  23. import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
  24. import { provideTmpdirInstance } from "../fixture/fixture"
  25. import { testEffect } from "../lib/effect"
// Silence log output for the whole test run.
Log.init({ print: false })

// Provider/model reference shared by every message fixture in this file.
const ref = {
  providerID: ProviderID.make("test"),
  modelID: ModelID.make("test-model"),
}

// A scripted LLM response: either a fixed event stream, or a factory that can
// inspect the StreamInput the processor sends (used by input-dependent tests).
type Script = Stream.Stream<LLM.Event, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLM.Event, unknown>)
// Test-only service tag exposing the scripted-LLM controls to test bodies:
// push/reply enqueue scripted responses; calls/inputs observe what the
// processor actually sent to the LLM layer.
class TestLLM extends ServiceMap.Service<
  TestLLM,
  {
    // Enqueue one scripted response (a stream or an input-dependent factory).
    readonly push: (stream: Script) => Effect.Effect<void>
    // Convenience: enqueue a fixed sequence of events as one response.
    readonly reply: (...items: LLM.Event[]) => Effect.Effect<void>
    // How many times the processor invoked LLM.stream.
    readonly calls: Effect.Effect<number>
    // Snapshot of every StreamInput the processor sent, in call order.
    readonly inputs: Effect.Effect<LLM.StreamInput[]>
  }
>()("@test/SessionProcessorLLM") {}
// Build a finite LLM event stream from the given events.
function stream(...items: LLM.Event[]) {
  return Stream.make(...items)
}
  44. function usage(input = 1, output = 1, total = input + output) {
  45. return {
  46. inputTokens: input,
  47. outputTokens: output,
  48. totalTokens: total,
  49. inputTokenDetails: {
  50. noCacheTokens: undefined,
  51. cacheReadTokens: undefined,
  52. cacheWriteTokens: undefined,
  53. },
  54. outputTokenDetails: {
  55. textTokens: undefined,
  56. reasoningTokens: undefined,
  57. },
  58. }
  59. }
  60. function start(): LLM.Event {
  61. return { type: "start" }
  62. }
  63. function textStart(id = "t"): LLM.Event {
  64. return { type: "text-start", id }
  65. }
  66. function textDelta(id: string, text: string): LLM.Event {
  67. return { type: "text-delta", id, text }
  68. }
  69. function textEnd(id = "t"): LLM.Event {
  70. return { type: "text-end", id }
  71. }
  72. function reasoningStart(id: string): LLM.Event {
  73. return { type: "reasoning-start", id }
  74. }
  75. function reasoningDelta(id: string, text: string): LLM.Event {
  76. return { type: "reasoning-delta", id, text }
  77. }
  78. function reasoningEnd(id: string): LLM.Event {
  79. return { type: "reasoning-end", id }
  80. }
  81. function finishStep(): LLM.Event {
  82. return {
  83. type: "finish-step",
  84. finishReason: "stop",
  85. rawFinishReason: "stop",
  86. response: { id: "res", modelId: "test-model", timestamp: new Date() },
  87. providerMetadata: undefined,
  88. usage: usage(),
  89. }
  90. }
  91. function finish(): LLM.Event {
  92. return { type: "finish", finishReason: "stop", rawFinishReason: "stop", totalUsage: usage() }
  93. }
  94. function toolInputStart(id: string, toolName: string): LLM.Event {
  95. return { type: "tool-input-start", id, toolName }
  96. }
  97. function toolCall(toolCallId: string, toolName: string, input: unknown): LLM.Event {
  98. return { type: "tool-call", toolCallId, toolName, input }
  99. }
// A scripted response that emits `items` and then fails with `err` —
// exercises the processor's retry / error-classification paths.
function fail<E>(err: E, ...items: LLM.Event[]) {
  return stream(...items).pipe(Stream.concat(Stream.fail(err)))
}
// A scripted response that emits `items` and then never completes — used by
// the abort/interrupt tests to park the processor mid-stream.
function hang(_input: LLM.StreamInput, ...items: LLM.Event[]) {
  return stream(...items).pipe(Stream.concat(Stream.fromEffect(Effect.never)))
}
  106. function model(context: number): Provider.Model {
  107. return {
  108. id: "test-model",
  109. providerID: "test",
  110. name: "Test",
  111. limit: { context, output: 10 },
  112. cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
  113. capabilities: {
  114. toolcall: true,
  115. attachment: false,
  116. reasoning: false,
  117. temperature: true,
  118. input: { text: true, image: false, audio: false, video: false },
  119. output: { text: true, image: false, audio: false, video: false },
  120. },
  121. api: { npm: "@ai-sdk/anthropic" },
  122. options: {},
  123. } as Provider.Model
  124. }
  125. function agent(): Agent.Info {
  126. return {
  127. name: "build",
  128. mode: "primary",
  129. options: {},
  130. permission: [{ permission: "*", pattern: "*", action: "allow" }],
  131. }
  132. }
  133. function defer<T>() {
  134. let resolve!: (value: T | PromiseLike<T>) => void
  135. const promise = new Promise<T>((done) => {
  136. resolve = done
  137. })
  138. return { promise, resolve }
  139. }
// Create and persist a user message carrying a single text part in the given
// session; returns the stored message (used as the parent of an assistant).
const user = Effect.fn("TestSession.user")(function* (sessionID: SessionID, text: string) {
  const session = yield* Session.Service
  const msg = yield* session.updateMessage({
    id: MessageID.ascending(),
    role: "user",
    sessionID,
    agent: "build",
    model: ref,
    time: { created: Date.now() },
  })
  // Attach the prompt text as the message's only part.
  yield* session.updatePart({
    id: PartID.ascending(),
    messageID: msg.id,
    sessionID,
    type: "text",
    text,
  })
  return msg
})
// Create and persist an empty assistant message (zero cost/tokens, no parts)
// parented to `parentID`, rooted at `root`. This is the message the processor
// under test streams into.
const assistant = Effect.fn("TestSession.assistant")(function* (
  sessionID: SessionID,
  parentID: MessageID,
  root: string,
) {
  const session = yield* Session.Service
  const msg: MessageV2.Assistant = {
    id: MessageID.ascending(),
    role: "assistant",
    sessionID,
    mode: "build",
    agent: "build",
    path: { cwd: root, root },
    cost: 0,
    // Usage starts zeroed; the processor accumulates real values as it runs.
    tokens: {
      total: 0,
      input: 0,
      output: 0,
      reasoning: 0,
      cache: { read: 0, write: 0 },
    },
    modelID: ref.modelID,
    providerID: ref.providerID,
    parentID,
    time: { created: Date.now() },
    finish: "end_turn",
  }
  yield* session.updateMessage(msg)
  return msg
})
// Layer providing both the real LLM.Service (backed by a FIFO queue of
// scripted responses) and the TestLLM control handle over the same state.
// Each stream() call records the call count and its input, pops the next
// script, and falls back to an empty stream when the queue is exhausted.
const llm = Layer.unwrap(
  Effect.gen(function* () {
    const queue: Script[] = []
    const inputs: LLM.StreamInput[] = []
    let calls = 0
    const push = Effect.fn("TestLLM.push")((item: Script) => {
      queue.push(item)
      return Effect.void
    })
    const reply = Effect.fn("TestLLM.reply")((...items: LLM.Event[]) => push(stream(...items)))
    return Layer.mergeAll(
      Layer.succeed(
        LLM.Service,
        LLM.Service.of({
          stream: (input) => {
            calls += 1
            inputs.push(input)
            const item = queue.shift() ?? Stream.empty
            // Scripts may be input-dependent factories; invoke them lazily.
            return typeof item === "function" ? item(input) : item
          },
        }),
      ),
      Layer.succeed(
        TestLLM,
        TestLLM.of({
          push,
          reply,
          calls: Effect.sync(() => calls),
          // Copy so later calls cannot mutate what a test already captured.
          inputs: Effect.sync(() => [...inputs]),
        }),
      ),
    )
  }),
)
// Wire the processor's dependency graph: session status over the event bus,
// file-system + process-spawner infrastructure, the core services, and the
// scripted LLM layer defined above.
const status = SessionStatus.layer.pipe(Layer.provideMerge(Bus.layer))
const infra = Layer.mergeAll(NodeFileSystem.layer, CrossSpawnSpawner.defaultLayer)
const deps = Layer.mergeAll(
  Session.defaultLayer,
  Snapshot.defaultLayer,
  AgentSvc.defaultLayer,
  Permission.layer,
  Plugin.defaultLayer,
  Config.defaultLayer,
  status,
  llm,
).pipe(Layer.provideMerge(infra))
const env = SessionProcessor.layer.pipe(Layer.provideMerge(deps))
// Effect-aware test runner with the full environment provided.
const it = testEffect(env)
// Happy path: one scripted text reply flows through the processor, the LLM is
// called exactly once with the given messages, and the streamed text is
// persisted as a part on the assistant message.
it.live("session.processor effect tests capture llm input cleanly", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        yield* test.reply(start(), textStart(), textDelta("t", "hello"), textEnd(), finishStep(), finish())
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "hi")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const input = {
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "hi" }],
          tools: {},
        } satisfies LLM.StreamInput
        const value = yield* handle.process(input)
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        const calls = yield* test.calls
        const inputs = yield* test.inputs
        expect(value).toBe("continue")
        expect(calls).toBe(1)
        expect(inputs).toHaveLength(1)
        // The processor must forward the caller's messages untouched.
        expect(inputs[0].messages).toStrictEqual([{ role: "user", content: "hi" }])
        expect(parts.some((part) => part.type === "text" && part.text === "hello")).toBe(true)
      }),
    { git: true },
  )
})
// Token overflow: a finish-step reporting 100 input tokens against a 20-token
// context window must make the processor stop before the trailing text events
// are persisted and request compaction instead.
it.live("session.processor effect tests stop after token overflow requests compaction", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        yield* test.reply(
          start(),
          {
            // Inline finish-step so usage can exceed the model's context limit.
            type: "finish-step",
            finishReason: "stop",
            rawFinishReason: "stop",
            response: { id: "res", modelId: "test-model", timestamp: new Date() },
            providerMetadata: undefined,
            usage: usage(100, 0, 100),
          },
          // These text events arrive after the overflow and must be dropped.
          textStart(),
          textDelta("t", "after"),
          textEnd(),
        )
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "compact")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(20)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const value = yield* handle.process({
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "compact" }],
          tools: {},
        })
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        expect(value).toBe("compact")
        expect(parts.some((part) => part.type === "text")).toBe(false)
        expect(parts.some((part) => part.type === "step-finish")).toBe(true)
      }),
    { git: true },
  )
})
// Retry hygiene: a retryable APICallError mid-reasoning must discard the
// partial reasoning text ("one") so the retried attempt's text ("two") is
// stored alone — never concatenated as "onetwo".
it.live("session.processor effect tests reset reasoning state across retries", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        // First attempt: emits partial reasoning, then fails retryably (503).
        yield* test.push(
          fail(
            new APICallError({
              message: "boom",
              url: "https://example.com/v1/chat/completions",
              requestBodyValues: {},
              statusCode: 503,
              responseHeaders: { "retry-after-ms": "0" },
              responseBody: '{"error":"boom"}',
              isRetryable: true,
            }),
            start(),
            reasoningStart("r"),
            reasoningDelta("r", "one"),
          ),
        )
        // Second attempt: completes cleanly with fresh reasoning.
        yield* test.reply(
          start(),
          reasoningStart("r"),
          reasoningDelta("r", "two"),
          reasoningEnd("r"),
          finishStep(),
          finish(),
        )
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "reason")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const value = yield* handle.process({
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "reason" }],
          tools: {},
        })
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        const reasoning = parts.filter((part): part is MessageV2.ReasoningPart => part.type === "reasoning")
        expect(value).toBe("continue")
        expect(yield* test.calls).toBe(2)
        expect(reasoning.some((part) => part.text === "two")).toBe(true)
        expect(reasoning.some((part) => part.text === "onetwo")).toBe(false)
      }),
    { git: true },
  )
})
// Error classification: a raw JSON error the processor does not recognize must
// NOT be retried — the run stops after one call and surfaces UnknownError.
it.live("session.processor effect tests do not retry unknown json errors", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        yield* test.push(fail({ error: { message: "no_kv_space" } }, start()))
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "json")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const value = yield* handle.process({
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "json" }],
          tools: {},
        })
        expect(value).toBe("stop")
        expect(yield* test.calls).toBe(1)
        expect(yield* test.inputs).toHaveLength(1)
        expect(handle.message.error?.name).toBe("UnknownError")
      }),
    { git: true },
  )
})
// Error classification: a structured JSON error the processor recognizes as
// transient ("too_many_requests") IS retried; the second attempt completes
// and leaves no error on the message.
it.live("session.processor effect tests retry recognized structured json errors", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        yield* test.push(fail({ type: "error", error: { type: "too_many_requests" } }, start()))
        yield* test.reply(start(), textStart(), textDelta("t", "after"), textEnd(), finishStep(), finish())
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "retry json")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const value = yield* handle.process({
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "retry json" }],
          tools: {},
        })
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        expect(value).toBe("continue")
        expect(yield* test.calls).toBe(2)
        expect(parts.some((part) => part.type === "text" && part.text === "after")).toBe(true)
        expect(handle.message.error).toBeUndefined()
      }),
    { git: true },
  )
})
// Status events: each retry must publish a SessionStatus "retry" event with
// the attempt number; one failure then success yields exactly attempt [1].
it.live("session.processor effect tests publish retry status updates", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        const bus = yield* Bus.Service
        yield* test.push(
          fail(
            new APICallError({
              message: "boom",
              url: "https://example.com/v1/chat/completions",
              requestBodyValues: {},
              statusCode: 503,
              responseHeaders: { "retry-after-ms": "0" },
              responseBody: '{"error":"boom"}',
              isRetryable: true,
            }),
            start(),
          ),
        )
        yield* test.reply(start(), finishStep(), finish())
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "retry")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const states: number[] = []
        // Collect retry attempt numbers for this session only.
        const off = yield* bus.subscribeCallback(SessionStatus.Event.Status, (evt) => {
          if (evt.properties.sessionID !== chat.id) return
          if (evt.properties.status.type === "retry") states.push(evt.properties.status.attempt)
        })
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const value = yield* handle.process({
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "retry" }],
          tools: {},
        })
        off()
        expect(value).toBe("continue")
        expect(yield* test.calls).toBe(2)
        expect(states).toStrictEqual([1])
      }),
    { git: true },
  )
})
// Error classification: a structured "context_length_exceeded" error maps to
// compaction (not a retry, not a stored error) after a single call.
it.live("session.processor effect tests compact on structured context overflow", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        yield* test.push(fail({ type: "error", error: { code: "context_length_exceeded" } }, start()))
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "compact json")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const value = yield* handle.process({
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "compact json" }],
          tools: {},
        })
        expect(value).toBe("compact")
        expect(yield* test.calls).toBe(1)
        expect(handle.message.error).toBeUndefined()
      }),
    { git: true },
  )
})
// Abort cleanup: interrupt the processor while a tool call is pending, then
// call handle.abort(); the pending tool part must be marked errored with
// "Tool execution aborted" and an end timestamp.
it.live("session.processor effect tests mark pending tools as aborted on cleanup", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const ready = defer<void>()
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        // Hang after the tool-call event; resolve `ready` once it is emitted
        // so the test interrupts at a deterministic point.
        yield* test.push((input) =>
          hang(input, start(), toolInputStart("tool-1", "bash"), toolCall("tool-1", "bash", { cmd: "pwd" })).pipe(
            Stream.tap((event) => (event.type === "tool-call" ? Effect.sync(() => ready.resolve()) : Effect.void)),
          ),
        )
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "tool abort")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const run = yield* handle
          .process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "tool abort" }],
            tools: {},
          })
          .pipe(Effect.forkChild)
        yield* Effect.promise(() => ready.promise)
        yield* Fiber.interrupt(run)
        const exit = yield* Fiber.await(run)
        // Mirror production cleanup: abort explicitly on pure interruption.
        if (Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)) {
          yield* handle.abort()
        }
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        const tool = parts.find((part): part is MessageV2.ToolPart => part.type === "tool")
        expect(Exit.isFailure(exit)).toBe(true)
        if (Exit.isFailure(exit)) {
          expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)
        }
        expect(yield* test.calls).toBe(1)
        expect(tool?.state.status).toBe("error")
        if (tool?.state.status === "error") {
          expect(tool.state.error).toBe("Tool execution aborted")
          expect(tool.state.time.end).toBeDefined()
        }
      }),
    { git: true },
  )
})
// Abort bookkeeping: after interrupt + explicit abort, the assistant message
// (in-memory and persisted) carries MessageAbortedError, the session status
// returns to idle, and a Session.Event.Error with that name is published.
it.live("session.processor effect tests record aborted errors and idle state", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const ready = defer<void>()
        const seen = defer<void>()
        const test = yield* TestLLM
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        const bus = yield* Bus.Service
        const status = yield* SessionStatus.Service
        // Hang right after "start"; `ready` marks the interruption point.
        yield* test.push((input) =>
          hang(input, start()).pipe(
            Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
          ),
        )
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "abort")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const errs: string[] = []
        // `seen` resolves once the error event for this session is observed.
        const off = yield* bus.subscribeCallback(Session.Event.Error, (evt) => {
          if (evt.properties.sessionID !== chat.id) return
          if (!evt.properties.error) return
          errs.push(evt.properties.error.name)
          seen.resolve()
        })
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const run = yield* handle
          .process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "abort" }],
            tools: {},
          })
          .pipe(Effect.forkChild)
        yield* Effect.promise(() => ready.promise)
        yield* Fiber.interrupt(run)
        const exit = yield* Fiber.await(run)
        if (Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)) {
          yield* handle.abort()
        }
        yield* Effect.promise(() => seen.promise)
        const stored = yield* Effect.promise(() => MessageV2.get({ sessionID: chat.id, messageID: msg.id }))
        const state = yield* status.get(chat.id)
        off()
        expect(Exit.isFailure(exit)).toBe(true)
        if (Exit.isFailure(exit)) {
          expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)
        }
        expect(handle.message.error?.name).toBe("MessageAbortedError")
        expect(stored.info.role).toBe("assistant")
        if (stored.info.role === "assistant") {
          expect(stored.info.error?.name).toBe("MessageAbortedError")
        }
        expect(state).toMatchObject({ type: "idle" })
        expect(errs).toContain("MessageAbortedError")
      }),
    { git: true },
  )
})
// Unlike the previous tests, no explicit handle.abort() is called here: a bare
// fiber interruption alone must still record MessageAbortedError and return
// the session status to idle.
it.live("session.processor effect tests mark interruptions aborted without manual abort", () => {
  return provideTmpdirInstance(
    (dir) =>
      Effect.gen(function* () {
        const ready = defer<void>()
        const processors = yield* SessionProcessor.Service
        const session = yield* Session.Service
        const status = yield* SessionStatus.Service
        const test = yield* TestLLM
        yield* test.push((input) =>
          hang(input, start()).pipe(
            Stream.tap((event) => (event.type === "start" ? Effect.sync(() => ready.resolve()) : Effect.void)),
          ),
        )
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "interrupt")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = model(100)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const run = yield* handle
          .process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "interrupt" }],
            tools: {},
          })
          .pipe(Effect.forkChild)
        yield* Effect.promise(() => ready.promise)
        yield* Fiber.interrupt(run)
        const exit = yield* Fiber.await(run)
        const stored = yield* Effect.promise(() => MessageV2.get({ sessionID: chat.id, messageID: msg.id }))
        const state = yield* status.get(chat.id)
        expect(Exit.isFailure(exit)).toBe(true)
        expect(handle.message.error?.name).toBe("MessageAbortedError")
        expect(stored.info.role).toBe("assistant")
        if (stored.info.role === "assistant") {
          expect(stored.info.error?.name).toBe("MessageAbortedError")
        }
        expect(state).toMatchObject({ type: "idle" })
      }),
    { git: true },
  )
})