// processor-effect.test.ts
import { NodeFileSystem } from "@effect/platform-node"
import { expect } from "bun:test"
import { Cause, Effect, Exit, Fiber, Layer } from "effect"
import path from "path"
import type { Agent } from "../../src/agent/agent"
import { Agent as AgentSvc } from "../../src/agent/agent"
import { Bus } from "../../src/bus"
import { Config } from "../../src/config/config"
import { Permission } from "../../src/permission"
import { Plugin } from "../../src/plugin"
import { Provider } from "../../src/provider/provider"
import { ModelID, ProviderID } from "../../src/provider/schema"
import { Session } from "../../src/session"
import { LLM } from "../../src/session/llm"
import { MessageV2 } from "../../src/session/message-v2"
import { SessionProcessor } from "../../src/session/processor"
import { MessageID, PartID, SessionID } from "../../src/session/schema"
import { SessionStatus } from "../../src/session/status"
import { Snapshot } from "../../src/snapshot"
import { Log } from "../../src/util/log"
import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
import { provideTmpdirServer } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
import { reply, TestLLMServer } from "../lib/llm-server"

// Keep logging quiet during the suite; no test asserts on log output.
Log.init({ print: false })

// Provider/model pair used by every test; must match the mock provider config below.
const ref = {
  providerID: ProviderID.make("test"),
  modelID: ModelID.make("test-model"),
}
// Base config: a single mock "test" provider exposing one tool-capable model.
const cfg = {
  provider: {
    test: {
      name: "Test",
      id: "test",
      env: [],
      npm: "@ai-sdk/openai-compatible",
      models: {
        "test-model": {
          id: "test-model",
          name: "Test Model",
          attachment: false,
          reasoning: false,
          temperature: false,
          tool_call: true,
          release_date: "2025-01-01",
          limit: { context: 100000, output: 10000 },
          cost: { input: 0, output: 0 },
          options: {},
        },
      },
      options: {
        apiKey: "test-key",
        // Placeholder endpoint; providerCfg() swaps in the per-test mock server URL.
        baseURL: "http://localhost:1/v1",
      },
    },
  },
}
  58. function providerCfg(url: string) {
  59. return {
  60. ...cfg,
  61. provider: {
  62. ...cfg.provider,
  63. test: {
  64. ...cfg.provider.test,
  65. options: {
  66. ...cfg.provider.test.options,
  67. baseURL: url,
  68. },
  69. },
  70. },
  71. }
  72. }
  73. function agent(): Agent.Info {
  74. return {
  75. name: "build",
  76. mode: "primary",
  77. options: {},
  78. permission: [{ permission: "*", pattern: "*", action: "allow" }],
  79. }
  80. }
  81. function defer<T>() {
  82. let resolve!: (value: T | PromiseLike<T>) => void
  83. const promise = new Promise<T>((done) => {
  84. resolve = done
  85. })
  86. return { promise, resolve }
  87. }
// Persist a user message with a single text part and return the message record.
const user = Effect.fn("TestSession.user")(function* (sessionID: SessionID, text: string) {
  const session = yield* Session.Service
  const msg = yield* session.updateMessage({
    id: MessageID.ascending(),
    role: "user",
    sessionID,
    agent: "build",
    model: ref,
    time: { created: Date.now() },
  })
  // Attach the text body as a separate part, keyed to the message just created.
  yield* session.updatePart({
    id: PartID.ascending(),
    messageID: msg.id,
    sessionID,
    type: "text",
    text,
  })
  return msg
})
// Persist a minimal assistant message shell for the processor to write into.
const assistant = Effect.fn("TestSession.assistant")(function* (
  sessionID: SessionID,
  parentID: MessageID,
  root: string,
) {
  const session = yield* Session.Service
  const msg: MessageV2.Assistant = {
    id: MessageID.ascending(),
    role: "assistant",
    sessionID,
    mode: "build",
    agent: "build",
    path: { cwd: root, root },
    cost: 0,
    // Counters start at zero; the processor fills in real usage as it streams.
    tokens: {
      total: 0,
      input: 0,
      output: 0,
      reasoning: 0,
      cache: { read: 0, write: 0 },
    },
    modelID: ref.modelID,
    providerID: ref.providerID,
    parentID,
    time: { created: Date.now() },
    finish: "end_turn",
  }
  yield* session.updateMessage(msg)
  return msg
})
// Status events require the bus; provideMerge keeps both services available.
const status = SessionStatus.layer.pipe(Layer.provideMerge(Bus.layer))
// Real filesystem and process spawner shared by all service layers.
const infra = Layer.mergeAll(NodeFileSystem.layer, CrossSpawnSpawner.defaultLayer)
const deps = Layer.mergeAll(
  Session.defaultLayer,
  Snapshot.defaultLayer,
  AgentSvc.defaultLayer,
  Permission.layer,
  Plugin.defaultLayer,
  Config.defaultLayer,
  LLM.defaultLayer,
  Provider.defaultLayer,
  status,
).pipe(Layer.provideMerge(infra))
// Full test environment: mock LLM server plus the processor wired onto its deps.
const env = Layer.mergeAll(TestLLMServer.layer, SessionProcessor.layer.pipe(Layer.provideMerge(deps)))
const it = testEffect(env)
// Resolve the three services every test needs from the environment.
const boot = Effect.fn("test.boot")(function* () {
  const processors = yield* SessionProcessor.Service
  const session = yield* Session.Service
  const provider = yield* Provider.Service
  return { processors, session, provider }
})
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
it.live("session.processor effect tests capture llm input cleanly", () =>
  provideTmpdirServer(
    ({ dir, llm }) =>
      Effect.gen(function* () {
        const { processors, session, provider } = yield* boot()
        // Single canned completion returned by the mock server.
        yield* llm.text("hello")
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "hi")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const input = {
          user: {
            id: parent.id,
            sessionID: chat.id,
            role: "user",
            time: parent.time,
            agent: parent.agent,
            model: { providerID: ref.providerID, modelID: ref.modelID },
          } satisfies MessageV2.User,
          sessionID: chat.id,
          model: mdl,
          agent: agent(),
          system: [],
          messages: [{ role: "user", content: "hi" }],
          tools: {},
        } satisfies LLM.StreamInput
        const value = yield* handle.process(input)
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        const calls = yield* llm.calls
        // Expect one round trip, a "continue" outcome, and the streamed text persisted.
        expect(value).toBe("continue")
        expect(calls).toBe(1)
        expect(parts.some((part) => part.type === "text" && part.text === "hello")).toBe(true)
      }),
    { git: true, config: (url) => providerCfg(url) },
  ),
)
  202. it.live("session.processor effect tests stop after token overflow requests compaction", () =>
  203. provideTmpdirServer(
  204. ({ dir, llm }) =>
  205. Effect.gen(function* () {
  206. const { processors, session, provider } = yield* boot()
  207. yield* llm.text("after", { usage: { input: 100, output: 0 } })
  208. const chat = yield* session.create({})
  209. const parent = yield* user(chat.id, "compact")
  210. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  211. const base = yield* provider.getModel(ref.providerID, ref.modelID)
  212. const mdl = { ...base, limit: { context: 20, output: 10 } }
  213. const handle = yield* processors.create({
  214. assistantMessage: msg,
  215. sessionID: chat.id,
  216. model: mdl,
  217. })
  218. const value = yield* handle.process({
  219. user: {
  220. id: parent.id,
  221. sessionID: chat.id,
  222. role: "user",
  223. time: parent.time,
  224. agent: parent.agent,
  225. model: { providerID: ref.providerID, modelID: ref.modelID },
  226. } satisfies MessageV2.User,
  227. sessionID: chat.id,
  228. model: mdl,
  229. agent: agent(),
  230. system: [],
  231. messages: [{ role: "user", content: "compact" }],
  232. tools: {},
  233. })
  234. const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
  235. expect(value).toBe("compact")
  236. expect(parts.some((part) => part.type === "text" && part.text === "after")).toBe(true)
  237. expect(parts.some((part) => part.type === "step-finish")).toBe(true)
  238. }),
  239. { git: true, config: (url) => providerCfg(url) },
  240. ),
  241. )
  242. it.live("session.processor effect tests capture reasoning from http mock", () =>
  243. provideTmpdirServer(
  244. ({ dir, llm }) =>
  245. Effect.gen(function* () {
  246. const { processors, session, provider } = yield* boot()
  247. yield* llm.push(reply().reason("think").text("done").stop())
  248. const chat = yield* session.create({})
  249. const parent = yield* user(chat.id, "reason")
  250. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  251. const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
  252. const handle = yield* processors.create({
  253. assistantMessage: msg,
  254. sessionID: chat.id,
  255. model: mdl,
  256. })
  257. const value = yield* handle.process({
  258. user: {
  259. id: parent.id,
  260. sessionID: chat.id,
  261. role: "user",
  262. time: parent.time,
  263. agent: parent.agent,
  264. model: { providerID: ref.providerID, modelID: ref.modelID },
  265. } satisfies MessageV2.User,
  266. sessionID: chat.id,
  267. model: mdl,
  268. agent: agent(),
  269. system: [],
  270. messages: [{ role: "user", content: "reason" }],
  271. tools: {},
  272. })
  273. const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
  274. const reasoning = parts.find((part): part is MessageV2.ReasoningPart => part.type === "reasoning")
  275. const text = parts.find((part): part is MessageV2.TextPart => part.type === "text")
  276. expect(value).toBe("continue")
  277. expect(yield* llm.calls).toBe(1)
  278. expect(reasoning?.text).toBe("think")
  279. expect(text?.text).toBe("done")
  280. }),
  281. { git: true, config: (url) => providerCfg(url) },
  282. ),
  283. )
  284. it.live("session.processor effect tests reset reasoning state across retries", () =>
  285. provideTmpdirServer(
  286. ({ dir, llm }) =>
  287. Effect.gen(function* () {
  288. const { processors, session, provider } = yield* boot()
  289. yield* llm.push(reply().reason("one").reset(), reply().reason("two").stop())
  290. const chat = yield* session.create({})
  291. const parent = yield* user(chat.id, "reason")
  292. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  293. const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
  294. const handle = yield* processors.create({
  295. assistantMessage: msg,
  296. sessionID: chat.id,
  297. model: mdl,
  298. })
  299. const value = yield* handle.process({
  300. user: {
  301. id: parent.id,
  302. sessionID: chat.id,
  303. role: "user",
  304. time: parent.time,
  305. agent: parent.agent,
  306. model: { providerID: ref.providerID, modelID: ref.modelID },
  307. } satisfies MessageV2.User,
  308. sessionID: chat.id,
  309. model: mdl,
  310. agent: agent(),
  311. system: [],
  312. messages: [{ role: "user", content: "reason" }],
  313. tools: {},
  314. })
  315. const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
  316. const reasoning = parts.filter((part): part is MessageV2.ReasoningPart => part.type === "reasoning")
  317. expect(value).toBe("continue")
  318. expect(yield* llm.calls).toBe(2)
  319. expect(reasoning.some((part) => part.text === "two")).toBe(true)
  320. expect(reasoning.some((part) => part.text === "onetwo")).toBe(false)
  321. }),
  322. { git: true, config: (url) => providerCfg(url) },
  323. ),
  324. )
  325. it.live("session.processor effect tests do not retry unknown json errors", () =>
  326. provideTmpdirServer(
  327. ({ dir, llm }) =>
  328. Effect.gen(function* () {
  329. const { processors, session, provider } = yield* boot()
  330. yield* llm.error(400, { error: { message: "no_kv_space" } })
  331. const chat = yield* session.create({})
  332. const parent = yield* user(chat.id, "json")
  333. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  334. const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
  335. const handle = yield* processors.create({
  336. assistantMessage: msg,
  337. sessionID: chat.id,
  338. model: mdl,
  339. })
  340. const value = yield* handle.process({
  341. user: {
  342. id: parent.id,
  343. sessionID: chat.id,
  344. role: "user",
  345. time: parent.time,
  346. agent: parent.agent,
  347. model: { providerID: ref.providerID, modelID: ref.modelID },
  348. } satisfies MessageV2.User,
  349. sessionID: chat.id,
  350. model: mdl,
  351. agent: agent(),
  352. system: [],
  353. messages: [{ role: "user", content: "json" }],
  354. tools: {},
  355. })
  356. expect(value).toBe("stop")
  357. expect(yield* llm.calls).toBe(1)
  358. expect(handle.message.error?.name).toBe("APIError")
  359. }),
  360. { git: true, config: (url) => providerCfg(url) },
  361. ),
  362. )
  363. it.live("session.processor effect tests retry recognized structured json errors", () =>
  364. provideTmpdirServer(
  365. ({ dir, llm }) =>
  366. Effect.gen(function* () {
  367. const { processors, session, provider } = yield* boot()
  368. yield* llm.error(429, { type: "error", error: { type: "too_many_requests" } })
  369. yield* llm.text("after")
  370. const chat = yield* session.create({})
  371. const parent = yield* user(chat.id, "retry json")
  372. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  373. const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
  374. const handle = yield* processors.create({
  375. assistantMessage: msg,
  376. sessionID: chat.id,
  377. model: mdl,
  378. })
  379. const value = yield* handle.process({
  380. user: {
  381. id: parent.id,
  382. sessionID: chat.id,
  383. role: "user",
  384. time: parent.time,
  385. agent: parent.agent,
  386. model: { providerID: ref.providerID, modelID: ref.modelID },
  387. } satisfies MessageV2.User,
  388. sessionID: chat.id,
  389. model: mdl,
  390. agent: agent(),
  391. system: [],
  392. messages: [{ role: "user", content: "retry json" }],
  393. tools: {},
  394. })
  395. const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
  396. expect(value).toBe("continue")
  397. expect(yield* llm.calls).toBe(2)
  398. expect(parts.some((part) => part.type === "text" && part.text === "after")).toBe(true)
  399. expect(handle.message.error).toBeUndefined()
  400. }),
  401. { git: true, config: (url) => providerCfg(url) },
  402. ),
  403. )
  404. it.live("session.processor effect tests publish retry status updates", () =>
  405. provideTmpdirServer(
  406. ({ dir, llm }) =>
  407. Effect.gen(function* () {
  408. const { processors, session, provider } = yield* boot()
  409. const bus = yield* Bus.Service
  410. yield* llm.error(503, { error: "boom" })
  411. yield* llm.text("")
  412. const chat = yield* session.create({})
  413. const parent = yield* user(chat.id, "retry")
  414. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  415. const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
  416. const states: number[] = []
  417. const off = yield* bus.subscribeCallback(SessionStatus.Event.Status, (evt) => {
  418. if (evt.properties.sessionID !== chat.id) return
  419. if (evt.properties.status.type === "retry") states.push(evt.properties.status.attempt)
  420. })
  421. const handle = yield* processors.create({
  422. assistantMessage: msg,
  423. sessionID: chat.id,
  424. model: mdl,
  425. })
  426. const value = yield* handle.process({
  427. user: {
  428. id: parent.id,
  429. sessionID: chat.id,
  430. role: "user",
  431. time: parent.time,
  432. agent: parent.agent,
  433. model: { providerID: ref.providerID, modelID: ref.modelID },
  434. } satisfies MessageV2.User,
  435. sessionID: chat.id,
  436. model: mdl,
  437. agent: agent(),
  438. system: [],
  439. messages: [{ role: "user", content: "retry" }],
  440. tools: {},
  441. })
  442. off()
  443. expect(value).toBe("continue")
  444. expect(yield* llm.calls).toBe(2)
  445. expect(states).toStrictEqual([1])
  446. }),
  447. { git: true, config: (url) => providerCfg(url) },
  448. ),
  449. )
  450. it.live("session.processor effect tests compact on structured context overflow", () =>
  451. provideTmpdirServer(
  452. ({ dir, llm }) =>
  453. Effect.gen(function* () {
  454. const { processors, session, provider } = yield* boot()
  455. yield* llm.error(400, { type: "error", error: { code: "context_length_exceeded" } })
  456. const chat = yield* session.create({})
  457. const parent = yield* user(chat.id, "compact json")
  458. const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
  459. const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
  460. const handle = yield* processors.create({
  461. assistantMessage: msg,
  462. sessionID: chat.id,
  463. model: mdl,
  464. })
  465. const value = yield* handle.process({
  466. user: {
  467. id: parent.id,
  468. sessionID: chat.id,
  469. role: "user",
  470. time: parent.time,
  471. agent: parent.agent,
  472. model: { providerID: ref.providerID, modelID: ref.modelID },
  473. } satisfies MessageV2.User,
  474. sessionID: chat.id,
  475. model: mdl,
  476. agent: agent(),
  477. system: [],
  478. messages: [{ role: "user", content: "compact json" }],
  479. tools: {},
  480. })
  481. expect(value).toBe("compact")
  482. expect(yield* llm.calls).toBe(1)
  483. expect(handle.message.error).toBeUndefined()
  484. }),
  485. { git: true, config: (url) => providerCfg(url) },
  486. ),
  487. )
it.live("session.processor effect tests mark pending tools as aborted on cleanup", () =>
  provideTmpdirServer(
    ({ dir, llm }) =>
      Effect.gen(function* () {
        const { processors, session, provider } = yield* boot()
        // The mocked bash tool call never completes, keeping the processor busy.
        yield* llm.toolHang("bash", { cmd: "pwd" })
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "tool abort")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const run = yield* handle
          .process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "tool abort" }],
            tools: {},
          })
          .pipe(Effect.forkChild)
        yield* llm.wait(1)
        // Poll (up to 500ms) until the tool part is persisted before interrupting.
        yield* Effect.promise(async () => {
          const end = Date.now() + 500
          while (Date.now() < end) {
            const parts = await MessageV2.parts(msg.id)
            if (parts.some((part) => part.type === "tool")) return
            await Bun.sleep(10)
          }
        })
        yield* Fiber.interrupt(run)
        const exit = yield* Fiber.await(run)
        // Abort explicitly only when the fiber ended purely by interruption.
        if (Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)) {
          yield* handle.abort()
        }
        const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
        const call = parts.find((part): part is MessageV2.ToolPart => part.type === "tool")
        expect(Exit.isFailure(exit)).toBe(true)
        if (Exit.isFailure(exit)) {
          expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)
        }
        expect(yield* llm.calls).toBe(1)
        // The pending tool call must be finalized as an aborted error with an end time.
        expect(call?.state.status).toBe("error")
        if (call?.state.status === "error") {
          expect(call.state.error).toBe("Tool execution aborted")
          expect(call.state.time.end).toBeDefined()
        }
      }),
    { git: true, config: (url) => providerCfg(url) },
  ),
)
it.live("session.processor effect tests record aborted errors and idle state", () =>
  provideTmpdirServer(
    ({ dir, llm }) =>
      Effect.gen(function* () {
        // Resolved once the session error event for this chat is observed.
        const seen = defer<void>()
        const { processors, session, provider } = yield* boot()
        const bus = yield* Bus.Service
        const sts = yield* SessionStatus.Service
        // The mock server never responds; the request must be interrupted.
        yield* llm.hang
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "abort")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
        const errs: string[] = []
        const off = yield* bus.subscribeCallback(Session.Event.Error, (evt) => {
          if (evt.properties.sessionID !== chat.id) return
          if (!evt.properties.error) return
          errs.push(evt.properties.error.name)
          seen.resolve()
        })
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const run = yield* handle
          .process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "abort" }],
            tools: {},
          })
          .pipe(Effect.forkChild)
        yield* llm.wait(1)
        yield* Fiber.interrupt(run)
        const exit = yield* Fiber.await(run)
        // Abort explicitly only for pure interruptions (mirrors caller behavior).
        if (Exit.isFailure(exit) && Cause.hasInterruptsOnly(exit.cause)) {
          yield* handle.abort()
        }
        // Wait for the error event before reading persisted state.
        yield* Effect.promise(() => seen.promise)
        const stored = yield* Effect.promise(() => MessageV2.get({ sessionID: chat.id, messageID: msg.id }))
        const state = yield* sts.get(chat.id)
        off()
        expect(Exit.isFailure(exit)).toBe(true)
        if (Exit.isFailure(exit)) {
          expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)
        }
        // The aborted error must appear on the handle, in storage, and on the bus,
        // and the session must return to idle.
        expect(handle.message.error?.name).toBe("MessageAbortedError")
        expect(stored.info.role).toBe("assistant")
        if (stored.info.role === "assistant") {
          expect(stored.info.error?.name).toBe("MessageAbortedError")
        }
        expect(state).toMatchObject({ type: "idle" })
        expect(errs).toContain("MessageAbortedError")
      }),
    { git: true, config: (url) => providerCfg(url) },
  ),
)
it.live("session.processor effect tests mark interruptions aborted without manual abort", () =>
  provideTmpdirServer(
    ({ dir, llm }) =>
      Effect.gen(function* () {
        const { processors, session, provider } = yield* boot()
        const sts = yield* SessionStatus.Service
        // The mock server never responds; the fiber is interrupted mid-request.
        yield* llm.hang
        const chat = yield* session.create({})
        const parent = yield* user(chat.id, "interrupt")
        const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
        const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
        const handle = yield* processors.create({
          assistantMessage: msg,
          sessionID: chat.id,
          model: mdl,
        })
        const run = yield* handle
          .process({
            user: {
              id: parent.id,
              sessionID: chat.id,
              role: "user",
              time: parent.time,
              agent: parent.agent,
              model: { providerID: ref.providerID, modelID: ref.modelID },
            } satisfies MessageV2.User,
            sessionID: chat.id,
            model: mdl,
            agent: agent(),
            system: [],
            messages: [{ role: "user", content: "interrupt" }],
            tools: {},
          })
          .pipe(Effect.forkChild)
        yield* llm.wait(1)
        yield* Fiber.interrupt(run)
        const exit = yield* Fiber.await(run)
        // handle.abort() is intentionally NOT called here: the processor's own
        // cleanup must record the aborted error and return the session to idle.
        const stored = yield* Effect.promise(() => MessageV2.get({ sessionID: chat.id, messageID: msg.id }))
        const state = yield* sts.get(chat.id)
        expect(Exit.isFailure(exit)).toBe(true)
        expect(handle.message.error?.name).toBe("MessageAbortedError")
        expect(stored.info.role).toBe("assistant")
        if (stored.info.role === "assistant") {
          expect(stored.info.error?.name).toBe("MessageAbortedError")
        }
        expect(state).toMatchObject({ type: "idle" })
      }),
    { git: true, config: (url) => providerCfg(url) },
  ),
)