import * as path from "path"
import * as vscode from "vscode"
import os from "os"
import crypto from "crypto"
import { v7 as uuidv7 } from "uuid"
import EventEmitter from "events"
import { AskIgnoredError } from "./AskIgnoredError"
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import debounce from "lodash.debounce"
import delay from "delay"
import pWaitFor from "p-wait-for"
import { serializeError } from "serialize-error"
import { Package } from "../../shared/package"
import { formatToolInvocation } from "../tools/helpers/toolResultFormatting"
import {
	type TaskLike,
	type TaskMetadata,
	type TaskEvents,
	type ProviderSettings,
	type TokenUsage,
	type ToolUsage,
	type ToolName,
	type ContextCondense,
	type ContextTruncation,
	type ClineMessage,
	type ClineSay,
	type ClineAsk,
	type ToolProgressStatus,
	type HistoryItem,
	type CreateTaskOptions,
	type ModelInfo,
	type ClineApiReqCancelReason,
	type ClineApiReqInfo,
	RooCodeEventName,
	TelemetryEventName,
	TaskStatus,
	TodoItem,
	getApiProtocol,
	getModelId,
	isRetiredProvider,
	isIdleAsk,
	isInteractiveAsk,
	isResumableAsk,
	QueuedMessage,
	DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
	DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
	MAX_CHECKPOINT_TIMEOUT_SECONDS,
	MIN_CHECKPOINT_TIMEOUT_SECONDS,
	ConsecutiveMistakeError,
	MAX_MCP_TOOLS_THRESHOLD,
	countEnabledMcpTools,
} from "@roo-code/types"
import { TelemetryService } from "@roo-code/telemetry"
import { CloudService, BridgeOrchestrator } from "@roo-code/cloud"
// api
import { ApiHandler, ApiHandlerCreateMessageMetadata, buildApiHandler } from "../../api"
import { ApiStream, GroundingSource } from "../../api/transform/stream"
import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
// shared
import { findLastIndex } from "../../shared/array"
import { combineApiRequests } from "../../shared/combineApiRequests"
import { combineCommandSequences } from "../../shared/combineCommandSequences"
import { t } from "../../i18n"
import { getApiMetrics, hasTokenUsageChanged, hasToolUsageChanged } from "../../shared/getApiMetrics"
import { ClineAskResponse } from "../../shared/WebviewMessage"
import { defaultModeSlug, getModeBySlug } from "../../shared/modes"
import { DiffStrategy, type ToolUse, type ToolParamName, toolParamNames } from "../../shared/tools"
import { getModelMaxOutputTokens } from "../../shared/api"
// services
import { McpHub } from "../../services/mcp/McpHub"
import { McpServerManager } from "../../services/mcp/McpServerManager"
import { RepoPerTaskCheckpointService } from "../../services/checkpoints"
// integrations
import { DiffViewProvider } from "../../integrations/editor/DiffViewProvider"
import { findToolName } from "../../integrations/misc/export-markdown"
import { RooTerminalProcess } from "../../integrations/terminal/types"
import { TerminalRegistry } from "../../integrations/terminal/TerminalRegistry"
import { OutputInterceptor } from "../../integrations/terminal/OutputInterceptor"
// utils
import { calculateApiCostAnthropic, calculateApiCostOpenAI } from "../../shared/cost"
import { getWorkspacePath } from "../../utils/path"
import { sanitizeToolUseId } from "../../utils/tool-id"
import { getTaskDirectoryPath } from "../../utils/storage"
// prompts
import { formatResponse } from "../prompts/responses"
import { SYSTEM_PROMPT } from "../prompts/system"
import { buildNativeToolsArrayWithRestrictions } from "./build-tools"
// core modules
import { ToolRepetitionDetector } from "../tools/ToolRepetitionDetector"
import { restoreTodoListForTask } from "../tools/UpdateTodoListTool"
import { FileContextTracker } from "../context-tracking/FileContextTracker"
import { RooIgnoreController } from "../ignore/RooIgnoreController"
import { RooProtectedController } from "../protect/RooProtectedController"
import { type AssistantMessageContent, presentAssistantMessage } from "../assistant-message"
import { NativeToolCallParser } from "../assistant-message/NativeToolCallParser"
import { manageContext, willManageContext } from "../context-management"
import { ClineProvider } from "../webview/ClineProvider"
import { MultiSearchReplaceDiffStrategy } from "../diff/strategies/multi-search-replace"
import {
	type ApiMessage,
	readApiMessages,
	saveApiMessages,
	readTaskMessages,
	saveTaskMessages,
	taskMetadata,
} from "../task-persistence"
import { getEnvironmentDetails } from "../environment/getEnvironmentDetails"
import { checkContextWindowExceededError } from "../context/context-management/context-error-handling"
import {
	type CheckpointDiffOptions,
	type CheckpointRestoreOptions,
	getCheckpointService,
	checkpointSave,
	checkpointRestore,
	checkpointDiff,
} from "../checkpoints"
import { processUserContentMentions } from "../mentions/processUserContentMentions"
import { getMessagesSinceLastSummary, summarizeConversation, getEffectiveApiHistory } from "../condense"
import { MessageQueueService } from "../message-queue/MessageQueueService"
import { AutoApprovalHandler, checkAutoApproval } from "../auto-approval"
import { MessageManager } from "../message-manager"
import { validateAndFixToolResultIds } from "./validateToolResultIds"
import { mergeConsecutiveApiMessages } from "./mergeConsecutiveApiMessages"

const MAX_EXPONENTIAL_BACKOFF_SECONDS = 600 // 10 minutes
const DEFAULT_USAGE_COLLECTION_TIMEOUT_MS = 5000 // 5 seconds
const FORCED_CONTEXT_REDUCTION_PERCENT = 75 // Keep 75% of context (remove 25%) on context window errors
const MAX_CONTEXT_WINDOW_RETRIES = 3 // Maximum retries for context window errors

export interface TaskOptions extends CreateTaskOptions {
	provider: ClineProvider
	apiConfiguration: ProviderSettings
	enableCheckpoints?: boolean
	checkpointTimeout?: number
	enableBridge?: boolean
	consecutiveMistakeLimit?: number
	task?: string
	images?: string[]
	historyItem?: HistoryItem
	experiments?: Record<string, boolean>
	startTask?: boolean
	rootTask?: Task
	parentTask?: Task
	taskNumber?: number
	onCreated?: (task: Task) => void
	initialTodos?: TodoItem[]
	workspacePath?: string
	/** Initial status for the task's history item (e.g., "active" for child tasks) */
	initialStatus?: "active" | "delegated" | "completed"
}

export class Task extends EventEmitter<TaskEvents> implements TaskLike {
	readonly taskId: string
	readonly rootTaskId?: string
	readonly parentTaskId?: string
	childTaskId?: string
	pendingNewTaskToolCallId?: string
	readonly instanceId: string
	readonly metadata: TaskMetadata
	todoList?: TodoItem[]
	readonly rootTask: Task | undefined = undefined
	readonly parentTask: Task | undefined = undefined
	readonly taskNumber: number
	readonly workspacePath: string

	/**
	 * The mode associated with this task. Persisted across sessions
	 * to maintain user context when reopening tasks from history.
	 *
	 * ## Lifecycle
	 *
	 * ### For new tasks:
	 * 1. Initially `undefined` during construction
	 * 2. Asynchronously initialized from provider state via `initializeTaskMode()`
	 * 3. Falls back to `defaultModeSlug` if provider state is unavailable
	 *
	 * ### For history items:
	 * 1. Immediately set from `historyItem.mode` during construction
	 * 2. Falls back to `defaultModeSlug` if mode is not stored in history
	 *
	 * ## Important
	 * This property should NOT be accessed directly until the `taskModeReady` promise resolves.
	 * Use `getTaskMode()` for async access or the `taskMode` getter for sync access after initialization.
	 *
	 * @private
	 * @see {@link getTaskMode} - For safe async access
	 * @see {@link taskMode} - For sync access after initialization
	 * @see {@link waitForModeInitialization} - To ensure initialization is complete
	 */
	private _taskMode: string | undefined

	/**
	 * Promise that resolves when the task mode has been initialized.
	 * This ensures async mode initialization completes before the task is used.
	 *
	 * ## Purpose
	 * - Prevents race conditions when accessing the task mode
	 * - Ensures provider state is properly loaded before mode-dependent operations
	 * - Provides a synchronization point for async initialization
	 *
	 * ## Resolution timing
	 * - For history items: Resolves immediately (sync initialization)
	 * - For new tasks: Resolves after provider state is fetched (async initialization)
	 *
	 * @private
	 * @see {@link waitForModeInitialization} - Public method to await this promise
	 */
	private taskModeReady: Promise<void>

	/**
	 * The API configuration name (provider profile) associated with this task.
	 * Persisted across sessions to maintain the provider profile when reopening tasks from history.
	 *
	 * ## Lifecycle
	 *
	 * ### For new tasks:
	 * 1. Initially `undefined` during construction
	 * 2. Asynchronously initialized from provider state via `initializeTaskApiConfigName()`
	 * 3. Falls back to "default" if provider state is unavailable
	 *
	 * ### For history items:
	 * 1. Immediately set from `historyItem.apiConfigName` during construction
	 * 2. Falls back to `undefined` if not stored in history (for backward compatibility)
	 *
	 * ## Important
	 * If you need a non-`undefined` provider profile (e.g., for profile-dependent operations),
	 * wait for `taskApiConfigReady` first (or use `getTaskApiConfigName()`).
	 * The sync `taskApiConfigName` getter may return `undefined` for backward compatibility.
	 *
	 * @private
	 * @see {@link getTaskApiConfigName} - For safe async access
	 * @see {@link taskApiConfigName} - For sync access after initialization
	 */
	private _taskApiConfigName: string | undefined

	/**
	 * Promise that resolves when the task API config name has been initialized.
	 * This ensures async API config name initialization completes before the task is used.
	 *
	 * ## Purpose
	 * - Prevents race conditions when accessing the task API config name
	 * - Ensures provider state is properly loaded before profile-dependent operations
	 * - Provides a synchronization point for async initialization
	 *
	 * ## Resolution timing
	 * - For history items: Resolves immediately (sync initialization)
	 * - For new tasks: Resolves after provider state is fetched (async initialization)
	 *
	 * @private
	 */
	private taskApiConfigReady: Promise<void>

	providerRef: WeakRef<ClineProvider>
	private readonly globalStoragePath: string
	abort: boolean = false
	currentRequestAbortController?: AbortController
	skipPrevResponseIdOnce: boolean = false

	// TaskStatus
	idleAsk?: ClineMessage
	resumableAsk?: ClineMessage
	interactiveAsk?: ClineMessage
	didFinishAbortingStream = false
	abandoned = false
	abortReason?: ClineApiReqCancelReason
	isInitialized = false
	isPaused: boolean = false

	// API
	apiConfiguration: ProviderSettings
	api: ApiHandler
	private static lastGlobalApiRequestTime?: number
	private autoApprovalHandler: AutoApprovalHandler

	/**
	 * Reset the global API request timestamp. This should only be used for testing.
	 * @internal
	 */
	static resetGlobalApiRequestTime(): void {
		Task.lastGlobalApiRequestTime = undefined
	}

	toolRepetitionDetector: ToolRepetitionDetector
	rooIgnoreController?: RooIgnoreController
	rooProtectedController?: RooProtectedController
	fileContextTracker: FileContextTracker
	terminalProcess?: RooTerminalProcess

	// Editing
	diffViewProvider: DiffViewProvider
	diffStrategy?: DiffStrategy
	didEditFile: boolean = false

	// LLM Messages & Chat Messages
	apiConversationHistory: ApiMessage[] = []
	clineMessages: ClineMessage[] = []

	// Ask
	private askResponse?: ClineAskResponse
	private askResponseText?: string
	private askResponseImages?: string[]
	public lastMessageTs?: number
	private autoApprovalTimeoutRef?: NodeJS.Timeout

	// Tool Use
	consecutiveMistakeCount: number = 0
	consecutiveMistakeLimit: number
	consecutiveMistakeCountForApplyDiff: Map<string, number> = new Map()
	consecutiveMistakeCountForEditFile: Map<string, number> = new Map()
	consecutiveNoToolUseCount: number = 0
	consecutiveNoAssistantMessagesCount: number = 0
	toolUsage: ToolUsage = {}

	// Checkpoints
	enableCheckpoints: boolean
	checkpointTimeout: number
	checkpointService?: RepoPerTaskCheckpointService
	checkpointServiceInitializing = false

	// Task Bridge
	enableBridge: boolean

	// Message Queue Service
	public readonly messageQueueService: MessageQueueService
	private messageQueueStateChangedHandler: (() => void) | undefined

	// Streaming
	isWaitingForFirstChunk = false
	isStreaming = false
	currentStreamingContentIndex = 0
	currentStreamingDidCheckpoint = false
	assistantMessageContent: AssistantMessageContent[] = []
	presentAssistantMessageLocked = false
	presentAssistantMessageHasPendingUpdates = false
	userMessageContent: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam | Anthropic.ToolResultBlockParam)[] = []
	userMessageContentReady = false

	/**
	 * Flag indicating whether the assistant message for the current streaming session
	 * has been saved to API conversation history.
	 *
	 * This is critical for parallel tool calling: tools should NOT execute until
	 * the assistant message is saved. Otherwise, if a tool like `new_task` triggers
	 * `flushPendingToolResultsToHistory()`, the user message with tool_results would
	 * appear BEFORE the assistant message with tool_uses, causing API errors.
	 *
	 * Reset to `false` at the start of each API request.
	 * Set to `true` after the assistant message is saved in `recursivelyMakeClineRequests`.
	 */
	assistantMessageSavedToHistory = false

	/**
	 * Push a tool_result block to userMessageContent, preventing duplicates.
	 * Duplicate tool_use_ids cause API errors.
	 *
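	 * ## Example usage
	 * An illustrative sketch; the `tool_use_id` and content values shown are hypothetical.
	 * ```typescript
	 * const added = task.pushToolResultToUserContent({
	 * 	type: "tool_result",
	 * 	tool_use_id: "toolu_example_123", // hypothetical id
	 * 	content: "File written successfully",
	 * })
	 * if (!added) {
	 * 	// A result for this tool_use_id was already queued; nothing more to do.
	 * }
	 * ```
	 *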
	 * @param toolResult - The tool_result block to add
	 * @returns true if added, false if duplicate was skipped
	 */
	public pushToolResultToUserContent(toolResult: Anthropic.ToolResultBlockParam): boolean {
		const existingResult = this.userMessageContent.find(
			(block): block is Anthropic.ToolResultBlockParam =>
				block.type === "tool_result" && block.tool_use_id === toolResult.tool_use_id,
		)
		if (existingResult) {
			console.warn(
				`[Task#pushToolResultToUserContent] Skipping duplicate tool_result for tool_use_id: ${toolResult.tool_use_id}`,
			)
			return false
		}
		this.userMessageContent.push(toolResult)
		return true
	}

	didRejectTool = false
	didAlreadyUseTool = false
	didToolFailInCurrentTurn = false
	didCompleteReadingStream = false
	private _started = false

	// No streaming parser is required.
	assistantMessageParser?: undefined
	private providerProfileChangeListener?: (config: { name: string; provider?: string }) => void

	// Native tool call streaming state (track which index each tool is at)
	private streamingToolCallIndices: Map<string, number> = new Map()

	// Cached model info for current streaming session (set at start of each API request)
	// This prevents excessive getModel() calls during tool execution
	cachedStreamingModel?: { id: string; info: ModelInfo }

	// Token Usage Cache
	private tokenUsageSnapshot?: TokenUsage
	private tokenUsageSnapshotAt?: number

	// Tool Usage Cache
	private toolUsageSnapshot?: ToolUsage

	// Token Usage Throttling - Debounced emit function
	private readonly TOKEN_USAGE_EMIT_INTERVAL_MS = 2000 // 2 seconds
	private debouncedEmitTokenUsage: ReturnType<typeof debounce>

	// Cloud Sync Tracking
	private cloudSyncedMessageTimestamps: Set<number> = new Set()

	// Initial status for the task's history item (set at creation time to avoid race conditions)
	private readonly initialStatus?: "active" | "delegated" | "completed"

	// MessageManager for high-level message operations (lazy initialized)
	private _messageManager?: MessageManager

	constructor({
		provider,
		apiConfiguration,
		enableCheckpoints = true,
		checkpointTimeout = DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
		enableBridge = false,
		consecutiveMistakeLimit = DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
		task,
		images,
		historyItem,
		experiments: experimentsConfig,
		startTask = true,
		rootTask,
		parentTask,
		taskNumber = -1,
		onCreated,
		initialTodos,
		workspacePath,
		initialStatus,
	}: TaskOptions) {
		super()

		if (startTask && !task && !images && !historyItem) {
			throw new Error("Either historyItem or task/images must be provided")
		}

		if (
			!checkpointTimeout ||
			checkpointTimeout > MAX_CHECKPOINT_TIMEOUT_SECONDS ||
			checkpointTimeout < MIN_CHECKPOINT_TIMEOUT_SECONDS
		) {
			throw new Error(
				"checkpointTimeout must be between " +
					MIN_CHECKPOINT_TIMEOUT_SECONDS +
					" and " +
					MAX_CHECKPOINT_TIMEOUT_SECONDS +
					" seconds",
			)
		}

		this.taskId = historyItem ? historyItem.id : uuidv7()
		this.rootTaskId = historyItem ? historyItem.rootTaskId : rootTask?.taskId
		this.parentTaskId = historyItem ? historyItem.parentTaskId : parentTask?.taskId
		this.childTaskId = undefined

		this.metadata = {
			task: historyItem ? historyItem.task : task,
			images: historyItem ? [] : images,
		}

		// The normal use case is retrying a similar history task with a new workspace.
		this.workspacePath = parentTask
			? parentTask.workspacePath
			: (workspacePath ?? getWorkspacePath(path.join(os.homedir(), "Desktop")))
		this.instanceId = crypto.randomUUID().slice(0, 8)
		this.taskNumber = -1
		this.rooIgnoreController = new RooIgnoreController(this.cwd)
		this.rooProtectedController = new RooProtectedController(this.cwd)
		this.fileContextTracker = new FileContextTracker(provider, this.taskId)

		this.rooIgnoreController.initialize().catch((error) => {
			console.error("Failed to initialize RooIgnoreController:", error)
		})

		this.apiConfiguration = apiConfiguration
		this.api = buildApiHandler(this.apiConfiguration)
		this.autoApprovalHandler = new AutoApprovalHandler()
		this.consecutiveMistakeLimit = consecutiveMistakeLimit ?? DEFAULT_CONSECUTIVE_MISTAKE_LIMIT
		this.providerRef = new WeakRef(provider)
		this.globalStoragePath = provider.context.globalStorageUri.fsPath
		this.diffViewProvider = new DiffViewProvider(this.cwd, this)
		this.enableCheckpoints = enableCheckpoints
		this.checkpointTimeout = checkpointTimeout
		this.enableBridge = enableBridge
		this.parentTask = parentTask
		this.taskNumber = taskNumber
		this.initialStatus = initialStatus

		// Store the task's mode and API config name when it's created.
		// For history items, use the stored values; for new tasks, we'll set them
		// after getting state.
		if (historyItem) {
			this._taskMode = historyItem.mode || defaultModeSlug
			this._taskApiConfigName = historyItem.apiConfigName
			this.taskModeReady = Promise.resolve()
			this.taskApiConfigReady = Promise.resolve()
			TelemetryService.instance.captureTaskRestarted(this.taskId)
		} else {
			// For new tasks, don't set the mode/apiConfigName yet - wait for async initialization.
			this._taskMode = undefined
			this._taskApiConfigName = undefined
			this.taskModeReady = this.initializeTaskMode(provider)
			this.taskApiConfigReady = this.initializeTaskApiConfigName(provider)
			TelemetryService.instance.captureTaskCreated(this.taskId)
		}

		this.assistantMessageParser = undefined
		this.messageQueueService = new MessageQueueService()

		this.messageQueueStateChangedHandler = () => {
			this.emit(RooCodeEventName.TaskUserMessage, this.taskId)
			this.emit(RooCodeEventName.QueuedMessagesUpdated, this.taskId, this.messageQueueService.messages)
			this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
		}
		this.messageQueueService.on("stateChanged", this.messageQueueStateChangedHandler)

		// Listen for provider profile changes to update parser state
		this.setupProviderProfileChangeListener(provider)

		// Set up diff strategy
		this.diffStrategy = new MultiSearchReplaceDiffStrategy()
		this.toolRepetitionDetector = new ToolRepetitionDetector(this.consecutiveMistakeLimit)

		// Initialize todo list if provided
		if (initialTodos && initialTodos.length > 0) {
			this.todoList = initialTodos
		}

		// Initialize the debounced token usage emit function.
		// Uses debounce with maxWait to achieve throttle-like behavior:
		// - leading: true - Emit immediately on first call
		// - trailing: true - Emit final state when updates stop
		// - maxWait - Ensures at most one emit per interval during rapid updates (throttle behavior)
		this.debouncedEmitTokenUsage = debounce(
			(tokenUsage: TokenUsage, toolUsage: ToolUsage) => {
				const tokenChanged = hasTokenUsageChanged(tokenUsage, this.tokenUsageSnapshot)
				const toolChanged = hasToolUsageChanged(toolUsage, this.toolUsageSnapshot)
				if (tokenChanged || toolChanged) {
					this.emit(RooCodeEventName.TaskTokenUsageUpdated, this.taskId, tokenUsage, toolUsage)
					this.tokenUsageSnapshot = tokenUsage
					this.tokenUsageSnapshotAt = this.clineMessages.at(-1)?.ts
					// Deep copy tool usage for snapshot
					this.toolUsageSnapshot = JSON.parse(JSON.stringify(toolUsage))
				}
			},
			this.TOKEN_USAGE_EMIT_INTERVAL_MS,
			{ leading: true, trailing: true, maxWait: this.TOKEN_USAGE_EMIT_INTERVAL_MS },
		)

		onCreated?.(this)

		if (startTask) {
			this._started = true
			if (task || images) {
				this.startTask(task, images)
			} else if (historyItem) {
				this.resumeTaskFromHistory()
			} else {
				throw new Error("Either historyItem or task/images must be provided")
			}
		}
	}

	/**
	 * Initialize the task mode from the provider state.
	 * This method handles async initialization with proper error handling.
	 *
	 * ## Flow
	 * 1. Attempts to fetch the current mode from provider state
	 * 2. Sets `_taskMode` to the fetched mode or `defaultModeSlug` if unavailable
	 * 3. Handles errors gracefully by falling back to the default mode
	 * 4. Logs any initialization errors for debugging
	 *
	 * ## Error handling
	 * - Network failures when fetching provider state
	 * - Provider not yet initialized
	 * - Invalid state structure
	 *
	 * All errors result in a fallback to `defaultModeSlug` so the task can proceed.
	 *
	 * @private
	 * @param provider - The ClineProvider instance to fetch state from
	 * @returns Promise that resolves when initialization is complete
	 */
	private async initializeTaskMode(provider: ClineProvider): Promise<void> {
		try {
			const state = await provider.getState()
			this._taskMode = state?.mode || defaultModeSlug
		} catch (error) {
			// If there's an error getting state, use the default mode.
			this._taskMode = defaultModeSlug
			// Use the provider's log method for better error visibility.
			const errorMessage = `Failed to initialize task mode: ${error instanceof Error ? error.message : String(error)}`
			provider.log(errorMessage)
		}
	}

	/**
	 * Initialize the task API config name from the provider state.
	 * This method handles async initialization with proper error handling.
	 *
	 * ## Flow
	 * 1. Attempts to fetch the current API config name from provider state
	 * 2. Sets `_taskApiConfigName` to the fetched name or "default" if unavailable
	 * 3. Handles errors gracefully by falling back to "default"
	 * 4. Logs any initialization errors for debugging
	 *
	 * ## Error handling
	 * - Network failures when fetching provider state
	 * - Provider not yet initialized
	 * - Invalid state structure
	 *
	 * All errors result in a fallback to "default" so the task can proceed.
	 *
	 * @private
	 * @param provider - The ClineProvider instance to fetch state from
	 * @returns Promise that resolves when initialization is complete
	 */
	private async initializeTaskApiConfigName(provider: ClineProvider): Promise<void> {
		try {
			const state = await provider.getState()
			// Avoid clobbering a newer value that may have been set while awaiting provider state
			// (e.g., the user switches provider profiles immediately after task creation).
			if (this._taskApiConfigName === undefined) {
				this._taskApiConfigName = state?.currentApiConfigName ?? "default"
			}
		} catch (error) {
			// If there's an error getting state, use the default profile (unless a newer value was set).
			if (this._taskApiConfigName === undefined) {
				this._taskApiConfigName = "default"
			}
			// Use the provider's log method for better error visibility.
			const errorMessage = `Failed to initialize task API config name: ${error instanceof Error ? error.message : String(error)}`
			provider.log(errorMessage)
		}
	}

	/**
	 * Sets up a listener for provider profile changes.
	 *
	 * @private
	 * @param provider - The ClineProvider instance to listen to
	 */
	private setupProviderProfileChangeListener(provider: ClineProvider): void {
		// Only set up listener if provider has the on method (may not exist in test mocks)
		if (typeof provider.on !== "function") {
			return
		}

		this.providerProfileChangeListener = async () => {
			try {
				const newState = await provider.getState()
				if (newState?.apiConfiguration) {
					this.updateApiConfiguration(newState.apiConfiguration)
				}
			} catch (error) {
				console.error(
					`[Task#${this.taskId}.${this.instanceId}] Failed to update API configuration on profile change:`,
					error,
				)
			}
		}

		provider.on(RooCodeEventName.ProviderProfileChanged, this.providerProfileChangeListener)
	}

	/**
	 * Wait for the task mode to be initialized before proceeding.
	 * This method ensures that any operations depending on the task mode
	 * will have access to the correct mode value.
	 *
	 * ## When to use
	 * - Before accessing mode-specific configurations
	 * - When switching between tasks with different modes
	 * - Before operations that depend on mode-based permissions
	 *
	 * ## Example usage
	 * ```typescript
	 * // Wait for mode initialization before mode-dependent operations
	 * await task.waitForModeInitialization();
	 * const mode = task.taskMode; // Now safe to access synchronously
	 *
	 * // Or use getTaskMode() for a one-liner
	 * const mode = await task.getTaskMode(); // Internally waits for initialization
	 * ```
	 *
	 * @returns Promise that resolves when the task mode is initialized
	 * @public
	 */
	public async waitForModeInitialization(): Promise<void> {
		return this.taskModeReady
	}

	/**
	 * Get the task mode asynchronously, ensuring it's properly initialized.
	 * This is the recommended way to access the task mode as it guarantees
	 * the mode is available before returning.
	 *
	 * ## Async behavior
	 * - Internally waits for the `taskModeReady` promise to resolve
	 * - Returns the initialized mode or `defaultModeSlug` as a fallback
	 * - Safe to call multiple times - subsequent calls return immediately if already initialized
	 *
	 * ## Example usage
	 * ```typescript
	 * // Safe async access
	 * const mode = await task.getTaskMode();
	 * console.log(`Task is running in ${mode} mode`);
	 *
	 * // Use in conditional logic
	 * if (await task.getTaskMode() === 'architect') {
	 * 	// Perform architect-specific operations
	 * }
	 * ```
	 *
	 * @returns Promise resolving to the task mode string
	 * @public
	 */
	public async getTaskMode(): Promise<string> {
		await this.taskModeReady
		return this._taskMode || defaultModeSlug
	}

	/**
	 * Get the task mode synchronously. This should only be used when you're certain
	 * that the mode has already been initialized (e.g., after waitForModeInitialization).
	 *
	 * ## When to use
	 * - In synchronous contexts where async/await is not available
	 * - After explicitly waiting for initialization via `waitForModeInitialization()`
	 * - In event handlers or callbacks where the mode is guaranteed to be initialized
	 *
	 * ## Example usage
	 * ```typescript
	 * // After ensuring initialization
	 * await task.waitForModeInitialization();
	 * const mode = task.taskMode; // Safe synchronous access
	 *
	 * // In an event handler after the task is started
	 * task.on('taskStarted', () => {
	 * 	console.log(`Task started in ${task.taskMode} mode`); // Safe here
	 * });
	 * ```
	 *
	 * @throws {Error} If the mode hasn't been initialized yet
	 * @returns The task mode string
	 * @public
	 */
	public get taskMode(): string {
		if (this._taskMode === undefined) {
			throw new Error("Task mode accessed before initialization. Use getTaskMode() or wait for taskModeReady.")
		}
		return this._taskMode
	}

	/**
	 * Wait for the task API config name to be initialized before proceeding.
	 * This method ensures that any operations depending on the task's provider profile
	 * will have access to the correct value.
	 *
	 * ## When to use
	 * - Before accessing provider profile-specific configurations
	 * - When switching between tasks with different provider profiles
	 * - Before operations that depend on the provider profile
	 *
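	 * ## Example usage
	 * An illustrative sketch that only uses members defined on this class.
	 * ```typescript
	 * // Ensure the provider profile is resolved before profile-dependent operations.
	 * await task.waitForApiConfigInitialization();
	 * const profileName = task.taskApiConfigName; // May still be undefined for older tasks
	 * ```
	 *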
	 * @returns Promise that resolves when the task API config name is initialized
	 * @public
	 */
	public async waitForApiConfigInitialization(): Promise<void> {
		return this.taskApiConfigReady
	}

	/**
	 * Get the task API config name asynchronously, ensuring it's properly initialized.
	 * This is the recommended way to access the task's provider profile as it guarantees
	 * the value is available before returning.
	 *
	 * ## Async behavior
	 * - Internally waits for the `taskApiConfigReady` promise to resolve
	 * - Returns the initialized API config name or `undefined` as a fallback
	 * - Safe to call multiple times - subsequent calls return immediately if already initialized
	 *
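	 * ## Example usage
	 * An illustrative sketch; falling back to "default" here is the caller's choice, not required by this API.
	 * ```typescript
	 * const profileName = (await task.getTaskApiConfigName()) ?? "default";
	 * console.log(`Task uses provider profile: ${profileName}`);
	 * ```
	 *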
	 * @returns Promise resolving to the task API config name string or undefined
	 * @public
	 */
	public async getTaskApiConfigName(): Promise<string | undefined> {
		await this.taskApiConfigReady
		return this._taskApiConfigName
	}

	/**
	 * Get the task API config name synchronously. This should only be used when you're certain
	 * that the value has already been initialized (e.g., after waitForApiConfigInitialization).
	 *
	 * ## When to use
	 * - In synchronous contexts where async/await is not available
	 * - After explicitly waiting for initialization via `waitForApiConfigInitialization()`
	 * - In event handlers or callbacks where the API config name is guaranteed to be initialized
	 *
	 * Note: Unlike taskMode, this getter does not throw if uninitialized since the API config
	 * name can legitimately be undefined (backward compatibility with tasks created before
	 * this feature was added).
	 *
	 * @returns The task API config name string or undefined
	 * @public
	 */
	public get taskApiConfigName(): string | undefined {
		return this._taskApiConfigName
	}

	/**
	 * Update the task's API config name. This is called when the user switches
	 * provider profiles while a task is active, allowing the task to remember
	 * its new provider profile.
	 *
	 * @param apiConfigName - The new API config name to set
	 * @internal
	 */
	public setTaskApiConfigName(apiConfigName: string | undefined): void {
		this._taskApiConfigName = apiConfigName
	}
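
	/**
	 * Create a Task and start its initial work in one call.
	 *
	 * Returns the new instance together with the promise for the start/resume work so callers
	 * can await completion or attach error handling. A minimal, illustrative sketch (the option
	 * values shown are hypothetical):
	 *
	 * ```typescript
	 * const [task, started] = Task.create({
	 * 	provider,
	 * 	apiConfiguration,
	 * 	task: "Summarize the README", // hypothetical prompt
	 * })
	 * started.catch((error) => console.error("Task failed to start:", error))
	 * ```
	 */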
	static create(options: TaskOptions): [Task, Promise<void>] {
		const instance = new Task({ ...options, startTask: false })
		const { images, task, historyItem } = options

		let promise

		if (images || task) {
			promise = instance.startTask(task, images)
		} else if (historyItem) {
			promise = instance.resumeTaskFromHistory()
		} else {
			throw new Error("Either historyItem or task/images must be provided")
		}

		return [instance, promise]
	}

	// API Messages

	private async getSavedApiConversationHistory(): Promise<ApiMessage[]> {
		return readApiMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
	}
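
	/**
	 * Append a message to the in-memory API conversation history and persist it to disk.
	 *
	 * For assistant messages, provider-reported reasoning artifacts (thinking blocks, reasoning
	 * blocks, encrypted reasoning, and thought signatures) are attached so they can be
	 * round-tripped on later requests. For user messages, tool_result IDs are validated against
	 * the previous effective assistant message, and orphaned tool_results are converted to
	 * plain text blocks.
	 */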
  779. private async addToApiConversationHistory(message: Anthropic.MessageParam, reasoning?: string) {
  780. // Capture the encrypted_content / thought signatures from the provider (e.g., OpenAI Responses API, Google GenAI) if present.
  781. // We only persist data reported by the current response body.
  782. const handler = this.api as ApiHandler & {
  783. getResponseId?: () => string | undefined
  784. getEncryptedContent?: () => { encrypted_content: string; id?: string } | undefined
  785. getThoughtSignature?: () => string | undefined
  786. getSummary?: () => any[] | undefined
  787. getReasoningDetails?: () => any[] | undefined
  788. }
  789. if (message.role === "assistant") {
  790. const responseId = handler.getResponseId?.()
  791. const reasoningData = handler.getEncryptedContent?.()
  792. const thoughtSignature = handler.getThoughtSignature?.()
  793. const reasoningSummary = handler.getSummary?.()
  794. const reasoningDetails = handler.getReasoningDetails?.()
  795. // Only Anthropic's API expects/validates the special `thinking` content block signature.
  796. // Other providers (notably Gemini 3) use different signature semantics (e.g. `thoughtSignature`)
  797. // and require round-tripping the signature in their own format.
  798. const modelId = getModelId(this.apiConfiguration)
  799. const apiProvider = this.apiConfiguration.apiProvider
  800. const apiProtocol = getApiProtocol(
  801. apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined,
  802. modelId,
  803. )
  804. const isAnthropicProtocol = apiProtocol === "anthropic"
  805. // Start from the original assistant message
  806. const messageWithTs: any = {
  807. ...message,
  808. ...(responseId ? { id: responseId } : {}),
  809. ts: Date.now(),
  810. }
  811. // Store reasoning_details array if present (for models like Gemini 3)
  812. if (reasoningDetails) {
  813. messageWithTs.reasoning_details = reasoningDetails
  814. }
  815. // Store reasoning: Anthropic thinking (with signature), plain text (most providers), or encrypted (OpenAI Native)
  816. // Skip if reasoning_details already contains the reasoning (to avoid duplication)
  817. if (isAnthropicProtocol && reasoning && thoughtSignature && !reasoningDetails) {
  818. // Anthropic provider with extended thinking: Store as proper `thinking` block
  819. // This format passes through anthropic-filter.ts and is properly round-tripped
  820. // for interleaved thinking with tool use (required by Anthropic API)
  821. const thinkingBlock = {
  822. type: "thinking",
  823. thinking: reasoning,
  824. signature: thoughtSignature,
  825. }
  826. if (typeof messageWithTs.content === "string") {
  827. messageWithTs.content = [
  828. thinkingBlock,
  829. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  830. ]
  831. } else if (Array.isArray(messageWithTs.content)) {
  832. messageWithTs.content = [thinkingBlock, ...messageWithTs.content]
  833. } else if (!messageWithTs.content) {
  834. messageWithTs.content = [thinkingBlock]
  835. }
  836. } else if (reasoning && !reasoningDetails) {
  837. // Other providers (non-Anthropic): Store as generic reasoning block
  838. const reasoningBlock = {
  839. type: "reasoning",
  840. text: reasoning,
  841. summary: reasoningSummary ?? ([] as any[]),
  842. }
  843. if (typeof messageWithTs.content === "string") {
  844. messageWithTs.content = [
  845. reasoningBlock,
  846. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  847. ]
  848. } else if (Array.isArray(messageWithTs.content)) {
  849. messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
  850. } else if (!messageWithTs.content) {
  851. messageWithTs.content = [reasoningBlock]
  852. }
  853. } else if (reasoningData?.encrypted_content) {
  854. // OpenAI Native encrypted reasoning
  855. const reasoningBlock = {
  856. type: "reasoning",
  857. summary: [] as any[],
  858. encrypted_content: reasoningData.encrypted_content,
  859. ...(reasoningData.id ? { id: reasoningData.id } : {}),
  860. }
  861. if (typeof messageWithTs.content === "string") {
  862. messageWithTs.content = [
  863. reasoningBlock,
  864. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  865. ]
  866. } else if (Array.isArray(messageWithTs.content)) {
  867. messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
  868. } else if (!messageWithTs.content) {
  869. messageWithTs.content = [reasoningBlock]
  870. }
  871. }
  872. // For non-Anthropic providers (e.g., Gemini 3), persist the thought signature as its own
  873. // content block so converters can attach it back to the correct provider-specific fields.
  874. // Note: For Anthropic extended thinking, the signature is already included in the thinking block above.
  875. if (thoughtSignature && !isAnthropicProtocol) {
  876. const thoughtSignatureBlock = {
  877. type: "thoughtSignature",
  878. thoughtSignature,
  879. }
  880. if (typeof messageWithTs.content === "string") {
  881. messageWithTs.content = [
  882. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  883. thoughtSignatureBlock,
  884. ]
  885. } else if (Array.isArray(messageWithTs.content)) {
  886. messageWithTs.content = [...messageWithTs.content, thoughtSignatureBlock]
  887. } else if (!messageWithTs.content) {
  888. messageWithTs.content = [thoughtSignatureBlock]
  889. }
  890. }
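// Persist the enriched assistant message (with any reasoning / signature blocks added above) to the API history.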
  891. this.apiConversationHistory.push(messageWithTs)
  892. } else {
  893. // For user messages, validate tool_result IDs ONLY when the immediately previous *effective* message
  894. // is an assistant message.
  895. //
  896. // If the previous effective message is also a user message (e.g., summary + a new user message),
  897. // validating against any earlier assistant message can incorrectly inject placeholder tool_results.
  898. const effectiveHistoryForValidation = getEffectiveApiHistory(this.apiConversationHistory)
  899. const lastEffective = effectiveHistoryForValidation[effectiveHistoryForValidation.length - 1]
  900. const historyForValidation = lastEffective?.role === "assistant" ? effectiveHistoryForValidation : []
  901. // If the previous effective message is NOT an assistant, convert tool_result blocks to text blocks.
  902. // This prevents orphaned tool_results from being filtered out by getEffectiveApiHistory.
  903. // This can happen when condensing occurs after the assistant sends tool_uses but before
  904. // the user responds - the tool_use blocks get condensed away, leaving orphaned tool_results.
  905. let messageToAdd = message
  906. if (lastEffective?.role !== "assistant" && Array.isArray(message.content)) {
  907. messageToAdd = {
  908. ...message,
  909. content: message.content.map((block) =>
  910. block.type === "tool_result"
  911. ? {
  912. type: "text" as const,
  913. text: `Tool result:\n${typeof block.content === "string" ? block.content : JSON.stringify(block.content)}`,
  914. }
  915. : block,
  916. ),
  917. }
  918. }
  919. const validatedMessage = validateAndFixToolResultIds(messageToAdd, historyForValidation)
  920. const messageWithTs = { ...validatedMessage, ts: Date.now() }
  921. this.apiConversationHistory.push(messageWithTs)
  922. }
  923. await this.saveApiConversationHistory()
  924. }
  925. // NOTE: We intentionally do NOT mutate stored messages to merge consecutive user turns.
  926. // For API requests, consecutive same-role messages are merged via mergeConsecutiveApiMessages()
  927. // so rewind/edit behavior can still reference original message boundaries.
  928. async overwriteApiConversationHistory(newHistory: ApiMessage[]) {
  929. this.apiConversationHistory = newHistory
  930. await this.saveApiConversationHistory()
  931. }
  932. /**
  933. * Flush any pending tool results to the API conversation history.
  934. *
  935. * This is critical when the task is about to be
  936. * delegated (e.g., via new_task). Before delegation, if other tools were
  937. * called in the same turn before new_task, their tool_result blocks are
  938. * accumulated in `userMessageContent` but haven't been saved to the API
  939. * history yet. If we don't flush them before the parent is disposed,
  940. * the API conversation will be incomplete and cause 400 errors when
  941. * the parent resumes (missing tool_result for tool_use blocks).
  942. *
  943. * NOTE: The assistant message is typically already in history by the time
  944. * tools execute (added in recursivelyMakeClineRequests after streaming completes).
  945. * So we usually only need to flush the pending user message with tool_results.
  946. */
  947. public async flushPendingToolResultsToHistory(): Promise<boolean> {
  948. // Only flush if there's actually pending content to save
  949. if (this.userMessageContent.length === 0) {
  950. return true
  951. }
  952. // CRITICAL: Wait for the assistant message to be saved to API history first.
  953. // Without this, tool_result blocks would appear BEFORE tool_use blocks in the
  954. // conversation history, causing API errors like:
  955. // "unexpected `tool_use_id` found in `tool_result` blocks"
  956. //
  957. // This can happen when parallel tools are called (e.g., update_todo_list + new_task).
  958. // Tools execute during streaming via presentAssistantMessage, BEFORE the assistant
  959. // message is saved. When new_task triggers delegation, it calls this method to
  960. // flush pending results - but the assistant message hasn't been saved yet.
  961. //
  962. // The assistantMessageSavedToHistory flag is:
  963. // - Reset to false at the start of each API request
  964. // - Set to true after the assistant message is saved in recursivelyMakeClineRequests
  965. if (!this.assistantMessageSavedToHistory) {
  966. await pWaitFor(() => this.assistantMessageSavedToHistory || this.abort, {
  967. interval: 50,
  968. timeout: 30_000, // 30 second timeout as safety net
  969. }).catch(() => {
  970. // If timeout or abort, log and proceed anyway to avoid hanging
  971. console.warn(
  972. `[Task#${this.taskId}] flushPendingToolResultsToHistory: timed out waiting for assistant message to be saved`,
  973. )
  974. })
  975. }
  976. // If task was aborted while waiting, don't flush
  977. if (this.abort) {
  978. return false
  979. }
  980. // Save the user message with tool_result blocks
  981. const userMessage: Anthropic.MessageParam = {
  982. role: "user",
  983. content: this.userMessageContent,
  984. }
  985. // Validate and fix tool_result IDs when the previous *effective* message is an assistant message.
  986. const effectiveHistoryForValidation = getEffectiveApiHistory(this.apiConversationHistory)
  987. const lastEffective = effectiveHistoryForValidation[effectiveHistoryForValidation.length - 1]
  988. const historyForValidation = lastEffective?.role === "assistant" ? effectiveHistoryForValidation : []
  989. const validatedMessage = validateAndFixToolResultIds(userMessage, historyForValidation)
  990. const userMessageWithTs = { ...validatedMessage, ts: Date.now() }
  991. this.apiConversationHistory.push(userMessageWithTs as ApiMessage)
  992. const saved = await this.saveApiConversationHistory()
  993. if (saved) {
  994. // Clear the pending content since it's now saved
  995. this.userMessageContent = []
  996. } else {
  997. console.warn(
  998. `[Task#${this.taskId}] flushPendingToolResultsToHistory: save failed, retaining pending tool results in memory`,
  999. )
  1000. }
  1001. return saved
  1002. }
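// Persist the API conversation history to disk. Returns false instead of throwing so callers
// (e.g. flushPendingToolResultsToHistory) can decide how to handle a failed save.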
  1003. private async saveApiConversationHistory(): Promise<boolean> {
  1004. try {
  1005. await saveApiMessages({
  1006. messages: structuredClone(this.apiConversationHistory),
  1007. taskId: this.taskId,
  1008. globalStoragePath: this.globalStoragePath,
  1009. })
  1010. return true
  1011. } catch (error) {
  1012. console.error("Failed to save API conversation history:", error)
  1013. return false
  1014. }
  1015. }
  1016. /**
  1017. * Public wrapper to retry saving the API conversation history.
  1018. * Uses exponential backoff: up to 3 attempts with delays of 100 ms, 500 ms, 1500 ms.
  1019. * Used by delegation flow when flushPendingToolResultsToHistory reports failure.
  1020. */
  1021. public async retrySaveApiConversationHistory(): Promise<boolean> {
  1022. const delays = [100, 500, 1500]
  1023. for (let attempt = 0; attempt < delays.length; attempt++) {
  1024. await new Promise<void>((resolve) => setTimeout(resolve, delays[attempt]))
  1025. console.warn(
  1026. `[Task#${this.taskId}] retrySaveApiConversationHistory: retry attempt ${attempt + 1}/${delays.length}`,
  1027. )
  1028. const success = await this.saveApiConversationHistory()
  1029. if (success) {
  1030. return true
  1031. }
  1032. }
  1033. return false
  1034. }
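// A minimal usage sketch from a hypothetical delegation caller (everything except the two
// methods shown is an assumption for illustration, not part of this class):
//
//   const flushed = await task.flushPendingToolResultsToHistory()
//   if (!flushed) {
//       const recovered = await task.retrySaveApiConversationHistory()
//       if (!recovered) {
//           // Surface the failure rather than silently losing pending tool results.
//           console.error(`[Task#${task.taskId}] could not persist pending tool results`)
//       }
//   }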
  1035. // Cline Messages
  1036. private async getSavedClineMessages(): Promise<ClineMessage[]> {
  1037. return readTaskMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
  1038. }
  1039. private async addToClineMessages(message: ClineMessage) {
  1040. this.clineMessages.push(message)
  1041. const provider = this.providerRef.deref()
  1042. // Avoid resending large, mostly-static fields (notably taskHistory) on every chat message update.
  1043. // taskHistory is maintained in-memory in the webview and updated via taskHistoryItemUpdated.
  1044. await provider?.postStateToWebviewWithoutTaskHistory()
  1045. this.emit(RooCodeEventName.Message, { action: "created", message })
  1046. await this.saveClineMessages()
  1047. const shouldCaptureMessage = message.partial !== true && CloudService.isEnabled()
  1048. if (shouldCaptureMessage) {
  1049. CloudService.instance.captureEvent({
  1050. event: TelemetryEventName.TASK_MESSAGE,
  1051. properties: { taskId: this.taskId, message },
  1052. })
  1053. // Track that this message has been synced to cloud
  1054. this.cloudSyncedMessageTimestamps.add(message.ts)
  1055. }
  1056. }
  1057. public async overwriteClineMessages(newMessages: ClineMessage[]) {
  1058. this.clineMessages = newMessages
  1059. restoreTodoListForTask(this)
  1060. await this.saveClineMessages()
  1061. // When overwriting messages (e.g., during task resume), repopulate the cloud sync tracking Set
  1062. // with timestamps from all non-partial messages to prevent re-syncing previously synced messages
  1063. this.cloudSyncedMessageTimestamps.clear()
  1064. for (const msg of newMessages) {
  1065. if (msg.partial !== true) {
  1066. this.cloudSyncedMessageTimestamps.add(msg.ts)
  1067. }
  1068. }
  1069. }
  1070. private async updateClineMessage(message: ClineMessage) {
  1071. const provider = this.providerRef.deref()
  1072. await provider?.postMessageToWebview({ type: "messageUpdated", clineMessage: message })
  1073. this.emit(RooCodeEventName.Message, { action: "updated", message })
  1074. // Check if we should sync to cloud and haven't already synced this message
  1075. const shouldCaptureMessage = message.partial !== true && CloudService.isEnabled()
  1076. const hasNotBeenSynced = !this.cloudSyncedMessageTimestamps.has(message.ts)
  1077. if (shouldCaptureMessage && hasNotBeenSynced) {
  1078. CloudService.instance.captureEvent({
  1079. event: TelemetryEventName.TASK_MESSAGE,
  1080. properties: { taskId: this.taskId, message },
  1081. })
  1082. // Track that this message has been synced to cloud
  1083. this.cloudSyncedMessageTimestamps.add(message.ts)
  1084. }
  1085. }
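// Persist chat messages and refresh derived task metadata (history item + token usage).
// Token-usage updates go through the debounced emitter; returns false on failure.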
  1086. private async saveClineMessages(): Promise<boolean> {
  1087. try {
  1088. await saveTaskMessages({
  1089. messages: structuredClone(this.clineMessages),
  1090. taskId: this.taskId,
  1091. globalStoragePath: this.globalStoragePath,
  1092. })
  1093. if (this._taskApiConfigName === undefined) {
  1094. await this.taskApiConfigReady
  1095. }
  1096. const { historyItem, tokenUsage } = await taskMetadata({
  1097. taskId: this.taskId,
  1098. rootTaskId: this.rootTaskId,
  1099. parentTaskId: this.parentTaskId,
  1100. taskNumber: this.taskNumber,
  1101. messages: this.clineMessages,
  1102. globalStoragePath: this.globalStoragePath,
  1103. workspace: this.cwd,
  1104. mode: this._taskMode || defaultModeSlug, // Use the task's own mode, not the current provider mode.
  1105. apiConfigName: this._taskApiConfigName, // Use the task's own provider profile, not the current provider profile.
  1106. initialStatus: this.initialStatus,
  1107. })
  1108. // Emit token/tool usage updates using debounced function
  1109. // The debounce with maxWait ensures:
  1110. // - Immediate first emit (leading: true)
  1111. // - At most one emit per interval during rapid updates (maxWait)
  1112. // - Final state is emitted when updates stop (trailing: true)
  1113. this.debouncedEmitTokenUsage(tokenUsage, this.toolUsage)
  1114. await this.providerRef.deref()?.updateTaskHistory(historyItem)
  1115. return true
  1116. } catch (error) {
  1117. console.error("Failed to save Roo messages:", error)
  1118. return false
  1119. }
  1120. }
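// Find the most recent ClineMessage with the given timestamp, scanning from the end since
// lookups typically target a recently added message.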
  1121. private findMessageByTimestamp(ts: number): ClineMessage | undefined {
  1122. for (let i = this.clineMessages.length - 1; i >= 0; i--) {
  1123. if (this.clineMessages[i].ts === ts) {
  1124. return this.clineMessages[i]
  1125. }
  1126. }
  1127. return undefined
  1128. }
1129. // Note that `partial` has three valid states: true (partial message),
1130. // false (completion of a partial message), and undefined (individual,
1131. // complete message).
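// A minimal caller sketch (hypothetical; `toolInfo` and the handling shown are assumptions
// for illustration only):
//
//   try {
//       const { response, text, images } = await this.ask("tool", JSON.stringify(toolInfo))
//       if (response !== "yesButtonClicked") {
//           // Treat as a rejection and feed `text`/`images` back to the model.
//       }
//   } catch (e) {
//       if (!(e instanceof AskIgnoredError)) throw e
//       // A newer ask superseded this one, or a partial update was absorbed; just move on.
//   }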
  1132. async ask(
  1133. type: ClineAsk,
  1134. text?: string,
  1135. partial?: boolean,
  1136. progressStatus?: ToolProgressStatus,
  1137. isProtected?: boolean,
  1138. ): Promise<{ response: ClineAskResponse; text?: string; images?: string[] }> {
  1139. // If this Cline instance was aborted by the provider, then the only
  1140. // thing keeping us alive is a promise still running in the background,
  1141. // in which case we don't want to send its result to the webview as it
  1142. // is attached to a new instance of Cline now. So we can safely ignore
  1143. // the result of any active promises, and this class will be
  1144. // deallocated. (Although we set Cline = undefined in provider, that
  1145. // simply removes the reference to this instance, but the instance is
  1146. // still alive until this promise resolves or rejects.)
  1147. if (this.abort) {
  1148. throw new Error(`[RooCode#ask] task ${this.taskId}.${this.instanceId} aborted`)
  1149. }
  1150. let askTs: number
  1151. if (partial !== undefined) {
  1152. const lastMessage = this.clineMessages.at(-1)
  1153. const isUpdatingPreviousPartial =
  1154. lastMessage && lastMessage.partial && lastMessage.type === "ask" && lastMessage.ask === type
  1155. if (partial) {
  1156. if (isUpdatingPreviousPartial) {
  1157. // Existing partial message, so update it.
  1158. lastMessage.text = text
  1159. lastMessage.partial = partial
  1160. lastMessage.progressStatus = progressStatus
  1161. lastMessage.isProtected = isProtected
  1162. // TODO: Be more efficient about saving and posting only new
  1163. // data or one whole message at a time so ignore partial for
  1164. // saves, and only post parts of partial message instead of
  1165. // whole array in new listener.
  1166. this.updateClineMessage(lastMessage)
  1167. // console.log("Task#ask: current ask promise was ignored (#1)")
  1168. throw new AskIgnoredError("updating existing partial")
  1169. } else {
  1170. // This is a new partial message, so add it with partial
  1171. // state.
  1172. askTs = Date.now()
  1173. this.lastMessageTs = askTs
  1174. await this.addToClineMessages({ ts: askTs, type: "ask", ask: type, text, partial, isProtected })
  1175. // console.log("Task#ask: current ask promise was ignored (#2)")
  1176. throw new AskIgnoredError("new partial")
  1177. }
  1178. } else {
  1179. if (isUpdatingPreviousPartial) {
  1180. // This is the complete version of a previously partial
  1181. // message, so replace the partial with the complete version.
  1182. this.askResponse = undefined
  1183. this.askResponseText = undefined
  1184. this.askResponseImages = undefined
  1185. // Bug for the history books:
  1186. // In the webview we use the ts as the chatrow key for the
  1187. // virtuoso list. Since we would update this ts right at the
  1188. // end of streaming, it would cause the view to flicker. The
1189. // key prop has to be stable, otherwise React has trouble
  1190. // reconciling items between renders, causing unmounting and
  1191. // remounting of components (flickering).
  1192. // The lesson here is if you see flickering when rendering
  1193. // lists, it's likely because the key prop is not stable.
  1194. // So in this case we must make sure that the message ts is
  1195. // never altered after first setting it.
  1196. askTs = lastMessage.ts
  1197. this.lastMessageTs = askTs
  1198. lastMessage.text = text
  1199. lastMessage.partial = false
  1200. lastMessage.progressStatus = progressStatus
  1201. lastMessage.isProtected = isProtected
  1202. await this.saveClineMessages()
  1203. this.updateClineMessage(lastMessage)
  1204. } else {
  1205. // This is a new and complete message, so add it like normal.
  1206. this.askResponse = undefined
  1207. this.askResponseText = undefined
  1208. this.askResponseImages = undefined
  1209. askTs = Date.now()
  1210. this.lastMessageTs = askTs
  1211. await this.addToClineMessages({ ts: askTs, type: "ask", ask: type, text, isProtected })
  1212. }
  1213. }
  1214. } else {
  1215. // This is a new non-partial message, so add it like normal.
  1216. this.askResponse = undefined
  1217. this.askResponseText = undefined
  1218. this.askResponseImages = undefined
  1219. askTs = Date.now()
  1220. this.lastMessageTs = askTs
  1221. await this.addToClineMessages({ ts: askTs, type: "ask", ask: type, text, isProtected })
  1222. }
  1223. let timeouts: NodeJS.Timeout[] = []
1224. // Automatically approve or deny the ask according to the user's settings.
  1225. const provider = this.providerRef.deref()
  1226. const state = provider ? await provider.getState() : undefined
  1227. const approval = await checkAutoApproval({ state, ask: type, text, isProtected })
  1228. if (approval.decision === "approve") {
  1229. this.approveAsk()
  1230. } else if (approval.decision === "deny") {
  1231. this.denyAsk()
  1232. } else if (approval.decision === "timeout") {
  1233. // Store the auto-approval timeout so it can be cancelled if user interacts
  1234. this.autoApprovalTimeoutRef = setTimeout(() => {
  1235. const { askResponse, text, images } = approval.fn()
  1236. this.handleWebviewAskResponse(askResponse, text, images)
  1237. this.autoApprovalTimeoutRef = undefined
  1238. }, approval.timeout)
  1239. timeouts.push(this.autoApprovalTimeoutRef)
  1240. }
  1241. // The state is mutable if the message is complete and the task will
  1242. // block (via the `pWaitFor`).
  1243. const isBlocking = !(this.askResponse !== undefined || this.lastMessageTs !== askTs)
  1244. const isMessageQueued = !this.messageQueueService.isEmpty()
  1245. const isStatusMutable = !partial && isBlocking && !isMessageQueued && approval.decision === "ask"
  1246. if (isStatusMutable) {
  1247. const statusMutationTimeout = 2_000
  1248. if (isInteractiveAsk(type)) {
  1249. timeouts.push(
  1250. setTimeout(() => {
  1251. const message = this.findMessageByTimestamp(askTs)
  1252. if (message) {
  1253. this.interactiveAsk = message
  1254. this.emit(RooCodeEventName.TaskInteractive, this.taskId)
  1255. provider?.postMessageToWebview({ type: "interactionRequired" })
  1256. }
  1257. }, statusMutationTimeout),
  1258. )
  1259. } else if (isResumableAsk(type)) {
  1260. timeouts.push(
  1261. setTimeout(() => {
  1262. const message = this.findMessageByTimestamp(askTs)
  1263. if (message) {
  1264. this.resumableAsk = message
  1265. this.emit(RooCodeEventName.TaskResumable, this.taskId)
  1266. }
  1267. }, statusMutationTimeout),
  1268. )
  1269. } else if (isIdleAsk(type)) {
  1270. timeouts.push(
  1271. setTimeout(() => {
  1272. const message = this.findMessageByTimestamp(askTs)
  1273. if (message) {
  1274. this.idleAsk = message
  1275. this.emit(RooCodeEventName.TaskIdle, this.taskId)
  1276. }
  1277. }, statusMutationTimeout),
  1278. )
  1279. }
  1280. } else if (isMessageQueued) {
  1281. const message = this.messageQueueService.dequeueMessage()
  1282. if (message) {
  1283. // Check if this is a tool approval ask that needs to be handled.
  1284. if (type === "tool" || type === "command" || type === "use_mcp_server") {
  1285. // For tool approvals, we need to approve first, then send
  1286. // the message if there's text/images.
  1287. this.handleWebviewAskResponse("yesButtonClicked", message.text, message.images)
  1288. } else {
  1289. // For other ask types (like followup or command_output), fulfill the ask
  1290. // directly.
  1291. this.handleWebviewAskResponse("messageResponse", message.text, message.images)
  1292. }
  1293. }
  1294. }
  1295. // Wait for askResponse to be set
  1296. await pWaitFor(
  1297. () => {
  1298. if (this.askResponse !== undefined || this.lastMessageTs !== askTs) {
  1299. return true
  1300. }
  1301. // If a queued message arrives while we're blocked on an ask (e.g. a follow-up
  1302. // suggestion click that was incorrectly queued due to UI state), consume it
  1303. // immediately so the task doesn't hang.
  1304. if (!this.messageQueueService.isEmpty()) {
  1305. const message = this.messageQueueService.dequeueMessage()
  1306. if (message) {
  1307. // If this is a tool approval ask, we need to approve first (yesButtonClicked)
  1308. // and include any queued text/images.
  1309. if (type === "tool" || type === "command" || type === "use_mcp_server") {
  1310. this.handleWebviewAskResponse("yesButtonClicked", message.text, message.images)
  1311. } else {
  1312. this.handleWebviewAskResponse("messageResponse", message.text, message.images)
  1313. }
  1314. }
  1315. }
  1316. return false
  1317. },
  1318. { interval: 100 },
  1319. )
  1320. if (this.lastMessageTs !== askTs) {
  1321. // Could happen if we send multiple asks in a row i.e. with
  1322. // command_output. It's important that when we know an ask could
  1323. // fail, it is handled gracefully.
  1324. throw new AskIgnoredError("superseded")
  1325. }
  1326. const result = { response: this.askResponse!, text: this.askResponseText, images: this.askResponseImages }
  1327. this.askResponse = undefined
  1328. this.askResponseText = undefined
  1329. this.askResponseImages = undefined
  1330. // Cancel the timeouts if they are still running.
  1331. timeouts.forEach((timeout) => clearTimeout(timeout))
  1332. // Switch back to an active state.
  1333. if (this.idleAsk || this.resumableAsk || this.interactiveAsk) {
  1334. this.idleAsk = undefined
  1335. this.resumableAsk = undefined
  1336. this.interactiveAsk = undefined
  1337. this.emit(RooCodeEventName.TaskActive, this.taskId)
  1338. }
  1339. this.emit(RooCodeEventName.TaskAskResponded)
  1340. return result
  1341. }
  1342. handleWebviewAskResponse(askResponse: ClineAskResponse, text?: string, images?: string[]) {
  1343. // Clear any pending auto-approval timeout when user responds
  1344. this.cancelAutoApprovalTimeout()
  1345. this.askResponse = askResponse
  1346. this.askResponseText = text
  1347. this.askResponseImages = images
  1348. // Create a checkpoint whenever the user sends a message.
  1349. // Use allowEmpty=true to ensure a checkpoint is recorded even if there are no file changes.
  1350. // Suppress the checkpoint_saved chat row for this particular checkpoint to keep the timeline clean.
  1351. if (askResponse === "messageResponse") {
  1352. void this.checkpointSave(false, true)
  1353. }
  1354. // Mark the last follow-up question as answered
  1355. if (askResponse === "messageResponse" || askResponse === "yesButtonClicked") {
  1356. // Find the last unanswered follow-up message using findLastIndex
  1357. const lastFollowUpIndex = findLastIndex(
  1358. this.clineMessages,
  1359. (msg) => msg.type === "ask" && msg.ask === "followup" && !msg.isAnswered,
  1360. )
  1361. if (lastFollowUpIndex !== -1) {
  1362. // Mark this follow-up as answered
  1363. this.clineMessages[lastFollowUpIndex].isAnswered = true
  1364. // Save the updated messages
  1365. this.saveClineMessages().catch((error) => {
  1366. console.error("Failed to save answered follow-up state:", error)
  1367. })
  1368. }
  1369. }
1370. // Mark the last tool-approval ask as answered when the user approves (or auto-approval fires)
  1371. if (askResponse === "yesButtonClicked") {
  1372. const lastToolAskIndex = findLastIndex(
  1373. this.clineMessages,
  1374. (msg) => msg.type === "ask" && msg.ask === "tool" && !msg.isAnswered,
  1375. )
  1376. if (lastToolAskIndex !== -1) {
  1377. this.clineMessages[lastToolAskIndex].isAnswered = true
  1378. void this.updateClineMessage(this.clineMessages[lastToolAskIndex])
  1379. this.saveClineMessages().catch((error) => {
  1380. console.error("Failed to save answered tool-ask state:", error)
  1381. })
  1382. }
  1383. }
  1384. }
  1385. /**
  1386. * Cancel any pending auto-approval timeout.
  1387. * Called when user interacts (types, clicks buttons, etc.) to prevent the timeout from firing.
  1388. */
  1389. public cancelAutoApprovalTimeout(): void {
  1390. if (this.autoApprovalTimeoutRef) {
  1391. clearTimeout(this.autoApprovalTimeoutRef)
  1392. this.autoApprovalTimeoutRef = undefined
  1393. }
  1394. }
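// Convenience wrappers used by auto-approval (and programmatic callers) to resolve a pending ask.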
  1395. public approveAsk({ text, images }: { text?: string; images?: string[] } = {}) {
  1396. this.handleWebviewAskResponse("yesButtonClicked", text, images)
  1397. }
  1398. public denyAsk({ text, images }: { text?: string; images?: string[] } = {}) {
  1399. this.handleWebviewAskResponse("noButtonClicked", text, images)
  1400. }
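// Bumping lastMessageTs makes any ask() currently blocked in pWaitFor wake up and throw
// AskIgnoredError("superseded"), effectively cancelling the pending ask.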
  1401. public supersedePendingAsk(): void {
  1402. this.lastMessageTs = Date.now()
  1403. }
  1404. /**
  1405. * Updates the API configuration and rebuilds the API handler.
  1406. * There is no tool-protocol switching or tool parser swapping.
  1407. *
  1408. * @param newApiConfiguration - The new API configuration to use
  1409. */
  1410. public updateApiConfiguration(newApiConfiguration: ProviderSettings): void {
  1411. // Update the configuration and rebuild the API handler
  1412. this.apiConfiguration = newApiConfiguration
  1413. this.api = buildApiHandler(this.apiConfiguration)
  1414. }
  1415. public async submitUserMessage(
  1416. text: string,
  1417. images?: string[],
  1418. mode?: string,
  1419. providerProfile?: string,
  1420. ): Promise<void> {
  1421. try {
  1422. text = (text ?? "").trim()
  1423. images = images ?? []
  1424. if (text.length === 0 && images.length === 0) {
  1425. return
  1426. }
  1427. const provider = this.providerRef.deref()
  1428. if (provider) {
  1429. if (mode) {
  1430. await provider.setMode(mode)
  1431. }
  1432. if (providerProfile) {
  1433. await provider.setProviderProfile(providerProfile)
  1434. // Update this task's API configuration to match the new profile
  1435. // This ensures the parser state is synchronized with the selected model
  1436. const newState = await provider.getState()
  1437. if (newState?.apiConfiguration) {
  1438. this.updateApiConfiguration(newState.apiConfiguration)
  1439. }
  1440. }
  1441. this.emit(RooCodeEventName.TaskUserMessage, this.taskId)
  1442. // Handle the message directly instead of routing through the webview.
  1443. // This avoids a race condition where the webview's message state hasn't
  1444. // hydrated yet, causing it to interpret the message as a new task request.
  1445. this.handleWebviewAskResponse("messageResponse", text, images)
  1446. } else {
  1447. console.error("[Task#submitUserMessage] Provider reference lost")
  1448. }
  1449. } catch (error) {
  1450. console.error("[Task#submitUserMessage] Failed to submit user message:", error)
  1451. }
  1452. }
  1453. async handleTerminalOperation(terminalOperation: "continue" | "abort") {
  1454. if (terminalOperation === "continue") {
  1455. this.terminalProcess?.continue()
  1456. } else if (terminalOperation === "abort") {
  1457. this.terminalProcess?.abort()
  1458. }
  1459. }
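// Safe wrapper around fileContextTracker.getFilesReadByRoo(): logs and returns undefined on
// failure instead of letting the error propagate into condensing.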
  1460. private async getFilesReadByRooSafely(context: string): Promise<string[] | undefined> {
  1461. try {
  1462. return await this.fileContextTracker.getFilesReadByRoo()
  1463. } catch (error) {
  1464. console.error(`[Task#${context}] Failed to get files read by Roo:`, error)
  1465. return undefined
  1466. }
  1467. }
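// Manually condense the conversation: flush pending tool results, summarize the history via
// summarizeConversation(), overwrite the stored history with the condensed version, and
// surface the result (or an error) as a chat message.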
  1468. public async condenseContext(): Promise<void> {
  1469. // CRITICAL: Flush any pending tool results before condensing
  1470. // to ensure tool_use/tool_result pairs are complete in history
  1471. await this.flushPendingToolResultsToHistory()
  1472. const systemPrompt = await this.getSystemPrompt()
  1473. // Get condensing configuration
  1474. const state = await this.providerRef.deref()?.getState()
  1475. const customCondensingPrompt = state?.customSupportPrompts?.CONDENSE
  1476. const { mode, apiConfiguration } = state ?? {}
  1477. const { contextTokens: prevContextTokens } = this.getTokenUsage()
  1478. // Build tools for condensing metadata (same tools used for normal API calls)
  1479. const provider = this.providerRef.deref()
  1480. let allTools: import("openai").default.Chat.ChatCompletionTool[] = []
  1481. if (provider) {
  1482. const modelInfo = this.api.getModel().info
  1483. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  1484. provider,
  1485. cwd: this.cwd,
  1486. mode,
  1487. customModes: state?.customModes,
  1488. experiments: state?.experiments,
  1489. apiConfiguration,
  1490. disabledTools: state?.disabledTools,
  1491. modelInfo,
  1492. includeAllToolsWithRestrictions: false,
  1493. })
  1494. allTools = toolsResult.tools
  1495. }
  1496. // Build metadata with tools and taskId for the condensing API call
  1497. const metadata: ApiHandlerCreateMessageMetadata = {
  1498. mode,
  1499. taskId: this.taskId,
  1500. ...(allTools.length > 0
  1501. ? {
  1502. tools: allTools,
  1503. tool_choice: "auto",
  1504. parallelToolCalls: true,
  1505. }
  1506. : {}),
  1507. }
  1508. // Generate environment details to include in the condensed summary
  1509. const environmentDetails = await getEnvironmentDetails(this, true)
  1510. const filesReadByRoo = await this.getFilesReadByRooSafely("condenseContext")
  1511. const {
  1512. messages,
  1513. summary,
  1514. cost,
  1515. newContextTokens = 0,
  1516. error,
  1517. errorDetails,
  1518. condenseId,
  1519. } = await summarizeConversation({
  1520. messages: this.apiConversationHistory,
  1521. apiHandler: this.api,
  1522. systemPrompt,
  1523. taskId: this.taskId,
  1524. isAutomaticTrigger: false,
  1525. customCondensingPrompt,
  1526. metadata,
  1527. environmentDetails,
  1528. filesReadByRoo,
  1529. cwd: this.cwd,
  1530. rooIgnoreController: this.rooIgnoreController,
  1531. })
  1532. if (error) {
  1533. await this.say(
  1534. "condense_context_error",
  1535. error,
  1536. undefined /* images */,
  1537. false /* partial */,
  1538. undefined /* checkpoint */,
  1539. undefined /* progressStatus */,
  1540. { isNonInteractive: true } /* options */,
  1541. )
  1542. return
  1543. }
  1544. await this.overwriteApiConversationHistory(messages)
  1545. const contextCondense: ContextCondense = {
  1546. summary,
  1547. cost,
  1548. newContextTokens,
  1549. prevContextTokens,
  1550. condenseId: condenseId!,
  1551. }
  1552. await this.say(
  1553. "condense_context",
  1554. undefined /* text */,
  1555. undefined /* images */,
  1556. false /* partial */,
  1557. undefined /* checkpoint */,
  1558. undefined /* progressStatus */,
  1559. { isNonInteractive: true } /* options */,
  1560. contextCondense,
  1561. )
  1562. // Process any queued messages after condensing completes
  1563. this.processQueuedMessages()
  1564. }
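// Mirrors ask(): `partial` may be true (streaming update), false (completes a previous
// partial message), or undefined (a single, complete message).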
  1565. async say(
  1566. type: ClineSay,
  1567. text?: string,
  1568. images?: string[],
  1569. partial?: boolean,
  1570. checkpoint?: Record<string, unknown>,
  1571. progressStatus?: ToolProgressStatus,
  1572. options: {
  1573. isNonInteractive?: boolean
  1574. } = {},
  1575. contextCondense?: ContextCondense,
  1576. contextTruncation?: ContextTruncation,
  1577. ): Promise<undefined> {
  1578. if (this.abort) {
  1579. throw new Error(`[RooCode#say] task ${this.taskId}.${this.instanceId} aborted`)
  1580. }
  1581. if (partial !== undefined) {
  1582. const lastMessage = this.clineMessages.at(-1)
  1583. const isUpdatingPreviousPartial =
  1584. lastMessage && lastMessage.partial && lastMessage.type === "say" && lastMessage.say === type
  1585. if (partial) {
  1586. if (isUpdatingPreviousPartial) {
  1587. // Existing partial message, so update it.
  1588. lastMessage.text = text
  1589. lastMessage.images = images
  1590. lastMessage.partial = partial
  1591. lastMessage.progressStatus = progressStatus
  1592. this.updateClineMessage(lastMessage)
  1593. } else {
  1594. // This is a new partial message, so add it with partial state.
  1595. const sayTs = Date.now()
  1596. if (!options.isNonInteractive) {
  1597. this.lastMessageTs = sayTs
  1598. }
  1599. await this.addToClineMessages({
  1600. ts: sayTs,
  1601. type: "say",
  1602. say: type,
  1603. text,
  1604. images,
  1605. partial,
  1606. contextCondense,
  1607. contextTruncation,
  1608. })
  1609. }
  1610. } else {
1611. // We now have the complete version of a previously partial message,
1612. // so replace the partial with the complete version.
  1614. if (isUpdatingPreviousPartial) {
  1615. if (!options.isNonInteractive) {
  1616. this.lastMessageTs = lastMessage.ts
  1617. }
  1618. lastMessage.text = text
  1619. lastMessage.images = images
  1620. lastMessage.partial = false
  1621. lastMessage.progressStatus = progressStatus
  1622. // Instead of streaming partialMessage events, we do a save
  1623. // and post like normal to persist to disk.
  1624. await this.saveClineMessages()
  1625. // More performant than an entire `postStateToWebview`.
  1626. this.updateClineMessage(lastMessage)
  1627. } else {
  1628. // This is a new and complete message, so add it like normal.
  1629. const sayTs = Date.now()
  1630. if (!options.isNonInteractive) {
  1631. this.lastMessageTs = sayTs
  1632. }
  1633. await this.addToClineMessages({
  1634. ts: sayTs,
  1635. type: "say",
  1636. say: type,
  1637. text,
  1638. images,
  1639. contextCondense,
  1640. contextTruncation,
  1641. })
  1642. }
  1643. }
  1644. } else {
  1645. // This is a new non-partial message, so add it like normal.
  1646. const sayTs = Date.now()
1647. // A "non-interactive" message is one that the user
  1648. // does not need to respond to. We don't want these message types
  1649. // to trigger an update to `lastMessageTs` since they can be created
  1650. // asynchronously and could interrupt a pending ask.
  1651. if (!options.isNonInteractive) {
  1652. this.lastMessageTs = sayTs
  1653. }
  1654. await this.addToClineMessages({
  1655. ts: sayTs,
  1656. type: "say",
  1657. say: type,
  1658. text,
  1659. images,
  1660. checkpoint,
  1661. contextCondense,
  1662. contextTruncation,
  1663. })
  1664. }
  1665. }
  1666. async sayAndCreateMissingParamError(toolName: ToolName, paramName: string, relPath?: string) {
  1667. await this.say(
  1668. "error",
  1669. `Roo tried to use ${toolName}${
  1670. relPath ? ` for '${relPath.toPosix()}'` : ""
  1671. } without value for required parameter '${paramName}'. Retrying...`,
  1672. )
  1673. return formatResponse.toolError(formatResponse.missingToolParameterError(paramName))
  1674. }
  1675. // Lifecycle
  1676. // Start / Resume / Abort / Dispose
  1677. /**
  1678. * Get enabled MCP tools count for this task.
  1679. * Returns the count along with the number of servers contributing.
  1680. *
  1681. * @returns Object with enabledToolCount and enabledServerCount
  1682. */
  1683. private async getEnabledMcpToolsCount(): Promise<{ enabledToolCount: number; enabledServerCount: number }> {
  1684. try {
  1685. const provider = this.providerRef.deref()
  1686. if (!provider) {
  1687. return { enabledToolCount: 0, enabledServerCount: 0 }
  1688. }
  1689. const { mcpEnabled } = (await provider.getState()) ?? {}
  1690. if (!(mcpEnabled ?? true)) {
  1691. return { enabledToolCount: 0, enabledServerCount: 0 }
  1692. }
  1693. const mcpHub = await McpServerManager.getInstance(provider.context, provider)
  1694. if (!mcpHub) {
  1695. return { enabledToolCount: 0, enabledServerCount: 0 }
  1696. }
  1697. const servers = mcpHub.getServers()
  1698. return countEnabledMcpTools(servers)
  1699. } catch (error) {
  1700. console.error("[Task#getEnabledMcpToolsCount] Error counting MCP tools:", error)
  1701. return { enabledToolCount: 0, enabledServerCount: 0 }
  1702. }
  1703. }
  1704. /**
  1705. * Manually start a **new** task when it was created with `startTask: false`.
  1706. *
  1707. * This fires `startTask` as a background async operation for the
  1708. * `task/images` code-path only. It does **not** handle the
  1709. * `historyItem` resume path (use the constructor with `startTask: true`
  1710. * for that). The primary use-case is in the delegation flow where the
  1711. * parent's metadata must be persisted to globalState **before** the
  1712. * child task begins writing its own history (avoiding a read-modify-write
  1713. * race on globalState).
  1714. */
  1715. public start(): void {
  1716. if (this._started) {
  1717. return
  1718. }
  1719. this._started = true
  1720. const { task, images } = this.metadata
  1721. if (task || images) {
  1722. this.startTask(task ?? undefined, images ?? undefined)
  1723. }
  1724. }
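// Start a brand-new task: reset both message stores, show the user's prompt, warn when too
// many MCP tools are enabled, then kick off the main task loop with the initial user content.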
  1725. private async startTask(task?: string, images?: string[]): Promise<void> {
  1726. try {
  1727. if (this.enableBridge) {
  1728. try {
  1729. await BridgeOrchestrator.subscribeToTask(this)
  1730. } catch (error) {
  1731. console.error(
  1732. `[Task#startTask] BridgeOrchestrator.subscribeToTask() failed: ${error instanceof Error ? error.message : String(error)}`,
  1733. )
  1734. }
  1735. }
  1736. // `conversationHistory` (for API) and `clineMessages` (for webview)
  1737. // need to be in sync.
  1738. // If the extension process were killed, then on restart the
  1739. // `clineMessages` might not be empty, so we need to set it to [] when
  1740. // we create a new Cline client (otherwise webview would show stale
  1741. // messages from previous session).
  1742. this.clineMessages = []
  1743. this.apiConversationHistory = []
  1744. // The todo list is already set in the constructor if initialTodos were provided
  1745. // No need to add any messages - the todoList property is already set
  1746. await this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
  1747. await this.say("text", task, images)
  1748. // Check for too many MCP tools and warn the user
  1749. const { enabledToolCount, enabledServerCount } = await this.getEnabledMcpToolsCount()
  1750. if (enabledToolCount > MAX_MCP_TOOLS_THRESHOLD) {
  1751. await this.say(
  1752. "too_many_tools_warning",
  1753. JSON.stringify({
  1754. toolCount: enabledToolCount,
  1755. serverCount: enabledServerCount,
  1756. threshold: MAX_MCP_TOOLS_THRESHOLD,
  1757. }),
  1758. undefined,
  1759. undefined,
  1760. undefined,
  1761. undefined,
  1762. { isNonInteractive: true },
  1763. )
  1764. }
  1765. this.isInitialized = true
  1766. const imageBlocks: Anthropic.ImageBlockParam[] = formatResponse.imageBlocks(images)
  1767. // Task starting
  1768. await this.initiateTaskLoop([
  1769. {
  1770. type: "text",
  1771. text: `<user_message>\n${task}\n</user_message>`,
  1772. },
  1773. ...imageBlocks,
  1774. ]).catch((error) => {
  1775. // Swallow loop rejection when the task was intentionally abandoned/aborted
  1776. // during delegation or user cancellation to prevent unhandled rejections.
  1777. if (this.abandoned === true || this.abortReason === "user_cancelled") {
  1778. return
  1779. }
  1780. throw error
  1781. })
  1782. } catch (error) {
  1783. // In tests and some UX flows, tasks can be aborted while `startTask` is still
  1784. // initializing. Treat abort/abandon as expected and avoid unhandled rejections.
  1785. if (this.abandoned === true || this.abort === true || this.abortReason === "user_cancelled") {
  1786. return
  1787. }
  1788. throw error
  1789. }
  1790. }
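// Rebuild webview and API state from persisted history, repair any dangling
// tool_use/tool_result pairs, then ask the user whether to resume before re-entering
// the task loop.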
  1791. private async resumeTaskFromHistory() {
  1792. try {
  1793. if (this.enableBridge) {
  1794. try {
  1795. await BridgeOrchestrator.subscribeToTask(this)
  1796. } catch (error) {
  1797. console.error(
  1798. `[Task#resumeTaskFromHistory] BridgeOrchestrator.subscribeToTask() failed: ${error instanceof Error ? error.message : String(error)}`,
  1799. )
  1800. }
  1801. }
  1802. const modifiedClineMessages = await this.getSavedClineMessages()
  1803. // Remove any resume messages that may have been added before.
  1804. const lastRelevantMessageIndex = findLastIndex(
  1805. modifiedClineMessages,
  1806. (m) => !(m.ask === "resume_task" || m.ask === "resume_completed_task"),
  1807. )
  1808. if (lastRelevantMessageIndex !== -1) {
  1809. modifiedClineMessages.splice(lastRelevantMessageIndex + 1)
  1810. }
  1811. // Remove any trailing reasoning-only UI messages that were not part of the persisted API conversation
  1812. while (modifiedClineMessages.length > 0) {
  1813. const last = modifiedClineMessages[modifiedClineMessages.length - 1]
  1814. if (last.type === "say" && last.say === "reasoning") {
  1815. modifiedClineMessages.pop()
  1816. } else {
  1817. break
  1818. }
  1819. }
1820. // Since we don't use `api_req_finished` anymore, we need to check whether the
1821. // last `api_req_started` has a cost value. If it doesn't, and there is no
1822. // cancellation reason to present, we remove it, since it indicates an API
1823. // request that never streamed any content.
  1824. const lastApiReqStartedIndex = findLastIndex(
  1825. modifiedClineMessages,
  1826. (m) => m.type === "say" && m.say === "api_req_started",
  1827. )
  1828. if (lastApiReqStartedIndex !== -1) {
  1829. const lastApiReqStarted = modifiedClineMessages[lastApiReqStartedIndex]
  1830. const { cost, cancelReason }: ClineApiReqInfo = JSON.parse(lastApiReqStarted.text || "{}")
  1831. if (cost === undefined && cancelReason === undefined) {
  1832. modifiedClineMessages.splice(lastApiReqStartedIndex, 1)
  1833. }
  1834. }
  1835. await this.overwriteClineMessages(modifiedClineMessages)
  1836. this.clineMessages = await this.getSavedClineMessages()
  1837. // Now present the cline messages to the user and ask if they want to
  1838. // resume (NOTE: we ran into a bug before where the
  1839. // apiConversationHistory wouldn't be initialized when opening a old
  1840. // task, and it was because we were waiting for resume).
  1841. // This is important in case the user deletes messages without resuming
  1842. // the task first.
  1843. this.apiConversationHistory = await this.getSavedApiConversationHistory()
  1844. const lastClineMessage = this.clineMessages
  1845. .slice()
  1846. .reverse()
  1847. .find((m) => !(m.ask === "resume_task" || m.ask === "resume_completed_task")) // Could be multiple resume tasks.
  1848. let askType: ClineAsk
  1849. if (lastClineMessage?.ask === "completion_result") {
  1850. askType = "resume_completed_task"
  1851. } else {
  1852. askType = "resume_task"
  1853. }
  1854. this.isInitialized = true
  1855. const { response, text, images } = await this.ask(askType) // Calls `postStateToWebview`.
  1856. let responseText: string | undefined
  1857. let responseImages: string[] | undefined
  1858. if (response === "messageResponse") {
  1859. await this.say("user_feedback", text, images)
  1860. responseText = text
  1861. responseImages = images
  1862. }
  1863. // Make sure that the api conversation history can be resumed by the API,
  1864. // even if it goes out of sync with cline messages.
  1865. let existingApiConversationHistory: ApiMessage[] = await this.getSavedApiConversationHistory()
  1866. // Tool blocks are always preserved; native tool calling only.
  1867. // if the last message is an assistant message, we need to check if there's tool use since every tool use has to have a tool response
  1868. // if there's no tool use and only a text block, then we can just add a user message
  1869. // (note this isn't relevant anymore since we use custom tool prompts instead of tool use blocks, but this is here for legacy purposes in case users resume old tasks)
1870. // if the last message is a user message, we need to get the assistant message before it to see if it made tool calls, and if so, fill in the remaining tool responses with 'interrupted'
  1871. let modifiedOldUserContent: Anthropic.Messages.ContentBlockParam[] // either the last message if its user message, or the user message before the last (assistant) message
  1872. let modifiedApiConversationHistory: ApiMessage[] // need to remove the last user message to replace with new modified user message
  1873. if (existingApiConversationHistory.length > 0) {
  1874. const lastMessage = existingApiConversationHistory[existingApiConversationHistory.length - 1]
  1875. if (lastMessage.isSummary) {
  1876. // IMPORTANT: If the last message is a condensation summary, we must preserve it
  1877. // intact. The summary message carries critical metadata (isSummary, condenseId)
  1878. // that getEffectiveApiHistory() uses to filter out condensed messages.
  1879. // Removing or merging it would destroy this metadata, causing all condensed
  1880. // messages to become "orphaned" and restored to active status — effectively
  1881. // undoing the condensation and sending the full history to the API.
  1882. // See: https://github.com/RooCodeInc/Roo-Code/issues/11487
  1883. modifiedApiConversationHistory = [...existingApiConversationHistory]
  1884. modifiedOldUserContent = []
  1885. } else if (lastMessage.role === "assistant") {
  1886. const content = Array.isArray(lastMessage.content)
  1887. ? lastMessage.content
  1888. : [{ type: "text", text: lastMessage.content }]
  1889. const hasToolUse = content.some((block) => block.type === "tool_use")
  1890. if (hasToolUse) {
  1891. const toolUseBlocks = content.filter(
  1892. (block) => block.type === "tool_use",
  1893. ) as Anthropic.Messages.ToolUseBlock[]
  1894. const toolResponses: Anthropic.ToolResultBlockParam[] = toolUseBlocks.map((block) => ({
  1895. type: "tool_result",
  1896. tool_use_id: block.id,
  1897. content: "Task was interrupted before this tool call could be completed.",
  1898. }))
  1899. modifiedApiConversationHistory = [...existingApiConversationHistory] // no changes
  1900. modifiedOldUserContent = [...toolResponses]
  1901. } else {
  1902. modifiedApiConversationHistory = [...existingApiConversationHistory]
  1903. modifiedOldUserContent = []
  1904. }
  1905. } else if (lastMessage.role === "user") {
  1906. const previousAssistantMessage: ApiMessage | undefined =
  1907. existingApiConversationHistory[existingApiConversationHistory.length - 2]
  1908. const existingUserContent: Anthropic.Messages.ContentBlockParam[] = Array.isArray(
  1909. lastMessage.content,
  1910. )
  1911. ? lastMessage.content
  1912. : [{ type: "text", text: lastMessage.content }]
  1913. if (previousAssistantMessage && previousAssistantMessage.role === "assistant") {
  1914. const assistantContent = Array.isArray(previousAssistantMessage.content)
  1915. ? previousAssistantMessage.content
  1916. : [{ type: "text", text: previousAssistantMessage.content }]
  1917. const toolUseBlocks = assistantContent.filter(
  1918. (block) => block.type === "tool_use",
  1919. ) as Anthropic.Messages.ToolUseBlock[]
  1920. if (toolUseBlocks.length > 0) {
  1921. const existingToolResults = existingUserContent.filter(
  1922. (block) => block.type === "tool_result",
  1923. ) as Anthropic.ToolResultBlockParam[]
  1924. const missingToolResponses: Anthropic.ToolResultBlockParam[] = toolUseBlocks
  1925. .filter(
  1926. (toolUse) =>
  1927. !existingToolResults.some((result) => result.tool_use_id === toolUse.id),
  1928. )
  1929. .map((toolUse) => ({
  1930. type: "tool_result",
  1931. tool_use_id: toolUse.id,
  1932. content: "Task was interrupted before this tool call could be completed.",
  1933. }))
  1934. modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1) // removes the last user message
  1935. modifiedOldUserContent = [...existingUserContent, ...missingToolResponses]
  1936. } else {
  1937. modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1)
  1938. modifiedOldUserContent = [...existingUserContent]
  1939. }
  1940. } else {
  1941. modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1)
  1942. modifiedOldUserContent = [...existingUserContent]
  1943. }
  1944. } else {
  1945. throw new Error("Unexpected: Last message is not a user or assistant message")
  1946. }
  1947. } else {
  1948. throw new Error("Unexpected: No existing API conversation history")
  1949. }
  1950. let newUserContent: Anthropic.Messages.ContentBlockParam[] = [...modifiedOldUserContent]
  1951. const agoText = ((): string => {
  1952. const timestamp = lastClineMessage?.ts ?? Date.now()
  1953. const now = Date.now()
  1954. const diff = now - timestamp
  1955. const minutes = Math.floor(diff / 60000)
  1956. const hours = Math.floor(minutes / 60)
  1957. const days = Math.floor(hours / 24)
  1958. if (days > 0) {
  1959. return `${days} day${days > 1 ? "s" : ""} ago`
  1960. }
  1961. if (hours > 0) {
  1962. return `${hours} hour${hours > 1 ? "s" : ""} ago`
  1963. }
  1964. if (minutes > 0) {
  1965. return `${minutes} minute${minutes > 1 ? "s" : ""} ago`
  1966. }
  1967. return "just now"
  1968. })()
  1969. if (responseText) {
  1970. newUserContent.push({
  1971. type: "text",
  1972. text: `<user_message>\n${responseText}\n</user_message>`,
  1973. })
  1974. }
  1975. if (responseImages && responseImages.length > 0) {
  1976. newUserContent.push(...formatResponse.imageBlocks(responseImages))
  1977. }
  1978. // Ensure we have at least some content to send to the API.
  1979. // If newUserContent is empty, add a minimal resumption message.
  1980. if (newUserContent.length === 0) {
  1981. newUserContent.push({
  1982. type: "text",
  1983. text: "[TASK RESUMPTION] Resuming task...",
  1984. })
  1985. }
  1986. await this.overwriteApiConversationHistory(modifiedApiConversationHistory)
  1987. // Task resuming from history item.
  1988. await this.initiateTaskLoop(newUserContent)
  1989. } catch (error) {
  1990. // Resume and cancellation can race when users issue repeated cancels.
  1991. // Treat intentional abort/abandon flows as expected and avoid process-level crashes.
  1992. if (this.abandoned === true || this.abort === true || this.abortReason === "user_cancelled") {
  1993. return
  1994. }
  1995. throw error
  1996. }
  1997. }
  1998. /**
  1999. * Cancels the current HTTP request if one is in progress.
  2000. * This immediately aborts the underlying stream rather than waiting for the next chunk.
  2001. */
  2002. public cancelCurrentRequest(): void {
  2003. if (this.currentRequestAbortController) {
  2004. console.log(`[Task#${this.taskId}.${this.instanceId}] Aborting current HTTP request`)
  2005. this.currentRequestAbortController.abort()
  2006. this.currentRequestAbortController = undefined
  2007. }
  2008. }
  2009. /**
  2010. * Force emit a final token usage update, ignoring throttle.
  2011. * Called before task completion or abort to ensure final stats are captured.
  2012. * Triggers the debounce with current values and immediately flushes to ensure emit.
  2013. */
  2014. public emitFinalTokenUsageUpdate(): void {
  2015. const tokenUsage = this.getTokenUsage()
  2016. this.debouncedEmitTokenUsage(tokenUsage, this.toolUsage)
  2017. this.debouncedEmitTokenUsage.flush()
  2018. }
  2019. public async abortTask(isAbandoned = false) {
  2020. // Aborting task
  2021. // Will stop any autonomously running promises.
  2022. if (isAbandoned) {
  2023. this.abandoned = true
  2024. }
  2025. this.abort = true
  2026. // Reset consecutive error counters on abort (manual intervention)
  2027. this.consecutiveNoToolUseCount = 0
  2028. this.consecutiveNoAssistantMessagesCount = 0
  2029. // Force final token usage update before abort event
  2030. this.emitFinalTokenUsageUpdate()
  2031. this.emit(RooCodeEventName.TaskAborted)
  2032. try {
  2033. this.dispose() // Call the centralized dispose method
  2034. } catch (error) {
  2035. console.error(`Error during task ${this.taskId}.${this.instanceId} disposal:`, error)
  2036. // Don't rethrow - we want abort to always succeed
  2037. }
2038. // Save the countdown message in the automatic retry or other content.
2039. try {
  2041. await this.saveClineMessages()
  2042. } catch (error) {
  2043. console.error(`Error saving messages during abort for task ${this.taskId}.${this.instanceId}:`, error)
  2044. }
  2045. }
  2046. public dispose(): void {
  2047. console.log(`[Task#dispose] disposing task ${this.taskId}.${this.instanceId}`)
  2048. // Cancel any in-progress HTTP request
  2049. try {
  2050. this.cancelCurrentRequest()
  2051. } catch (error) {
  2052. console.error("Error cancelling current request:", error)
  2053. }
  2054. // Remove provider profile change listener
  2055. try {
  2056. if (this.providerProfileChangeListener) {
  2057. const provider = this.providerRef.deref()
  2058. if (provider) {
  2059. provider.off(RooCodeEventName.ProviderProfileChanged, this.providerProfileChangeListener)
  2060. }
  2061. this.providerProfileChangeListener = undefined
  2062. }
  2063. } catch (error) {
  2064. console.error("Error removing provider profile change listener:", error)
  2065. }
  2066. // Dispose message queue and remove event listeners.
  2067. try {
  2068. if (this.messageQueueStateChangedHandler) {
  2069. this.messageQueueService.removeListener("stateChanged", this.messageQueueStateChangedHandler)
  2070. this.messageQueueStateChangedHandler = undefined
  2071. }
  2072. this.messageQueueService.dispose()
  2073. } catch (error) {
  2074. console.error("Error disposing message queue:", error)
  2075. }
  2076. // Remove all event listeners to prevent memory leaks.
  2077. try {
  2078. this.removeAllListeners()
  2079. } catch (error) {
  2080. console.error("Error removing event listeners:", error)
  2081. }
  2082. if (this.enableBridge) {
  2083. BridgeOrchestrator.getInstance()
  2084. ?.unsubscribeFromTask(this.taskId)
  2085. .catch((error) =>
  2086. console.error(
  2087. `[Task#dispose] BridgeOrchestrator#unsubscribeFromTask() failed: ${error instanceof Error ? error.message : String(error)}`,
  2088. ),
  2089. )
  2090. }
2091. // Release any terminals associated with this task.
2092. try {
  2094. TerminalRegistry.releaseTerminalsForTask(this.taskId)
  2095. } catch (error) {
  2096. console.error("Error releasing terminals:", error)
  2097. }
  2098. // Cleanup command output artifacts
  2099. getTaskDirectoryPath(this.globalStoragePath, this.taskId)
  2100. .then((taskDir) => {
  2101. const outputDir = path.join(taskDir, "command-output")
  2102. return OutputInterceptor.cleanup(outputDir)
  2103. })
  2104. .catch((error) => {
  2105. console.error("Error cleaning up command output artifacts:", error)
  2106. })
  2107. try {
  2108. if (this.rooIgnoreController) {
  2109. this.rooIgnoreController.dispose()
  2110. this.rooIgnoreController = undefined
  2111. }
  2112. } catch (error) {
  2113. console.error("Error disposing RooIgnoreController:", error)
  2114. // This is the critical one for the leak fix.
  2115. }
  2116. try {
  2117. this.fileContextTracker.dispose()
  2118. } catch (error) {
  2119. console.error("Error disposing file context tracker:", error)
  2120. }
  2121. try {
  2122. // If we're not streaming then `abortStream` won't be called.
  2123. if (this.isStreaming && this.diffViewProvider.isEditing) {
  2124. this.diffViewProvider.revertChanges().catch(console.error)
  2125. }
  2126. } catch (error) {
  2127. console.error("Error reverting diff changes:", error)
  2128. }
  2129. }
  2130. // Subtasks
  2131. // Spawn / Wait / Complete
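/**
 * Spawns a subtask by delegating this (parent) task to the provider and
 * opening a child task with the given initial message, todo list, and mode.
 * Returns the child task created by the provider.
 *
 * Illustrative usage (variable names are hypothetical):
 *   const child = await parentTask.startSubtask("Write unit tests", [], "code")
 */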
  2132. public async startSubtask(message: string, initialTodos: TodoItem[], mode: string) {
  2133. const provider = this.providerRef.deref()
  2134. if (!provider) {
  2135. throw new Error("Provider not available")
  2136. }
  2137. const child = await (provider as any).delegateParentAndOpenChild({
  2138. parentTaskId: this.taskId,
  2139. message,
  2140. initialTodos,
  2141. mode,
  2142. })
  2143. return child
  2144. }
  2145. /**
  2146. * Resume parent task after delegation completion without showing resume ask.
  2147. * Used in metadata-driven subtask flow.
  2148. *
  2149. * This method:
  2150. * - Clears any pending ask states
  2151. * - Resets abort and streaming flags
  2152. * - Ensures next API call includes full context
  2153. * - Immediately continues task loop without user interaction
  2154. */
  2155. public async resumeAfterDelegation(): Promise<void> {
  2156. // Clear any ask states that might have been set during history load
  2157. this.idleAsk = undefined
  2158. this.resumableAsk = undefined
  2159. this.interactiveAsk = undefined
  2160. // Reset abort and streaming state to ensure clean continuation
  2161. this.abort = false
  2162. this.abandoned = false
  2163. this.abortReason = undefined
  2164. this.didFinishAbortingStream = false
  2165. this.isStreaming = false
  2166. this.isWaitingForFirstChunk = false
  2167. // Ensure next API call includes full context after delegation
  2168. this.skipPrevResponseIdOnce = true
  2169. // Mark as initialized and active
  2170. this.isInitialized = true
  2171. this.emit(RooCodeEventName.TaskActive, this.taskId)
  2172. // Load conversation history if not already loaded
  2173. if (this.apiConversationHistory.length === 0) {
  2174. this.apiConversationHistory = await this.getSavedApiConversationHistory()
  2175. }
  2176. // Add environment details to the existing last user message (which contains the tool_result)
  2177. // This avoids creating a new user message which would cause consecutive user messages
  2178. const environmentDetails = await getEnvironmentDetails(this, true)
  2179. let lastUserMsgIndex = -1
  2180. for (let i = this.apiConversationHistory.length - 1; i >= 0; i--) {
  2181. if (this.apiConversationHistory[i].role === "user") {
  2182. lastUserMsgIndex = i
  2183. break
  2184. }
  2185. }
  2186. if (lastUserMsgIndex >= 0) {
  2187. const lastUserMsg = this.apiConversationHistory[lastUserMsgIndex]
  2188. if (Array.isArray(lastUserMsg.content)) {
  2189. // Remove any existing environment_details blocks before adding fresh ones
  2190. const contentWithoutEnvDetails = lastUserMsg.content.filter(
  2191. (block: Anthropic.Messages.ContentBlockParam) => {
  2192. if (block.type === "text" && typeof block.text === "string") {
  2193. const isEnvironmentDetailsBlock =
  2194. block.text.trim().startsWith("<environment_details>") &&
  2195. block.text.trim().endsWith("</environment_details>")
  2196. return !isEnvironmentDetailsBlock
  2197. }
  2198. return true
  2199. },
  2200. )
  2201. // Add fresh environment details
  2202. lastUserMsg.content = [...contentWithoutEnvDetails, { type: "text" as const, text: environmentDetails }]
  2203. }
  2204. }
  2205. // Save the updated history
  2206. await this.saveApiConversationHistory()
  2207. // Continue task loop - pass empty array to signal no new user content needed
  2208. // The initiateTaskLoop will handle this by skipping user message addition
  2209. await this.initiateTaskLoop([])
  2210. }
  2211. // Task Loop
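/**
 * Drives the agentic loop: repeatedly calls recursivelyMakeClineRequests with
 * the next user content until the task is aborted or the loop signals it has
 * ended. File details are only included on the first iteration; if the model
 * stops using tools, a "no tools used" reminder is fed back as the next user
 * content.
 */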
  2212. private async initiateTaskLoop(userContent: Anthropic.Messages.ContentBlockParam[]): Promise<void> {
  2213. // Kicks off the checkpoints initialization process in the background.
  2214. getCheckpointService(this)
  2215. let nextUserContent = userContent
  2216. let includeFileDetails = true
  2217. this.emit(RooCodeEventName.TaskStarted)
  2218. while (!this.abort) {
  2219. const didEndLoop = await this.recursivelyMakeClineRequests(nextUserContent, includeFileDetails)
  2220. includeFileDetails = false // We only need file details the first time.
2221. // The way this agentic loop works is that Cline is given a task
2222. // that he then completes by calling tools. Unless there's an
2223. // attempt_completion call, we keep responding back to him with his
2224. // tools' responses until he either calls attempt_completion or stops
2225. // using tools. If he does not use any more tools, we ask him to
2226. // consider whether he has completed the task and, if so, to call
2227. // attempt_completion; otherwise, to proceed with completing the task.
2228. // There is a MAX_REQUESTS_PER_TASK limit to prevent infinite
2229. // requests, but Cline is prompted to finish the task as efficiently
2230. // as he can.
  2231. if (didEndLoop) {
  2232. // For now a task never 'completes'. This will only happen if
  2233. // the user hits max requests and denies resetting the count.
  2234. break
  2235. } else {
  2236. nextUserContent = [{ type: "text", text: formatResponse.noToolsUsed() }]
  2237. }
  2238. }
  2239. }
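/**
 * Performs one or more API request/response rounds using an explicit stack
 * instead of recursion, so long tasks don't grow the call stack. Each stack
 * item carries the user content to send, whether to include file details, a
 * retry counter, and whether the user message was removed after an empty
 * assistant response. Returns true when the loop should end the task.
 */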
  2240. public async recursivelyMakeClineRequests(
  2241. userContent: Anthropic.Messages.ContentBlockParam[],
  2242. includeFileDetails: boolean = false,
  2243. ): Promise<boolean> {
  2244. interface StackItem {
  2245. userContent: Anthropic.Messages.ContentBlockParam[]
  2246. includeFileDetails: boolean
  2247. retryAttempt?: number
  2248. userMessageWasRemoved?: boolean // Track if user message was removed due to empty response
  2249. }
  2250. const stack: StackItem[] = [{ userContent, includeFileDetails, retryAttempt: 0 }]
  2251. while (stack.length > 0) {
  2252. const currentItem = stack.pop()!
  2253. const currentUserContent = currentItem.userContent
  2254. const currentIncludeFileDetails = currentItem.includeFileDetails
  2255. if (this.abort) {
  2256. throw new Error(`[RooCode#recursivelyMakeRooRequests] task ${this.taskId}.${this.instanceId} aborted`)
  2257. }
  2258. if (this.consecutiveMistakeLimit > 0 && this.consecutiveMistakeCount >= this.consecutiveMistakeLimit) {
  2259. // Track consecutive mistake errors in telemetry via event and PostHog exception tracking.
  2260. // The reason is "no_tools_used" because this limit is reached via initiateTaskLoop
  2261. // which increments consecutiveMistakeCount when the model doesn't use any tools.
  2262. TelemetryService.instance.captureConsecutiveMistakeError(this.taskId)
  2263. TelemetryService.instance.captureException(
  2264. new ConsecutiveMistakeError(
  2265. `Task reached consecutive mistake limit (${this.consecutiveMistakeLimit})`,
  2266. this.taskId,
  2267. this.consecutiveMistakeCount,
  2268. this.consecutiveMistakeLimit,
  2269. "no_tools_used",
  2270. this.apiConfiguration.apiProvider,
  2271. getModelId(this.apiConfiguration),
  2272. ),
  2273. )
  2274. const { response, text, images } = await this.ask(
  2275. "mistake_limit_reached",
  2276. t("common:errors.mistake_limit_guidance"),
  2277. )
  2278. if (response === "messageResponse") {
  2279. currentUserContent.push(
  2280. ...[
  2281. { type: "text" as const, text: formatResponse.tooManyMistakes(text) },
  2282. ...formatResponse.imageBlocks(images),
  2283. ],
  2284. )
  2285. await this.say("user_feedback", text, images)
  2286. }
  2287. this.consecutiveMistakeCount = 0
  2288. }
2289. // Getting verbose details is an expensive operation: it uses ripgrep to
2290. // build a top-down file structure of the project, which for large projects
2291. // can take a few seconds. For the best UX we show a placeholder api_req_started
2292. // message with a loading spinner while this happens.
  2293. // Determine API protocol based on provider and model
  2294. const modelId = getModelId(this.apiConfiguration)
  2295. const apiProvider = this.apiConfiguration.apiProvider
  2296. const apiProtocol = getApiProtocol(
  2297. apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined,
  2298. modelId,
  2299. )
  2300. // Respect user-configured provider rate limiting BEFORE we emit api_req_started.
  2301. // This prevents the UI from showing an "API Request..." spinner while we are
  2302. // intentionally waiting due to the rate limit slider.
  2303. //
  2304. // NOTE: We also set Task.lastGlobalApiRequestTime here to reserve this slot
  2305. // before we build environment details (which can take time).
  2306. // This ensures subsequent requests (including subtasks) still honour the
  2307. // provider rate-limit window.
  2308. await this.maybeWaitForProviderRateLimit(currentItem.retryAttempt ?? 0)
  2309. Task.lastGlobalApiRequestTime = performance.now()
  2310. await this.say(
  2311. "api_req_started",
  2312. JSON.stringify({
  2313. apiProtocol,
  2314. }),
  2315. )
  2316. const {
  2317. showRooIgnoredFiles = false,
  2318. includeDiagnosticMessages = true,
  2319. maxDiagnosticMessages = 50,
  2320. } = (await this.providerRef.deref()?.getState()) ?? {}
  2321. const { content: parsedUserContent, mode: slashCommandMode } = await processUserContentMentions({
  2322. userContent: currentUserContent,
  2323. cwd: this.cwd,
  2324. fileContextTracker: this.fileContextTracker,
  2325. rooIgnoreController: this.rooIgnoreController,
  2326. showRooIgnoredFiles,
  2327. includeDiagnosticMessages,
  2328. maxDiagnosticMessages,
  2329. })
  2330. // Switch mode if specified in a slash command's frontmatter
  2331. if (slashCommandMode) {
  2332. const provider = this.providerRef.deref()
  2333. if (provider) {
  2334. const state = await provider.getState()
  2335. const targetMode = getModeBySlug(slashCommandMode, state?.customModes)
  2336. if (targetMode) {
  2337. await provider.handleModeSwitch(slashCommandMode)
  2338. }
  2339. }
  2340. }
  2341. const environmentDetails = await getEnvironmentDetails(this, currentIncludeFileDetails)
  2342. // Remove any existing environment_details blocks before adding fresh ones.
  2343. // This prevents duplicate environment details when resuming tasks,
  2344. // where the old user message content may already contain environment details from the previous session.
  2345. // We check for both opening and closing tags to ensure we're matching complete environment detail blocks,
  2346. // not just mentions of the tag in regular content.
  2347. const contentWithoutEnvDetails = parsedUserContent.filter((block) => {
  2348. if (block.type === "text" && typeof block.text === "string") {
  2349. // Check if this text block is a complete environment_details block
  2350. // by verifying it starts with the opening tag and ends with the closing tag
  2351. const isEnvironmentDetailsBlock =
  2352. block.text.trim().startsWith("<environment_details>") &&
  2353. block.text.trim().endsWith("</environment_details>")
  2354. return !isEnvironmentDetailsBlock
  2355. }
  2356. return true
  2357. })
  2358. // Add environment details as its own text block, separate from tool
  2359. // results.
  2360. let finalUserContent = [...contentWithoutEnvDetails, { type: "text" as const, text: environmentDetails }]
  2361. // Only add user message to conversation history if:
  2362. // 1. This is the first attempt (retryAttempt === 0), AND
  2363. // 2. The original userContent was not empty (empty signals delegation resume where
  2364. // the user message with tool_result and env details is already in history), OR
  2365. // 3. The message was removed in a previous iteration (userMessageWasRemoved === true)
  2366. // This prevents consecutive user messages while allowing re-add when needed
  2367. const isEmptyUserContent = currentUserContent.length === 0
  2368. const shouldAddUserMessage =
  2369. ((currentItem.retryAttempt ?? 0) === 0 && !isEmptyUserContent) || currentItem.userMessageWasRemoved
  2370. if (shouldAddUserMessage) {
  2371. await this.addToApiConversationHistory({ role: "user", content: finalUserContent })
  2372. TelemetryService.instance.captureConversationMessage(this.taskId, "user")
  2373. }
  2374. // Since we sent off a placeholder api_req_started message to update the
  2375. // webview while waiting to actually start the API request (to load
  2376. // potential details for example), we need to update the text of that
  2377. // message.
  2378. const lastApiReqIndex = findLastIndex(this.clineMessages, (m) => m.say === "api_req_started")
  2379. this.clineMessages[lastApiReqIndex].text = JSON.stringify({
  2380. apiProtocol,
  2381. } satisfies ClineApiReqInfo)
  2382. await this.saveClineMessages()
  2383. await this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
  2384. try {
  2385. let cacheWriteTokens = 0
  2386. let cacheReadTokens = 0
  2387. let inputTokens = 0
  2388. let outputTokens = 0
  2389. let totalCost: number | undefined
  2390. // We can't use `api_req_finished` anymore since it's a unique case
  2391. // where it could come after a streaming message (i.e. in the middle
  2392. // of being updated or executed).
  2393. // Fortunately `api_req_finished` was always parsed out for the GUI
  2394. // anyways, so it remains solely for legacy purposes to keep track
  2395. // of prices in tasks from history (it's worth removing a few months
  2396. // from now).
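// Updates the api_req_started message in place with the latest token counts,
// provider-aware cost, and (when aborted) the cancellation reason.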
  2397. const updateApiReqMsg = (cancelReason?: ClineApiReqCancelReason, streamingFailedMessage?: string) => {
  2398. if (lastApiReqIndex < 0 || !this.clineMessages[lastApiReqIndex]) {
  2399. return
  2400. }
  2401. const existingData = JSON.parse(this.clineMessages[lastApiReqIndex].text || "{}")
  2402. // Calculate total tokens and cost using provider-aware function
  2403. const modelId = getModelId(this.apiConfiguration)
  2404. const apiProvider = this.apiConfiguration.apiProvider
  2405. const apiProtocol = getApiProtocol(
  2406. apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined,
  2407. modelId,
  2408. )
  2409. const costResult =
  2410. apiProtocol === "anthropic"
  2411. ? calculateApiCostAnthropic(
  2412. streamModelInfo,
  2413. inputTokens,
  2414. outputTokens,
  2415. cacheWriteTokens,
  2416. cacheReadTokens,
  2417. )
  2418. : calculateApiCostOpenAI(
  2419. streamModelInfo,
  2420. inputTokens,
  2421. outputTokens,
  2422. cacheWriteTokens,
  2423. cacheReadTokens,
  2424. )
  2425. this.clineMessages[lastApiReqIndex].text = JSON.stringify({
  2426. ...existingData,
  2427. tokensIn: costResult.totalInputTokens,
  2428. tokensOut: costResult.totalOutputTokens,
  2429. cacheWrites: cacheWriteTokens,
  2430. cacheReads: cacheReadTokens,
  2431. cost: totalCost ?? costResult.totalCost,
  2432. cancelReason,
  2433. streamingFailedMessage,
  2434. } satisfies ClineApiReqInfo)
  2435. }
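// Finalizes a cancelled or failed stream: reverts any in-progress diff edits,
// marks the last partial message complete, records the cancel reason and cost
// on the api_req_started entry, and signals that aborting has finished.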
  2436. const abortStream = async (cancelReason: ClineApiReqCancelReason, streamingFailedMessage?: string) => {
  2437. if (this.diffViewProvider.isEditing) {
  2438. await this.diffViewProvider.revertChanges() // closes diff view
  2439. }
  2440. // if last message is a partial we need to update and save it
  2441. const lastMessage = this.clineMessages.at(-1)
  2442. if (lastMessage && lastMessage.partial) {
  2443. // lastMessage.ts = Date.now() DO NOT update ts since it is used as a key for virtuoso list
  2444. lastMessage.partial = false
  2445. // instead of streaming partialMessage events, we do a save and post like normal to persist to disk
  2446. }
2447. // Update `api_req_started` with the cancellation reason and cost, so that
2448. // we can display the cost of the partial stream and why it was cancelled
  2449. updateApiReqMsg(cancelReason, streamingFailedMessage)
  2450. await this.saveClineMessages()
2451. // Signals to the provider that it can retrieve the saved messages
2452. // from disk, since abortTask cannot be awaited.
  2453. this.didFinishAbortingStream = true
  2454. }
  2455. // Reset streaming state for each new API request
  2456. this.currentStreamingContentIndex = 0
  2457. this.currentStreamingDidCheckpoint = false
  2458. this.assistantMessageContent = []
  2459. this.didCompleteReadingStream = false
  2460. this.userMessageContent = []
  2461. this.userMessageContentReady = false
  2462. this.didRejectTool = false
  2463. this.didAlreadyUseTool = false
  2464. this.assistantMessageSavedToHistory = false
  2465. // Reset tool failure flag for each new assistant turn - this ensures that tool failures
  2466. // only prevent attempt_completion within the same assistant message, not across turns
  2467. // (e.g., if a tool fails, then user sends a message saying "just complete anyway")
  2468. this.didToolFailInCurrentTurn = false
  2469. this.presentAssistantMessageLocked = false
  2470. this.presentAssistantMessageHasPendingUpdates = false
  2471. // No legacy text-stream tool parser.
  2472. this.streamingToolCallIndices.clear()
  2473. // Clear any leftover streaming tool call state from previous interrupted streams
  2474. NativeToolCallParser.clearAllStreamingToolCalls()
  2475. NativeToolCallParser.clearRawChunkState()
  2476. await this.diffViewProvider.reset()
  2477. // Cache model info once per API request to avoid repeated calls during streaming
  2478. // This is especially important for tools and background usage collection
  2479. this.cachedStreamingModel = this.api.getModel()
  2480. const streamModelInfo = this.cachedStreamingModel.info
  2481. const cachedModelId = this.cachedStreamingModel.id
2482. // Yields only if the first chunk is successful; otherwise it allows
2483. // the user to retry the request (most likely due to a rate limit
2484. // error, which gets thrown on the first chunk).
  2485. const stream = this.attemptApiRequest(currentItem.retryAttempt ?? 0, { skipProviderRateLimit: true })
  2486. let assistantMessage = ""
  2487. let reasoningMessage = ""
  2488. let pendingGroundingSources: GroundingSource[] = []
  2489. this.isStreaming = true
  2490. try {
  2491. const iterator = stream[Symbol.asyncIterator]()
  2492. // Helper to race iterator.next() with abort signal
  2493. const nextChunkWithAbort = async () => {
  2494. const nextPromise = iterator.next()
  2495. // If we have an abort controller, race it with the next chunk
  2496. if (this.currentRequestAbortController) {
  2497. const abortPromise = new Promise<never>((_, reject) => {
  2498. const signal = this.currentRequestAbortController!.signal
  2499. if (signal.aborted) {
  2500. reject(new Error("Request cancelled by user"))
  2501. } else {
  2502. signal.addEventListener("abort", () => {
  2503. reject(new Error("Request cancelled by user"))
  2504. })
  2505. }
  2506. })
  2507. return await Promise.race([nextPromise, abortPromise])
  2508. }
  2509. // No abort controller, just return the next chunk normally
  2510. return await nextPromise
  2511. }
  2512. let item = await nextChunkWithAbort()
  2513. while (!item.done) {
  2514. const chunk = item.value
  2515. item = await nextChunkWithAbort()
  2516. if (!chunk) {
2517. // Sometimes chunk is undefined; no idea what can cause
2518. // it, but this workaround seems to fix it.
  2519. continue
  2520. }
  2521. switch (chunk.type) {
  2522. case "reasoning": {
  2523. reasoningMessage += chunk.text
  2524. // Only apply formatting if the message contains sentence-ending punctuation followed by **
  2525. let formattedReasoning = reasoningMessage
  2526. if (reasoningMessage.includes("**")) {
  2527. // Add line breaks before **Title** patterns that appear after sentence endings
  2528. // This targets section headers like "...end of sentence.**Title Here**"
  2529. // Handles periods, exclamation marks, and question marks
  2530. formattedReasoning = reasoningMessage.replace(
  2531. /([.!?])\*\*([^*\n]+)\*\*/g,
  2532. "$1\n\n**$2**",
  2533. )
  2534. }
  2535. await this.say("reasoning", formattedReasoning, undefined, true)
  2536. break
  2537. }
  2538. case "usage":
  2539. inputTokens += chunk.inputTokens
  2540. outputTokens += chunk.outputTokens
  2541. cacheWriteTokens += chunk.cacheWriteTokens ?? 0
  2542. cacheReadTokens += chunk.cacheReadTokens ?? 0
  2543. totalCost = chunk.totalCost
  2544. break
  2545. case "grounding":
  2546. // Handle grounding sources separately from regular content
  2547. // to prevent state persistence issues - store them separately
  2548. if (chunk.sources && chunk.sources.length > 0) {
  2549. pendingGroundingSources.push(...chunk.sources)
  2550. }
  2551. break
  2552. case "tool_call_partial": {
  2553. // Process raw tool call chunk through NativeToolCallParser
  2554. // which handles tracking, buffering, and emits events
  2555. const events = NativeToolCallParser.processRawChunk({
  2556. index: chunk.index,
  2557. id: chunk.id,
  2558. name: chunk.name,
  2559. arguments: chunk.arguments,
  2560. })
  2561. for (const event of events) {
  2562. if (event.type === "tool_call_start") {
  2563. // Guard against duplicate tool_call_start events for the same tool ID.
  2564. // This can occur due to stream retry, reconnection, or API quirks.
  2565. // Without this check, duplicate tool_use blocks with the same ID would
  2566. // be added to assistantMessageContent, causing API 400 errors:
  2567. // "tool_use ids must be unique"
  2568. if (this.streamingToolCallIndices.has(event.id)) {
  2569. console.warn(
  2570. `[Task#${this.taskId}] Ignoring duplicate tool_call_start for ID: ${event.id} (tool: ${event.name})`,
  2571. )
  2572. continue
  2573. }
  2574. // Initialize streaming in NativeToolCallParser
  2575. NativeToolCallParser.startStreamingToolCall(event.id, event.name as ToolName)
  2576. // Before adding a new tool, finalize any preceding text block
  2577. // This prevents the text block from blocking tool presentation
  2578. const lastBlock =
  2579. this.assistantMessageContent[this.assistantMessageContent.length - 1]
  2580. if (lastBlock?.type === "text" && lastBlock.partial) {
  2581. lastBlock.partial = false
  2582. }
  2583. // Track the index where this tool will be stored
  2584. const toolUseIndex = this.assistantMessageContent.length
  2585. this.streamingToolCallIndices.set(event.id, toolUseIndex)
  2586. // Create initial partial tool use
  2587. const partialToolUse: ToolUse = {
  2588. type: "tool_use",
  2589. name: event.name as ToolName,
  2590. params: {},
  2591. partial: true,
  2592. }
  2593. // Store the ID for native protocol
  2594. ;(partialToolUse as any).id = event.id
  2595. // Add to content and present
  2596. this.assistantMessageContent.push(partialToolUse)
  2597. this.userMessageContentReady = false
  2598. presentAssistantMessage(this)
  2599. } else if (event.type === "tool_call_delta") {
  2600. // Process chunk using streaming JSON parser
  2601. const partialToolUse = NativeToolCallParser.processStreamingChunk(
  2602. event.id,
  2603. event.delta,
  2604. )
  2605. if (partialToolUse) {
  2606. // Get the index for this tool call
  2607. const toolUseIndex = this.streamingToolCallIndices.get(event.id)
  2608. if (toolUseIndex !== undefined) {
  2609. // Store the ID for native protocol
  2610. ;(partialToolUse as any).id = event.id
  2611. // Update the existing tool use with new partial data
  2612. this.assistantMessageContent[toolUseIndex] = partialToolUse
  2613. // Present updated tool use
  2614. presentAssistantMessage(this)
  2615. }
  2616. }
  2617. } else if (event.type === "tool_call_end") {
  2618. // Finalize the streaming tool call
  2619. const finalToolUse = NativeToolCallParser.finalizeStreamingToolCall(event.id)
  2620. // Get the index for this tool call
  2621. const toolUseIndex = this.streamingToolCallIndices.get(event.id)
  2622. if (finalToolUse) {
  2623. // Store the tool call ID
  2624. ;(finalToolUse as any).id = event.id
  2625. // Get the index and replace partial with final
  2626. if (toolUseIndex !== undefined) {
  2627. this.assistantMessageContent[toolUseIndex] = finalToolUse
  2628. }
  2629. // Clean up tracking
  2630. this.streamingToolCallIndices.delete(event.id)
  2631. // Mark that we have new content to process
  2632. this.userMessageContentReady = false
  2633. // Present the finalized tool call
  2634. presentAssistantMessage(this)
  2635. } else if (toolUseIndex !== undefined) {
  2636. // finalizeStreamingToolCall returned null (malformed JSON or missing args)
  2637. // Mark the tool as non-partial so it's presented as complete, but execution
  2638. // will be short-circuited in presentAssistantMessage with a structured tool_result.
  2639. const existingToolUse = this.assistantMessageContent[toolUseIndex]
  2640. if (existingToolUse && existingToolUse.type === "tool_use") {
  2641. existingToolUse.partial = false
  2642. // Ensure it has the ID for native protocol
  2643. ;(existingToolUse as any).id = event.id
  2644. }
  2645. // Clean up tracking
  2646. this.streamingToolCallIndices.delete(event.id)
  2647. // Mark that we have new content to process
  2648. this.userMessageContentReady = false
  2649. // Present the tool call - validation will handle missing params
  2650. presentAssistantMessage(this)
  2651. }
  2652. }
  2653. }
  2654. break
  2655. }
  2656. case "tool_call": {
  2657. // Legacy: Handle complete tool calls (for backward compatibility)
  2658. // Convert native tool call to ToolUse format
  2659. const toolUse = NativeToolCallParser.parseToolCall({
  2660. id: chunk.id,
  2661. name: chunk.name as ToolName,
  2662. arguments: chunk.arguments,
  2663. })
  2664. if (!toolUse) {
  2665. console.error(`Failed to parse tool call for task ${this.taskId}:`, chunk)
  2666. break
  2667. }
  2668. // Store the tool call ID on the ToolUse object for later reference
  2669. // This is needed to create tool_result blocks that reference the correct tool_use_id
  2670. toolUse.id = chunk.id
  2671. // Add the tool use to assistant message content
  2672. this.assistantMessageContent.push(toolUse)
  2673. // Mark that we have new content to process
  2674. this.userMessageContentReady = false
  2675. // Present the tool call to user - presentAssistantMessage will execute
  2676. // tools sequentially and accumulate all results in userMessageContent
  2677. presentAssistantMessage(this)
  2678. break
  2679. }
  2680. case "text": {
  2681. assistantMessage += chunk.text
  2682. // Native tool calling: text chunks are plain text.
  2683. // Create or update a text content block directly
  2684. const lastBlock = this.assistantMessageContent[this.assistantMessageContent.length - 1]
  2685. if (lastBlock?.type === "text" && lastBlock.partial) {
  2686. lastBlock.content = assistantMessage
  2687. } else {
  2688. this.assistantMessageContent.push({
  2689. type: "text",
  2690. content: assistantMessage,
  2691. partial: true,
  2692. })
  2693. this.userMessageContentReady = false
  2694. }
  2695. presentAssistantMessage(this)
  2696. break
  2697. }
  2698. }
  2699. if (this.abort) {
  2700. console.log(`aborting stream, this.abandoned = ${this.abandoned}`)
  2701. if (!this.abandoned) {
  2702. // Only need to gracefully abort if this instance
  2703. // isn't abandoned (sometimes OpenRouter stream
  2704. // hangs, in which case this would affect future
  2705. // instances of Cline).
  2706. await abortStream("user_cancelled")
  2707. }
  2708. break // Aborts the stream.
  2709. }
  2710. if (this.didRejectTool) {
  2711. // `userContent` has a tool rejection, so interrupt the
  2712. // assistant's response to present the user's feedback.
  2713. assistantMessage += "\n\n[Response interrupted by user feedback]"
  2714. // Instead of setting this preemptively, we allow the
  2715. // present iterator to finish and set
2716. // userMessageContentReady when it's ready.
  2717. // this.userMessageContentReady = true
  2718. break
  2719. }
  2720. if (this.didAlreadyUseTool) {
  2721. assistantMessage +=
  2722. "\n\n[Response interrupted by a tool use result. Only one tool may be used at a time and should be placed at the end of the message.]"
  2723. break
  2724. }
  2725. }
  2726. // Create a copy of current token values to avoid race conditions
  2727. const currentTokens = {
  2728. input: inputTokens,
  2729. output: outputTokens,
  2730. cacheWrite: cacheWriteTokens,
  2731. cacheRead: cacheReadTokens,
  2732. total: totalCost,
  2733. }
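// Continues consuming the same stream iterator in the background (with a
// timeout) to pick up late usage chunks, then atomically updates token/cost
// state, the api_req_started message, and telemetry.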
  2734. const drainStreamInBackgroundToFindAllUsage = async (apiReqIndex: number) => {
  2735. const timeoutMs = DEFAULT_USAGE_COLLECTION_TIMEOUT_MS
  2736. const startTime = performance.now()
  2737. const modelId = getModelId(this.apiConfiguration)
  2738. // Local variables to accumulate usage data without affecting the main flow
  2739. let bgInputTokens = currentTokens.input
  2740. let bgOutputTokens = currentTokens.output
  2741. let bgCacheWriteTokens = currentTokens.cacheWrite
  2742. let bgCacheReadTokens = currentTokens.cacheRead
  2743. let bgTotalCost = currentTokens.total
  2744. // Helper function to capture telemetry and update messages
  2745. const captureUsageData = async (
  2746. tokens: {
  2747. input: number
  2748. output: number
  2749. cacheWrite: number
  2750. cacheRead: number
  2751. total?: number
  2752. },
  2753. messageIndex: number = apiReqIndex,
  2754. ) => {
  2755. if (
  2756. tokens.input > 0 ||
  2757. tokens.output > 0 ||
  2758. tokens.cacheWrite > 0 ||
  2759. tokens.cacheRead > 0
  2760. ) {
  2761. // Update the shared variables atomically
  2762. inputTokens = tokens.input
  2763. outputTokens = tokens.output
  2764. cacheWriteTokens = tokens.cacheWrite
  2765. cacheReadTokens = tokens.cacheRead
  2766. totalCost = tokens.total
  2767. // Update the API request message with the latest usage data
  2768. updateApiReqMsg()
  2769. await this.saveClineMessages()
  2770. // Update the specific message in the webview
  2771. const apiReqMessage = this.clineMessages[messageIndex]
  2772. if (apiReqMessage) {
  2773. await this.updateClineMessage(apiReqMessage)
  2774. }
  2775. // Capture telemetry with provider-aware cost calculation
  2776. const modelId = getModelId(this.apiConfiguration)
  2777. const apiProvider = this.apiConfiguration.apiProvider
  2778. const apiProtocol = getApiProtocol(
  2779. apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined,
  2780. modelId,
  2781. )
  2782. // Use the appropriate cost function based on the API protocol
  2783. const costResult =
  2784. apiProtocol === "anthropic"
  2785. ? calculateApiCostAnthropic(
  2786. streamModelInfo,
  2787. tokens.input,
  2788. tokens.output,
  2789. tokens.cacheWrite,
  2790. tokens.cacheRead,
  2791. )
  2792. : calculateApiCostOpenAI(
  2793. streamModelInfo,
  2794. tokens.input,
  2795. tokens.output,
  2796. tokens.cacheWrite,
  2797. tokens.cacheRead,
  2798. )
  2799. TelemetryService.instance.captureLlmCompletion(this.taskId, {
  2800. inputTokens: costResult.totalInputTokens,
  2801. outputTokens: costResult.totalOutputTokens,
  2802. cacheWriteTokens: tokens.cacheWrite,
  2803. cacheReadTokens: tokens.cacheRead,
  2804. cost: tokens.total ?? costResult.totalCost,
  2805. })
  2806. }
  2807. }
  2808. try {
  2809. // Continue processing the original stream from where the main loop left off
  2810. let usageFound = false
  2811. let chunkCount = 0
  2812. // Use the same iterator that the main loop was using
  2813. while (!item.done) {
  2814. // Check for timeout
  2815. if (performance.now() - startTime > timeoutMs) {
  2816. console.warn(
  2817. `[Background Usage Collection] Timed out after ${timeoutMs}ms for model: ${modelId}, processed ${chunkCount} chunks`,
  2818. )
  2819. // Clean up the iterator before breaking
  2820. if (iterator.return) {
  2821. await iterator.return(undefined)
  2822. }
  2823. break
  2824. }
  2825. const chunk = item.value
  2826. item = await iterator.next()
  2827. chunkCount++
  2828. if (chunk && chunk.type === "usage") {
  2829. usageFound = true
  2830. bgInputTokens += chunk.inputTokens
  2831. bgOutputTokens += chunk.outputTokens
  2832. bgCacheWriteTokens += chunk.cacheWriteTokens ?? 0
  2833. bgCacheReadTokens += chunk.cacheReadTokens ?? 0
  2834. bgTotalCost = chunk.totalCost
  2835. }
  2836. }
  2837. if (
  2838. usageFound ||
  2839. bgInputTokens > 0 ||
  2840. bgOutputTokens > 0 ||
  2841. bgCacheWriteTokens > 0 ||
  2842. bgCacheReadTokens > 0
  2843. ) {
  2844. // We have usage data either from a usage chunk or accumulated tokens
  2845. await captureUsageData(
  2846. {
  2847. input: bgInputTokens,
  2848. output: bgOutputTokens,
  2849. cacheWrite: bgCacheWriteTokens,
  2850. cacheRead: bgCacheReadTokens,
  2851. total: bgTotalCost,
  2852. },
  2853. lastApiReqIndex,
  2854. )
  2855. } else {
  2856. console.warn(
  2857. `[Background Usage Collection] Suspicious: request ${apiReqIndex} is complete, but no usage info was found. Model: ${modelId}`,
  2858. )
  2859. }
  2860. } catch (error) {
  2861. console.error("Error draining stream for usage data:", error)
  2862. // Still try to capture whatever usage data we have collected so far
  2863. if (
  2864. bgInputTokens > 0 ||
  2865. bgOutputTokens > 0 ||
  2866. bgCacheWriteTokens > 0 ||
  2867. bgCacheReadTokens > 0
  2868. ) {
  2869. await captureUsageData(
  2870. {
  2871. input: bgInputTokens,
  2872. output: bgOutputTokens,
  2873. cacheWrite: bgCacheWriteTokens,
  2874. cacheRead: bgCacheReadTokens,
  2875. total: bgTotalCost,
  2876. },
  2877. lastApiReqIndex,
  2878. )
  2879. }
  2880. }
  2881. }
  2882. // Start the background task and handle any errors
  2883. drainStreamInBackgroundToFindAllUsage(lastApiReqIndex).catch((error) => {
  2884. console.error("Background usage collection failed:", error)
  2885. })
  2886. } catch (error) {
2887. // Abandoned happens when the extension is no longer waiting for the
2888. // Cline instance to finish aborting (an error is thrown here when
2889. // any function in the streaming loop throws due to this.abort).
  2890. if (!this.abandoned) {
  2891. // Determine cancellation reason
  2892. const cancelReason: ClineApiReqCancelReason = this.abort ? "user_cancelled" : "streaming_failed"
  2893. const rawErrorMessage = error.message ?? JSON.stringify(serializeError(error), null, 2)
  2894. const streamingFailedMessage = this.abort
  2895. ? undefined
  2896. : `${t("common:interruption.streamTerminatedByProvider")}: ${rawErrorMessage}`
  2897. // Clean up partial state
  2898. await abortStream(cancelReason, streamingFailedMessage)
  2899. if (this.abort) {
  2900. // User cancelled - abort the entire task
  2901. this.abortReason = cancelReason
  2902. await this.abortTask()
  2903. } else {
  2904. // Stream failed - log the error and retry with the same content
  2905. // The existing rate limiting will prevent rapid retries
  2906. console.error(
  2907. `[Task#${this.taskId}.${this.instanceId}] Stream failed, will retry: ${streamingFailedMessage}`,
  2908. )
  2909. // Apply exponential backoff similar to first-chunk errors when auto-resubmit is enabled
  2910. const stateForBackoff = await this.providerRef.deref()?.getState()
  2911. if (stateForBackoff?.autoApprovalEnabled) {
  2912. await this.backoffAndAnnounce(currentItem.retryAttempt ?? 0, error)
  2913. // Check if task was aborted during the backoff
  2914. if (this.abort) {
  2915. console.log(
  2916. `[Task#${this.taskId}.${this.instanceId}] Task aborted during mid-stream retry backoff`,
  2917. )
  2918. // Abort the entire task
  2919. this.abortReason = "user_cancelled"
  2920. await this.abortTask()
  2921. break
  2922. }
  2923. }
  2924. // Push the same content back onto the stack to retry, incrementing the retry attempt counter
  2925. stack.push({
  2926. userContent: currentUserContent,
  2927. includeFileDetails: false,
  2928. retryAttempt: (currentItem.retryAttempt ?? 0) + 1,
  2929. })
  2930. // Continue to retry the request
  2931. continue
  2932. }
  2933. }
  2934. } finally {
  2935. this.isStreaming = false
  2936. // Clean up the abort controller when streaming completes
  2937. this.currentRequestAbortController = undefined
  2938. }
  2939. // Need to call here in case the stream was aborted.
  2940. if (this.abort || this.abandoned) {
  2941. throw new Error(
  2942. `[RooCode#recursivelyMakeRooRequests] task ${this.taskId}.${this.instanceId} aborted`,
  2943. )
  2944. }
  2945. this.didCompleteReadingStream = true
  2946. // Set any blocks to be complete to allow `presentAssistantMessage`
  2947. // to finish and set `userMessageContentReady` to true.
  2948. // (Could be a text block that had no subsequent tool uses, or a
  2949. // text block at the very end, or an invalid tool use, etc. Whatever
  2950. // the case, `presentAssistantMessage` relies on these blocks either
  2951. // to be completed or the user to reject a block in order to proceed
  2952. // and eventually set userMessageContentReady to true.)
  2953. // Finalize any remaining streaming tool calls that weren't explicitly ended
  2954. // This is critical for MCP tools which need tool_call_end events to be properly
  2955. // converted from ToolUse to McpToolUse via finalizeStreamingToolCall()
  2956. const finalizeEvents = NativeToolCallParser.finalizeRawChunks()
  2957. for (const event of finalizeEvents) {
  2958. if (event.type === "tool_call_end") {
  2959. // Finalize the streaming tool call
  2960. const finalToolUse = NativeToolCallParser.finalizeStreamingToolCall(event.id)
  2961. // Get the index for this tool call
  2962. const toolUseIndex = this.streamingToolCallIndices.get(event.id)
  2963. if (finalToolUse) {
  2964. // Store the tool call ID
  2965. ;(finalToolUse as any).id = event.id
  2966. // Get the index and replace partial with final
  2967. if (toolUseIndex !== undefined) {
  2968. this.assistantMessageContent[toolUseIndex] = finalToolUse
  2969. }
  2970. // Clean up tracking
  2971. this.streamingToolCallIndices.delete(event.id)
  2972. // Mark that we have new content to process
  2973. this.userMessageContentReady = false
  2974. // Present the finalized tool call
  2975. presentAssistantMessage(this)
  2976. } else if (toolUseIndex !== undefined) {
  2977. // finalizeStreamingToolCall returned null (malformed JSON or missing args)
  2978. // We still need to mark the tool as non-partial so it gets executed
  2979. // The tool's validation will catch any missing required parameters
  2980. const existingToolUse = this.assistantMessageContent[toolUseIndex]
  2981. if (existingToolUse && existingToolUse.type === "tool_use") {
  2982. existingToolUse.partial = false
  2983. // Ensure it has the ID for native protocol
  2984. ;(existingToolUse as any).id = event.id
  2985. }
  2986. // Clean up tracking
  2987. this.streamingToolCallIndices.delete(event.id)
  2988. // Mark that we have new content to process
  2989. this.userMessageContentReady = false
  2990. // Present the tool call - validation will handle missing params
  2991. presentAssistantMessage(this)
  2992. }
  2993. }
  2994. }
  2995. // IMPORTANT: Capture partialBlocks AFTER finalizeRawChunks() to avoid double-presentation.
  2996. // Tools finalized above are already presented, so we only want blocks still partial after finalization.
  2997. const partialBlocks = this.assistantMessageContent.filter((block) => block.partial)
  2998. partialBlocks.forEach((block) => (block.partial = false))
  2999. // Can't just do this b/c a tool could be in the middle of executing.
  3000. // this.assistantMessageContent.forEach((e) => (e.partial = false))
  3001. // No legacy streaming parser to finalize.
  3002. // Note: updateApiReqMsg() is now called from within drainStreamInBackgroundToFindAllUsage
  3003. // to ensure usage data is captured even when the stream is interrupted. The background task
  3004. // uses local variables to accumulate usage data before atomically updating the shared state.
  3005. // Complete the reasoning message if it exists
  3006. // We can't use say() here because the reasoning message may not be the last message
  3007. // (other messages like text blocks or tool uses may have been added after it during streaming)
  3008. if (reasoningMessage) {
  3009. const lastReasoningIndex = findLastIndex(
  3010. this.clineMessages,
  3011. (m) => m.type === "say" && m.say === "reasoning",
  3012. )
  3013. if (lastReasoningIndex !== -1 && this.clineMessages[lastReasoningIndex].partial) {
  3014. this.clineMessages[lastReasoningIndex].partial = false
  3015. await this.updateClineMessage(this.clineMessages[lastReasoningIndex])
  3016. }
  3017. }
  3018. await this.saveClineMessages()
  3019. await this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
  3020. // No legacy text-stream tool parser state to reset.
  3021. // CRITICAL: Save assistant message to API history BEFORE executing tools.
  3022. // This ensures that when new_task triggers delegation and calls flushPendingToolResultsToHistory(),
  3023. // the assistant message is already in history. Otherwise, tool_result blocks would appear
  3024. // BEFORE their corresponding tool_use blocks, causing API errors.
  3025. // Check if we have any content to process (text or tool uses)
  3026. const hasTextContent = assistantMessage.length > 0
  3027. const hasToolUses = this.assistantMessageContent.some(
  3028. (block) => block.type === "tool_use" || block.type === "mcp_tool_use",
  3029. )
  3030. if (hasTextContent || hasToolUses) {
  3031. // Reset counter when we get a successful response with content
  3032. this.consecutiveNoAssistantMessagesCount = 0
  3033. // Display grounding sources to the user if they exist
  3034. if (pendingGroundingSources.length > 0) {
  3035. const citationLinks = pendingGroundingSources.map((source, i) => `[${i + 1}](${source.url})`)
  3036. const sourcesText = `${t("common:gemini.sources")} ${citationLinks.join(", ")}`
  3037. await this.say("text", sourcesText, undefined, false, undefined, undefined, {
  3038. isNonInteractive: true,
  3039. })
  3040. }
  3041. // Build the assistant message content array
  3042. const assistantContent: Array<Anthropic.TextBlockParam | Anthropic.ToolUseBlockParam> = []
  3043. // Add text content if present
  3044. if (assistantMessage) {
  3045. assistantContent.push({
  3046. type: "text" as const,
  3047. text: assistantMessage,
  3048. })
  3049. }
  3050. // Add tool_use blocks with their IDs for native protocol
  3051. // This handles both regular ToolUse and McpToolUse types
  3052. // IMPORTANT: Track seen IDs to prevent duplicates in the API request.
  3053. // Duplicate tool_use IDs cause Anthropic API 400 errors:
  3054. // "tool_use ids must be unique"
  3055. const seenToolUseIds = new Set<string>()
  3056. const toolUseBlocks = this.assistantMessageContent.filter(
  3057. (block) => block.type === "tool_use" || block.type === "mcp_tool_use",
  3058. )
  3059. for (const block of toolUseBlocks) {
  3060. if (block.type === "mcp_tool_use") {
  3061. // McpToolUse already has the original tool name (e.g., "mcp_serverName_toolName")
  3062. // The arguments are the raw tool arguments (matching the simplified schema)
  3063. const mcpBlock = block as import("../../shared/tools").McpToolUse
  3064. if (mcpBlock.id) {
  3065. const sanitizedId = sanitizeToolUseId(mcpBlock.id)
  3066. // Pre-flight deduplication: Skip if we've already added this ID
  3067. if (seenToolUseIds.has(sanitizedId)) {
  3068. console.warn(
  3069. `[Task#${this.taskId}] Pre-flight deduplication: Skipping duplicate MCP tool_use ID: ${sanitizedId} (tool: ${mcpBlock.name})`,
  3070. )
  3071. continue
  3072. }
  3073. seenToolUseIds.add(sanitizedId)
  3074. assistantContent.push({
  3075. type: "tool_use" as const,
  3076. id: sanitizedId,
  3077. name: mcpBlock.name, // Original dynamic name
  3078. input: mcpBlock.arguments, // Direct tool arguments
  3079. })
  3080. }
  3081. } else {
  3082. // Regular ToolUse
  3083. const toolUse = block as import("../../shared/tools").ToolUse
  3084. const toolCallId = toolUse.id
  3085. if (toolCallId) {
  3086. const sanitizedId = sanitizeToolUseId(toolCallId)
  3087. // Pre-flight deduplication: Skip if we've already added this ID
  3088. if (seenToolUseIds.has(sanitizedId)) {
  3089. console.warn(
  3090. `[Task#${this.taskId}] Pre-flight deduplication: Skipping duplicate tool_use ID: ${sanitizedId} (tool: ${toolUse.name})`,
  3091. )
  3092. continue
  3093. }
  3094. seenToolUseIds.add(sanitizedId)
  3095. // nativeArgs is already in the correct API format for all tools
  3096. const input = toolUse.nativeArgs || toolUse.params
  3097. // Use originalName (alias) if present for API history consistency.
  3098. // When tool aliases are used (e.g., "edit_file" -> "search_and_replace" -> "edit" (current canonical name)),
  3099. // we want the alias name in the conversation history to match what the model
  3100. // was told the tool was named, preventing confusion in multi-turn conversations.
  3101. const toolNameForHistory = toolUse.originalName ?? toolUse.name
  3102. assistantContent.push({
  3103. type: "tool_use" as const,
  3104. id: sanitizedId,
  3105. name: toolNameForHistory,
  3106. input,
  3107. })
  3108. }
  3109. }
  3110. }
  3111. // Enforce new_task isolation: if new_task is called alongside other tools,
  3112. // truncate any tools that come after it and inject error tool_results.
  3113. // This prevents orphaned tools when delegation disposes the parent task.
  3114. const newTaskIndex = assistantContent.findIndex(
  3115. (block) => block.type === "tool_use" && block.name === "new_task",
  3116. )
  3117. if (newTaskIndex !== -1 && newTaskIndex < assistantContent.length - 1) {
  3118. // new_task found but not last - truncate subsequent tools
  3119. const truncatedTools = assistantContent.slice(newTaskIndex + 1)
  3120. assistantContent.length = newTaskIndex + 1 // Truncate API history array
  3121. // ALSO truncate the execution array (assistantMessageContent) to prevent
  3122. // tools after new_task from being executed by presentAssistantMessage().
  3123. // Find new_task index in assistantMessageContent (may differ from assistantContent
  3124. // due to text blocks being structured differently).
  3125. const executionNewTaskIndex = this.assistantMessageContent.findIndex(
  3126. (block) => block.type === "tool_use" && block.name === "new_task",
  3127. )
  3128. if (executionNewTaskIndex !== -1) {
  3129. this.assistantMessageContent.length = executionNewTaskIndex + 1
  3130. }
  3131. // Pre-inject error tool_results for truncated tools
  3132. for (const tool of truncatedTools) {
  3133. if (tool.type === "tool_use" && (tool as Anthropic.ToolUseBlockParam).id) {
  3134. this.pushToolResultToUserContent({
  3135. type: "tool_result",
  3136. tool_use_id: (tool as Anthropic.ToolUseBlockParam).id,
  3137. content:
  3138. "This tool was not executed because new_task was called in the same message turn. The new_task tool must be the last tool in a message.",
  3139. is_error: true,
  3140. })
  3141. }
  3142. }
  3143. }
  3144. // Save assistant message BEFORE executing tools
  3145. // This is critical for new_task: when it triggers delegation, flushPendingToolResultsToHistory()
  3146. // will save the user message with tool_results. The assistant message must already be in history
  3147. // so that tool_result blocks appear AFTER their corresponding tool_use blocks.
  3148. await this.addToApiConversationHistory(
  3149. { role: "assistant", content: assistantContent },
  3150. reasoningMessage || undefined,
  3151. )
  3152. this.assistantMessageSavedToHistory = true
  3153. TelemetryService.instance.captureConversationMessage(this.taskId, "assistant")
  3154. }
  3155. // Present any partial blocks that were just completed.
  3156. // Tool calls are typically presented during streaming via tool_call_partial events,
  3157. // but we still present here if any partial blocks remain (e.g., malformed streams).
  3158. // NOTE: This MUST happen AFTER saving the assistant message to API history.
  3159. // When new_task is in the batch, it triggers delegation which calls flushPendingToolResultsToHistory().
  3160. // If the assistant message isn't saved yet, tool_results would appear before tool_use blocks.
  3161. if (partialBlocks.length > 0) {
  3162. // If there is content to update then it will complete and
  3163. // update `this.userMessageContentReady` to true, which we
  3164. // `pWaitFor` before making the next request.
  3165. presentAssistantMessage(this)
  3166. }
  3167. if (hasTextContent || hasToolUses) {
  3168. // NOTE: This comment is here for future reference - this was a
3169. // workaround for `userMessageContentReady` not getting set to true.
  3170. // It was due to it not recursively calling for partial blocks
  3171. // when `didRejectTool`, so it would get stuck waiting for a
  3172. // partial block to complete before it could continue.
  3173. // In case the content blocks finished it may be the api stream
  3174. // finished after the last parsed content block was executed, so
  3175. // we are able to detect out of bounds and set
  3176. // `userMessageContentReady` to true (note you should not call
3177. // `presentAssistantMessage` since if the last block is
3178. // completed it will be presented again).
  3179. // const completeBlocks = this.assistantMessageContent.filter((block) => !block.partial) // If there are any partial blocks after the stream ended we can consider them invalid.
  3180. // if (this.currentStreamingContentIndex >= completeBlocks.length) {
  3181. // this.userMessageContentReady = true
  3182. // }
  3183. await pWaitFor(() => this.userMessageContentReady)
3184. // If the model did not use a tool, then we need to tell it to
3185. // either use a tool or attempt_completion.
  3186. const didToolUse = this.assistantMessageContent.some(
  3187. (block) => block.type === "tool_use" || block.type === "mcp_tool_use",
  3188. )
  3189. if (!didToolUse) {
  3190. // Increment consecutive no-tool-use counter
  3191. this.consecutiveNoToolUseCount++
  3192. // Only show error and count toward mistake limit after 2 consecutive failures
  3193. if (this.consecutiveNoToolUseCount >= 2) {
  3194. await this.say("error", "MODEL_NO_TOOLS_USED")
  3195. // Only count toward mistake limit after second consecutive failure
  3196. this.consecutiveMistakeCount++
  3197. }
  3198. // Use the task's locked protocol for consistent behavior
  3199. this.userMessageContent.push({
  3200. type: "text",
  3201. text: formatResponse.noToolsUsed(),
  3202. })
  3203. } else {
  3204. // Reset counter when tools are used successfully
  3205. this.consecutiveNoToolUseCount = 0
  3206. }
  3207. // Push to stack if there's content OR if we're paused waiting for a subtask.
  3208. // When paused, we push an empty item so the loop continues to the pause check.
  3209. if (this.userMessageContent.length > 0 || this.isPaused) {
  3210. stack.push({
  3211. userContent: [...this.userMessageContent], // Create a copy to avoid mutation issues
  3212. includeFileDetails: false, // Subsequent iterations don't need file details
  3213. })
  3214. // Add periodic yielding to prevent blocking
  3215. await new Promise((resolve) => setImmediate(resolve))
  3216. }
  3217. continue
  3218. } else {
3219. // If there are no assistant_responses, that means we got no text
3220. // or tool_use content blocks from the API, which we should assume
3221. // is an error.
  3222. // Increment consecutive no-assistant-messages counter
  3223. this.consecutiveNoAssistantMessagesCount++
  3224. // Only show error and count toward mistake limit after 2 consecutive failures
  3225. // This provides a "grace retry" - first failure retries silently
  3226. if (this.consecutiveNoAssistantMessagesCount >= 2) {
  3227. await this.say("error", "MODEL_NO_ASSISTANT_MESSAGES")
  3228. }
  3229. // IMPORTANT: We already added the user message to
3230. // apiConversationHistory earlier in this method. Since the assistant failed to respond,
  3231. // we need to remove that message before retrying to avoid having two consecutive
  3232. // user messages (which would cause tool_result validation errors).
  3233. let state = await this.providerRef.deref()?.getState()
  3234. if (this.apiConversationHistory.length > 0) {
  3235. const lastMessage = this.apiConversationHistory[this.apiConversationHistory.length - 1]
  3236. if (lastMessage.role === "user") {
  3237. // Remove the last user message that we added earlier
  3238. this.apiConversationHistory.pop()
  3239. }
  3240. }
  3241. // Check if we should auto-retry or prompt the user
  3242. // Reuse the state variable from above
  3243. if (state?.autoApprovalEnabled) {
  3244. // Auto-retry with backoff - don't persist failure message when retrying
  3245. await this.backoffAndAnnounce(
  3246. currentItem.retryAttempt ?? 0,
  3247. new Error(
  3248. "Unexpected API Response: The language model did not provide any assistant messages. This may indicate an issue with the API or the model's output.",
  3249. ),
  3250. )
  3251. // Check if task was aborted during the backoff
  3252. if (this.abort) {
  3253. console.log(
  3254. `[Task#${this.taskId}.${this.instanceId}] Task aborted during empty-assistant retry backoff`,
  3255. )
  3256. break
  3257. }
  3258. // Push the same content back onto the stack to retry, incrementing the retry attempt counter
  3259. // Mark that user message was removed so it gets re-added on retry
  3260. stack.push({
  3261. userContent: currentUserContent,
  3262. includeFileDetails: false,
  3263. retryAttempt: (currentItem.retryAttempt ?? 0) + 1,
  3264. userMessageWasRemoved: true,
  3265. })
  3266. // Continue to retry the request
  3267. continue
  3268. } else {
  3269. // Prompt the user for retry decision
  3270. const { response } = await this.ask(
  3271. "api_req_failed",
  3272. "The model returned no assistant messages. This may indicate an issue with the API or the model's output.",
  3273. )
  3274. if (response === "yesButtonClicked") {
  3275. await this.say("api_req_retried")
  3276. // Push the same content back to retry
  3277. stack.push({
  3278. userContent: currentUserContent,
  3279. includeFileDetails: false,
  3280. retryAttempt: (currentItem.retryAttempt ?? 0) + 1,
  3281. })
  3282. // Continue to retry the request
  3283. continue
  3284. } else {
  3285. // User declined to retry
  3286. // Re-add the user message we removed.
  3287. await this.addToApiConversationHistory({
  3288. role: "user",
  3289. content: currentUserContent,
  3290. })
  3291. await this.say(
  3292. "error",
  3293. "Unexpected API Response: The language model did not provide any assistant messages. This may indicate an issue with the API or the model's output.",
  3294. )
  3295. await this.addToApiConversationHistory({
  3296. role: "assistant",
  3297. content: [{ type: "text", text: "Failure: I did not provide a response." }],
  3298. })
  3299. }
  3300. }
  3301. }
  3302. // If we reach here without continuing, return false (will always be false for now)
  3303. return false
  3304. } catch (error) {
3305. // This should never happen, since the only thing that can throw an
3306. // error is attemptApiRequest, which is wrapped in a try/catch that
3307. // sends an ask; if noButtonClicked, it will clear the current
3308. // task and destroy this instance. However, to avoid an unhandled
3309. // promise rejection, we end this loop, which ends execution
3310. // of this instance (see `startTask`).
  3311. return true // Needs to be true so parent loop knows to end task.
  3312. }
  3313. }
  3314. // If we exit the while loop normally (stack is empty), return false
  3315. return false
  3316. }
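/**
 * Builds the system prompt for the current mode, waiting for the MCP hub to
 * finish connecting (when MCP is enabled) and threading through provider
 * state such as custom modes, custom instructions, experiments, and language.
 */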
  3317. private async getSystemPrompt(): Promise<string> {
  3318. const { mcpEnabled } = (await this.providerRef.deref()?.getState()) ?? {}
  3319. let mcpHub: McpHub | undefined
  3320. if (mcpEnabled ?? true) {
  3321. const provider = this.providerRef.deref()
  3322. if (!provider) {
  3323. throw new Error("Provider reference lost during view transition")
  3324. }
  3325. // Wait for MCP hub initialization through McpServerManager
  3326. mcpHub = await McpServerManager.getInstance(provider.context, provider)
  3327. if (!mcpHub) {
  3328. throw new Error("Failed to get MCP hub from server manager")
  3329. }
  3330. // Wait for MCP servers to be connected before generating system prompt
  3331. await pWaitFor(() => !mcpHub!.isConnecting, { timeout: 10_000 }).catch(() => {
  3332. console.error("MCP servers failed to connect in time")
  3333. })
  3334. }
  3335. const rooIgnoreInstructions = this.rooIgnoreController?.getInstructions()
  3336. const state = await this.providerRef.deref()?.getState()
  3337. const {
  3338. mode,
  3339. customModes,
  3340. customModePrompts,
  3341. customInstructions,
  3342. experiments,
  3343. language,
  3344. apiConfiguration,
  3345. enableSubfolderRules,
  3346. } = state ?? {}
  3347. return await (async () => {
  3348. const provider = this.providerRef.deref()
  3349. if (!provider) {
  3350. throw new Error("Provider not available")
  3351. }
  3352. const modelInfo = this.api.getModel().info
  3353. return SYSTEM_PROMPT(
  3354. provider.context,
  3355. this.cwd,
  3356. false,
  3357. mcpHub,
  3358. this.diffStrategy,
  3359. mode ?? defaultModeSlug,
  3360. customModePrompts,
  3361. customModes,
  3362. customInstructions,
  3363. experiments,
  3364. language,
  3365. rooIgnoreInstructions,
  3366. {
  3367. todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
  3368. useAgentRules:
  3369. vscode.workspace.getConfiguration(Package.name).get<boolean>("useAgentRules") ?? true,
  3370. enableSubfolderRules: enableSubfolderRules ?? false,
  3371. newTaskRequireTodos: vscode.workspace
  3372. .getConfiguration(Package.name)
  3373. .get<boolean>("newTaskRequireTodos", false),
  3374. isStealthModel: modelInfo?.isStealthModel,
  3375. },
  3376. undefined, // todoList
  3377. this.api.getModel().id,
  3378. provider.getSkillsManager(),
  3379. )
  3380. })()
  3381. }
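/**
 * Resolves the ID of the currently selected API configuration profile from
 * provider state, falling back to "default" when it cannot be found.
 */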
  3382. private getCurrentProfileId(state: any): string {
  3383. return (
  3384. state?.listApiConfigMeta?.find((profile: any) => profile.name === state?.currentApiConfigName)?.id ??
  3385. "default"
  3386. )
  3387. }
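/**
 * Recovery path for a provider-reported context-window overflow.
 *
 * Forces context management (condensing, with sliding-window truncation as a
 * fallback) using FORCED_CONTEXT_REDUCTION_PERCENT, reports progress to the
 * webview via condenseTaskContextStarted / condenseTaskContextResponse, and
 * surfaces the result through a "condense_context" or
 * "sliding_window_truncation" say.
 */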
  3388. private async handleContextWindowExceededError(): Promise<void> {
  3389. const state = await this.providerRef.deref()?.getState()
  3390. const { profileThresholds = {}, mode, apiConfiguration } = state ?? {}
  3391. const { contextTokens } = this.getTokenUsage()
  3392. const modelInfo = this.api.getModel().info
  3393. const maxTokens = getModelMaxOutputTokens({
  3394. modelId: this.api.getModel().id,
  3395. model: modelInfo,
  3396. settings: this.apiConfiguration,
  3397. })
  3398. const contextWindow = modelInfo.contextWindow
  3399. // Get the current profile ID using the helper method
  3400. const currentProfileId = this.getCurrentProfileId(state)
  3401. // Log the context window error for debugging
  3402. console.warn(
  3403. `[Task#${this.taskId}] Context window exceeded for model ${this.api.getModel().id}. ` +
  3404. `Current tokens: ${contextTokens}, Context window: ${contextWindow}. ` +
  3405. `Forcing truncation to ${FORCED_CONTEXT_REDUCTION_PERCENT}% of current context.`,
  3406. )
  3407. // Send condenseTaskContextStarted to show in-progress indicator
  3408. await this.providerRef.deref()?.postMessageToWebview({ type: "condenseTaskContextStarted", text: this.taskId })
  3409. // Build tools for condensing metadata (same tools used for normal API calls)
  3410. const provider = this.providerRef.deref()
  3411. let allTools: import("openai").default.Chat.ChatCompletionTool[] = []
  3412. if (provider) {
  3413. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  3414. provider,
  3415. cwd: this.cwd,
  3416. mode,
  3417. customModes: state?.customModes,
  3418. experiments: state?.experiments,
  3419. apiConfiguration,
  3420. disabledTools: state?.disabledTools,
  3421. modelInfo,
  3422. includeAllToolsWithRestrictions: false,
  3423. })
  3424. allTools = toolsResult.tools
  3425. }
  3426. // Build metadata with tools and taskId for the condensing API call
  3427. const metadata: ApiHandlerCreateMessageMetadata = {
  3428. mode,
  3429. taskId: this.taskId,
  3430. ...(allTools.length > 0
  3431. ? {
  3432. tools: allTools,
  3433. tool_choice: "auto",
  3434. parallelToolCalls: true,
  3435. }
  3436. : {}),
  3437. }
  3438. try {
  3439. // Generate environment details to include in the condensed summary
  3440. const environmentDetails = await getEnvironmentDetails(this, true)
3441. // Force aggressive truncation by condensing down to FORCED_CONTEXT_REDUCTION_PERCENT% of the current context
  3442. const truncateResult = await manageContext({
  3443. messages: this.apiConversationHistory,
  3444. totalTokens: contextTokens || 0,
  3445. maxTokens,
  3446. contextWindow,
  3447. apiHandler: this.api,
  3448. autoCondenseContext: true,
  3449. autoCondenseContextPercent: FORCED_CONTEXT_REDUCTION_PERCENT,
  3450. systemPrompt: await this.getSystemPrompt(),
  3451. taskId: this.taskId,
  3452. profileThresholds,
  3453. currentProfileId,
  3454. metadata,
  3455. environmentDetails,
  3456. })
  3457. if (truncateResult.messages !== this.apiConversationHistory) {
  3458. await this.overwriteApiConversationHistory(truncateResult.messages)
  3459. }
  3460. if (truncateResult.summary) {
  3461. const { summary, cost, prevContextTokens, newContextTokens = 0 } = truncateResult
  3462. const contextCondense: ContextCondense = { summary, cost, newContextTokens, prevContextTokens }
  3463. await this.say(
  3464. "condense_context",
  3465. undefined /* text */,
  3466. undefined /* images */,
  3467. false /* partial */,
  3468. undefined /* checkpoint */,
  3469. undefined /* progressStatus */,
  3470. { isNonInteractive: true } /* options */,
  3471. contextCondense,
  3472. )
  3473. } else if (truncateResult.truncationId) {
  3474. // Sliding window truncation occurred (fallback when condensing fails or is disabled)
  3475. const contextTruncation: ContextTruncation = {
  3476. truncationId: truncateResult.truncationId,
  3477. messagesRemoved: truncateResult.messagesRemoved ?? 0,
  3478. prevContextTokens: truncateResult.prevContextTokens,
  3479. newContextTokens: truncateResult.newContextTokensAfterTruncation ?? 0,
  3480. }
  3481. await this.say(
  3482. "sliding_window_truncation",
  3483. undefined /* text */,
  3484. undefined /* images */,
  3485. false /* partial */,
  3486. undefined /* checkpoint */,
  3487. undefined /* progressStatus */,
  3488. { isNonInteractive: true } /* options */,
  3489. undefined /* contextCondense */,
  3490. contextTruncation,
  3491. )
  3492. }
  3493. } finally {
  3494. // Notify webview that context management is complete (removes in-progress spinner)
  3495. // IMPORTANT: Must always be sent to dismiss the spinner, even on error
  3496. await this.providerRef
  3497. .deref()
  3498. ?.postMessageToWebview({ type: "condenseTaskContextResponse", text: this.taskId })
  3499. }
  3500. }
  3501. /**
  3502. * Enforce the user-configured provider rate limit.
  3503. *
  3504. * NOTE: This is intentionally treated as expected behavior and is surfaced via
  3505. * the `api_req_rate_limit_wait` say type (not an error).
  3506. */
  3507. private async maybeWaitForProviderRateLimit(retryAttempt: number): Promise<void> {
  3508. const state = await this.providerRef.deref()?.getState()
  3509. const rateLimitSeconds =
  3510. state?.apiConfiguration?.rateLimitSeconds ?? this.apiConfiguration?.rateLimitSeconds ?? 0
  3511. if (rateLimitSeconds <= 0 || !Task.lastGlobalApiRequestTime) {
  3512. return
  3513. }
  3514. const now = performance.now()
  3515. const timeSinceLastRequest = now - Task.lastGlobalApiRequestTime
  3516. const rateLimitDelay = Math.ceil(
  3517. Math.min(rateLimitSeconds, Math.max(0, rateLimitSeconds * 1000 - timeSinceLastRequest) / 1000),
  3518. )
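// Example: with rateLimitSeconds = 10 and timeSinceLastRequest = 4000 ms, the
// remaining wait is (10_000 - 4_000) / 1000 = 6, so rateLimitDelay = 6 seconds.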
  3519. // Only show the countdown UX on the first attempt. Retry flows have their own delay messaging.
  3520. if (rateLimitDelay > 0 && retryAttempt === 0) {
  3521. for (let i = rateLimitDelay; i > 0; i--) {
  3522. // Send structured JSON data for i18n-safe transport
  3523. const delayMessage = JSON.stringify({ seconds: i })
  3524. await this.say("api_req_rate_limit_wait", delayMessage, undefined, true)
  3525. await delay(1000)
  3526. }
  3527. // Finalize the partial message so the UI doesn't keep rendering an in-progress spinner.
  3528. await this.say("api_req_rate_limit_wait", undefined, undefined, false)
  3529. }
  3530. }
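/**
 * Starts a streaming API request and yields its chunks.
 *
 * Before creating the message this enforces the provider rate limit (unless
 * skipped), runs context management when the token threshold is reached,
 * builds the native tool definitions for the current mode, and then streams
 * the provider response. First-chunk failures are retried with exponential
 * backoff (when auto-approval is enabled) or via an "api_req_failed" ask;
 * context-window errors trigger handleContextWindowExceededError before
 * retrying.
 *
 * @example
 * ```typescript
 * // Minimal consumption sketch (error handling omitted); the real chunk
 * // handling lives in the caller's streaming loop.
 * for await (const chunk of this.attemptApiRequest()) {
 *   // handle streamed chunk
 * }
 * ```
 */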
  3531. public async *attemptApiRequest(
  3532. retryAttempt: number = 0,
  3533. options: { skipProviderRateLimit?: boolean } = {},
  3534. ): ApiStream {
  3535. const state = await this.providerRef.deref()?.getState()
  3536. const {
  3537. apiConfiguration,
  3538. autoApprovalEnabled,
  3539. requestDelaySeconds,
  3540. mode,
  3541. autoCondenseContext = true,
  3542. autoCondenseContextPercent = 100,
  3543. profileThresholds = {},
  3544. } = state ?? {}
  3545. // Get condensing configuration for automatic triggers.
  3546. const customCondensingPrompt = state?.customSupportPrompts?.CONDENSE
  3547. if (!options.skipProviderRateLimit) {
  3548. await this.maybeWaitForProviderRateLimit(retryAttempt)
  3549. }
  3550. // Update last request time right before making the request so that subsequent
  3551. // requests — even from new subtasks — will honour the provider's rate-limit.
  3552. //
  3553. // NOTE: When recursivelyMakeClineRequests handles rate limiting, it sets the
  3554. // timestamp earlier to include the environment details build. We still set it
  3555. // here for direct callers (tests) and for the case where we didn't rate-limit
  3556. // in the caller.
  3557. Task.lastGlobalApiRequestTime = performance.now()
  3558. const systemPrompt = await this.getSystemPrompt()
  3559. const { contextTokens } = this.getTokenUsage()
  3560. if (contextTokens) {
  3561. const modelInfo = this.api.getModel().info
  3562. const maxTokens = getModelMaxOutputTokens({
  3563. modelId: this.api.getModel().id,
  3564. model: modelInfo,
  3565. settings: this.apiConfiguration,
  3566. })
  3567. const contextWindow = modelInfo.contextWindow
  3568. // Get the current profile ID using the helper method
  3569. const currentProfileId = this.getCurrentProfileId(state)
  3570. // Check if context management will likely run (threshold check)
  3571. // This allows us to show an in-progress indicator to the user
  3572. // We use the centralized willManageContext helper to avoid duplicating threshold logic
  3573. const lastMessage = this.apiConversationHistory[this.apiConversationHistory.length - 1]
  3574. const lastMessageContent = lastMessage?.content
  3575. let lastMessageTokens = 0
  3576. if (lastMessageContent) {
  3577. lastMessageTokens = Array.isArray(lastMessageContent)
  3578. ? await this.api.countTokens(lastMessageContent)
  3579. : await this.api.countTokens([{ type: "text", text: lastMessageContent as string }])
  3580. }
  3581. const contextManagementWillRun = willManageContext({
  3582. totalTokens: contextTokens,
  3583. contextWindow,
  3584. maxTokens,
  3585. autoCondenseContext,
  3586. autoCondenseContextPercent,
  3587. profileThresholds,
  3588. currentProfileId,
  3589. lastMessageTokens,
  3590. })
  3591. // Send condenseTaskContextStarted BEFORE manageContext to show in-progress indicator
  3592. // This notification must be sent here (not earlier) because the early check uses stale token count
  3593. // (before user message is added to history), which could incorrectly skip showing the indicator
  3594. if (contextManagementWillRun && autoCondenseContext) {
  3595. await this.providerRef
  3596. .deref()
  3597. ?.postMessageToWebview({ type: "condenseTaskContextStarted", text: this.taskId })
  3598. }
  3599. // Build tools for condensing metadata (same tools used for normal API calls)
  3600. // This ensures the condensing API call includes tool definitions for providers that need them
  3601. let contextMgmtTools: import("openai").default.Chat.ChatCompletionTool[] = []
  3602. {
  3603. const provider = this.providerRef.deref()
  3604. if (provider) {
  3605. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  3606. provider,
  3607. cwd: this.cwd,
  3608. mode,
  3609. customModes: state?.customModes,
  3610. experiments: state?.experiments,
  3611. apiConfiguration,
  3612. disabledTools: state?.disabledTools,
  3613. modelInfo,
  3614. includeAllToolsWithRestrictions: false,
  3615. })
  3616. contextMgmtTools = toolsResult.tools
  3617. }
  3618. }
  3619. // Build metadata with tools and taskId for the condensing API call
  3620. const contextMgmtMetadata: ApiHandlerCreateMessageMetadata = {
  3621. mode,
  3622. taskId: this.taskId,
  3623. ...(contextMgmtTools.length > 0
  3624. ? {
  3625. tools: contextMgmtTools,
  3626. tool_choice: "auto",
  3627. parallelToolCalls: true,
  3628. }
  3629. : {}),
  3630. }
  3631. // Only generate environment details when context management will actually run.
  3632. // getEnvironmentDetails(this, true) triggers a recursive workspace listing which
  3633. // adds overhead - avoid this for the common case where context is below threshold.
  3634. const contextMgmtEnvironmentDetails = contextManagementWillRun
  3635. ? await getEnvironmentDetails(this, true)
  3636. : undefined
  3637. // Get files read by Roo for code folding - only when context management will run
  3638. const contextMgmtFilesReadByRoo =
  3639. contextManagementWillRun && autoCondenseContext
  3640. ? await this.getFilesReadByRooSafely("attemptApiRequest")
  3641. : undefined
  3642. try {
  3643. const truncateResult = await manageContext({
  3644. messages: this.apiConversationHistory,
  3645. totalTokens: contextTokens,
  3646. maxTokens,
  3647. contextWindow,
  3648. apiHandler: this.api,
  3649. autoCondenseContext,
  3650. autoCondenseContextPercent,
  3651. systemPrompt,
  3652. taskId: this.taskId,
  3653. customCondensingPrompt,
  3654. profileThresholds,
  3655. currentProfileId,
  3656. metadata: contextMgmtMetadata,
  3657. environmentDetails: contextMgmtEnvironmentDetails,
  3658. filesReadByRoo: contextMgmtFilesReadByRoo,
  3659. cwd: this.cwd,
  3660. rooIgnoreController: this.rooIgnoreController,
  3661. })
  3662. if (truncateResult.messages !== this.apiConversationHistory) {
  3663. await this.overwriteApiConversationHistory(truncateResult.messages)
  3664. }
  3665. if (truncateResult.error) {
  3666. await this.say("condense_context_error", truncateResult.error)
  3667. }
  3668. if (truncateResult.summary) {
  3669. const { summary, cost, prevContextTokens, newContextTokens = 0, condenseId } = truncateResult
  3670. const contextCondense: ContextCondense = {
  3671. summary,
  3672. cost,
  3673. newContextTokens,
  3674. prevContextTokens,
  3675. condenseId,
  3676. }
  3677. await this.say(
  3678. "condense_context",
  3679. undefined /* text */,
  3680. undefined /* images */,
  3681. false /* partial */,
  3682. undefined /* checkpoint */,
  3683. undefined /* progressStatus */,
  3684. { isNonInteractive: true } /* options */,
  3685. contextCondense,
  3686. )
  3687. } else if (truncateResult.truncationId) {
  3688. // Sliding window truncation occurred (fallback when condensing fails or is disabled)
  3689. const contextTruncation: ContextTruncation = {
  3690. truncationId: truncateResult.truncationId,
  3691. messagesRemoved: truncateResult.messagesRemoved ?? 0,
  3692. prevContextTokens: truncateResult.prevContextTokens,
  3693. newContextTokens: truncateResult.newContextTokensAfterTruncation ?? 0,
  3694. }
  3695. await this.say(
  3696. "sliding_window_truncation",
  3697. undefined /* text */,
  3698. undefined /* images */,
  3699. false /* partial */,
  3700. undefined /* checkpoint */,
  3701. undefined /* progressStatus */,
  3702. { isNonInteractive: true } /* options */,
  3703. undefined /* contextCondense */,
  3704. contextTruncation,
  3705. )
  3706. }
  3707. } finally {
  3708. // Notify webview that context management is complete (sets isCondensing = false)
  3709. // This removes the in-progress spinner and allows the completed result to show
  3710. // IMPORTANT: Must always be sent to dismiss the spinner, even on error
  3711. if (contextManagementWillRun && autoCondenseContext) {
  3712. await this.providerRef
  3713. .deref()
  3714. ?.postMessageToWebview({ type: "condenseTaskContextResponse", text: this.taskId })
  3715. }
  3716. }
  3717. }
  3718. // Get the effective API history by filtering out condensed messages
  3719. // This allows non-destructive condensing where messages are tagged but not deleted,
  3720. // enabling accurate rewind operations while still sending condensed history to the API.
  3721. const effectiveHistory = getEffectiveApiHistory(this.apiConversationHistory)
  3722. const messagesSinceLastSummary = getMessagesSinceLastSummary(effectiveHistory)
  3723. // For API only: merge consecutive user messages (excludes summary messages per
  3724. // mergeConsecutiveApiMessages implementation) without mutating stored history.
  3725. const mergedForApi = mergeConsecutiveApiMessages(messagesSinceLastSummary, { roles: ["user"] })
  3726. const messagesWithoutImages = maybeRemoveImageBlocks(mergedForApi, this.api)
  3727. const cleanConversationHistory = this.buildCleanConversationHistory(messagesWithoutImages as ApiMessage[])
  3728. // Check auto-approval limits
  3729. const approvalResult = await this.autoApprovalHandler.checkAutoApprovalLimits(
  3730. state,
  3731. this.combineMessages(this.clineMessages.slice(1)),
  3732. async (type, data) => this.ask(type, data),
  3733. )
  3734. if (!approvalResult.shouldProceed) {
  3735. // User did not approve, task should be aborted
  3736. throw new Error("Auto-approval limit reached and user did not approve continuation")
  3737. }
  3738. // Whether we include tools is determined by whether we have any tools to send.
  3739. const modelInfo = this.api.getModel().info
  3740. // Build complete tools array: native tools + dynamic MCP tools
  3741. // When includeAllToolsWithRestrictions is true, returns all tools but provides
  3742. // allowedFunctionNames for providers (like Gemini) that need to see all tool
  3743. // definitions in history while restricting callable tools for the current mode.
  3744. // Only Gemini currently supports this - other providers filter tools normally.
  3745. let allTools: OpenAI.Chat.ChatCompletionTool[] = []
  3746. let allowedFunctionNames: string[] | undefined
  3747. // Gemini requires all tool definitions to be present for history compatibility,
  3748. // but uses allowedFunctionNames to restrict which tools can be called.
  3749. // Other providers (Anthropic, OpenAI, etc.) don't support this feature yet,
  3750. // so they continue to receive only the filtered tools for the current mode.
  3751. const supportsAllowedFunctionNames = apiConfiguration?.apiProvider === "gemini"
  3752. {
  3753. const provider = this.providerRef.deref()
  3754. if (!provider) {
  3755. throw new Error("Provider reference lost during tool building")
  3756. }
  3757. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  3758. provider,
  3759. cwd: this.cwd,
  3760. mode,
  3761. customModes: state?.customModes,
  3762. experiments: state?.experiments,
  3763. apiConfiguration,
  3764. disabledTools: state?.disabledTools,
  3765. modelInfo,
  3766. includeAllToolsWithRestrictions: supportsAllowedFunctionNames,
  3767. })
  3768. allTools = toolsResult.tools
  3769. allowedFunctionNames = toolsResult.allowedFunctionNames
  3770. }
  3771. const shouldIncludeTools = allTools.length > 0
  3772. const metadata: ApiHandlerCreateMessageMetadata = {
  3773. mode: mode,
  3774. taskId: this.taskId,
  3775. suppressPreviousResponseId: this.skipPrevResponseIdOnce,
  3776. // Include tools whenever they are present.
  3777. ...(shouldIncludeTools
  3778. ? {
  3779. tools: allTools,
  3780. tool_choice: "auto",
  3781. parallelToolCalls: true,
  3782. // When mode restricts tools, provide allowedFunctionNames so providers
  3783. // like Gemini can see all tools in history but only call allowed ones
  3784. ...(allowedFunctionNames ? { allowedFunctionNames } : {}),
  3785. }
  3786. : {}),
  3787. }
  3788. // Create an AbortController to allow cancelling the request mid-stream
  3789. this.currentRequestAbortController = new AbortController()
  3790. const abortSignal = this.currentRequestAbortController.signal
  3791. // Reset the flag after using it
  3792. this.skipPrevResponseIdOnce = false
  3793. // The provider accepts reasoning items alongside standard messages; cast to the expected parameter type.
  3794. const stream = this.api.createMessage(
  3795. systemPrompt,
  3796. cleanConversationHistory as unknown as Anthropic.Messages.MessageParam[],
  3797. metadata,
  3798. )
  3799. const iterator = stream[Symbol.asyncIterator]()
  3800. // Set up abort handling - when the signal is aborted, clean up the controller reference
  3801. abortSignal.addEventListener("abort", () => {
  3802. console.log(`[Task#${this.taskId}.${this.instanceId}] AbortSignal triggered for current request`)
  3803. this.currentRequestAbortController = undefined
  3804. })
  3805. try {
  3806. // Awaiting first chunk to see if it will throw an error.
  3807. this.isWaitingForFirstChunk = true
  3808. // Race between the first chunk and the abort signal
  3809. const firstChunkPromise = iterator.next()
  3810. const abortPromise = new Promise<never>((_, reject) => {
  3811. if (abortSignal.aborted) {
  3812. reject(new Error("Request cancelled by user"))
  3813. } else {
  3814. abortSignal.addEventListener("abort", () => {
  3815. reject(new Error("Request cancelled by user"))
  3816. })
  3817. }
  3818. })
  3819. const firstChunk = await Promise.race([firstChunkPromise, abortPromise])
  3820. yield firstChunk.value
  3821. this.isWaitingForFirstChunk = false
  3822. } catch (error) {
  3823. this.isWaitingForFirstChunk = false
  3824. this.currentRequestAbortController = undefined
  3825. const isContextWindowExceededError = checkContextWindowExceededError(error)
  3826. // If it's a context window error and we haven't exceeded max retries for this error type
  3827. if (isContextWindowExceededError && retryAttempt < MAX_CONTEXT_WINDOW_RETRIES) {
  3828. console.warn(
  3829. `[Task#${this.taskId}] Context window exceeded for model ${this.api.getModel().id}. ` +
  3830. `Retry attempt ${retryAttempt + 1}/${MAX_CONTEXT_WINDOW_RETRIES}. ` +
  3831. `Attempting automatic truncation...`,
  3832. )
  3833. await this.handleContextWindowExceededError()
  3834. // Retry the request after handling the context window error
  3835. yield* this.attemptApiRequest(retryAttempt + 1)
  3836. return
  3837. }
3838. // Note that this api_req_failed ask is unique: we only present it if the API hasn't streamed any content yet (i.e., it fails on the first chunk), since that lets the user hit a retry button. If the API fails mid-stream, the task could be in any arbitrary state where some tools may have executed, so that error is handled differently and requires cancelling the task entirely.
  3839. if (autoApprovalEnabled) {
  3840. // Apply shared exponential backoff and countdown UX
  3841. await this.backoffAndAnnounce(retryAttempt, error)
  3842. // CRITICAL: Check if task was aborted during the backoff countdown
  3843. // This prevents infinite loops when users cancel during auto-retry
  3844. // Without this check, the recursive call below would continue even after abort
  3845. if (this.abort) {
  3846. throw new Error(
  3847. `[Task#attemptApiRequest] task ${this.taskId}.${this.instanceId} aborted during retry`,
  3848. )
  3849. }
  3850. // Delegate generator output from the recursive call with
  3851. // incremented retry count.
  3852. yield* this.attemptApiRequest(retryAttempt + 1)
  3853. return
  3854. } else {
  3855. const { response } = await this.ask(
  3856. "api_req_failed",
  3857. error.message ?? JSON.stringify(serializeError(error), null, 2),
  3858. )
  3859. if (response !== "yesButtonClicked") {
3860. // This should never be reached: if the user clicked the "no" button, the
3861. // current task is cleared, aborting this instance.
  3862. throw new Error("API request failed")
  3863. }
  3864. await this.say("api_req_retried")
  3865. // Delegate generator output from the recursive call.
  3866. yield* this.attemptApiRequest()
  3867. return
  3868. }
  3869. }
  3870. // No error, so we can continue to yield all remaining chunks.
3871. // (Needs to be placed outside of the try/catch since we want the caller to
3872. // handle errors here rather than via api_req_failed, which is reserved for
3873. // first-chunk failures only.)
  3874. // This delegates to another generator or iterable object. In this case,
  3875. // it's saying "yield all remaining values from this iterator". This
  3876. // effectively passes along all subsequent chunks from the original
  3877. // stream.
  3878. yield* iterator
  3879. }
  3880. // Shared exponential backoff for retries (first-chunk and mid-stream)
  3881. private async backoffAndAnnounce(retryAttempt: number, error: any): Promise<void> {
  3882. try {
  3883. const state = await this.providerRef.deref()?.getState()
  3884. const baseDelay = state?.requestDelaySeconds || 5
  3885. let exponentialDelay = Math.min(
  3886. Math.ceil(baseDelay * Math.pow(2, retryAttempt)),
  3887. MAX_EXPONENTIAL_BACKOFF_SECONDS,
  3888. )
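// Example: with baseDelay = 5 and retryAttempt = 2 this yields
// ceil(5 * 2^2) = 20 seconds, capped at MAX_EXPONENTIAL_BACKOFF_SECONDS.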
  3889. // Respect provider rate limit window
  3890. let rateLimitDelay = 0
  3891. const rateLimit = (state?.apiConfiguration ?? this.apiConfiguration)?.rateLimitSeconds || 0
  3892. if (Task.lastGlobalApiRequestTime && rateLimit > 0) {
  3893. const elapsed = performance.now() - Task.lastGlobalApiRequestTime
  3894. rateLimitDelay = Math.ceil(Math.min(rateLimit, Math.max(0, rateLimit * 1000 - elapsed) / 1000))
  3895. }
  3896. // Prefer RetryInfo on 429 if present
  3897. if (error?.status === 429) {
  3898. const retryInfo = error?.errorDetails?.find(
  3899. (d: any) => d["@type"] === "type.googleapis.com/google.rpc.RetryInfo",
  3900. )
  3901. const match = retryInfo?.retryDelay?.match?.(/^(\d+)s$/)
  3902. if (match) {
  3903. exponentialDelay = Number(match[1]) + 1
  3904. }
  3905. }
  3906. const finalDelay = Math.max(exponentialDelay, rateLimitDelay)
  3907. if (finalDelay <= 0) {
  3908. return
  3909. }
  3910. // Build header text; fall back to error message if none provided
  3911. let headerText
  3912. if (error.status) {
  3913. // Include both status code (for ChatRow parsing) and detailed message (for error details)
  3914. // Format: "<status>\n<message>" allows ChatRow to extract status via parseInt(text.substring(0,3))
  3915. // while preserving the full error message in errorDetails for debugging
  3916. const errorMessage = error?.message || "Unknown error"
  3917. headerText = `${error.status}\n${errorMessage}`
  3918. } else if (error?.message) {
  3919. headerText = error.message
  3920. } else {
  3921. headerText = "Unknown error"
  3922. }
  3923. headerText = headerText ? `${headerText}\n` : ""
  3924. // Show countdown timer with exponential backoff
  3925. for (let i = finalDelay; i > 0; i--) {
  3926. // Check abort flag during countdown to allow early exit
  3927. if (this.abort) {
  3928. throw new Error(`[Task#${this.taskId}] Aborted during retry countdown`)
  3929. }
  3930. await this.say("api_req_retry_delayed", `${headerText}<retry_timer>${i}</retry_timer>`, undefined, true)
  3931. await delay(1000)
  3932. }
  3933. await this.say("api_req_retry_delayed", headerText, undefined, false)
  3934. } catch (err) {
  3935. console.error("Exponential backoff failed:", err)
  3936. }
  3937. }
  3938. // Checkpoints
  3939. public async checkpointSave(force: boolean = false, suppressMessage: boolean = false) {
  3940. return checkpointSave(this, force, suppressMessage)
  3941. }
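/**
 * Converts stored API messages into the wire format sent to the provider.
 *
 * Standalone reasoning items are forwarded only when encrypted; assistant
 * messages keep reasoning_details (OpenRouter format) as-is, split an
 * encrypted reasoning block into a separate reasoning item, and strip
 * plain-text reasoning unless the model's preserveReasoning flag is set.
 * All other messages pass through with their role and content unchanged.
 */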
  3942. private buildCleanConversationHistory(
  3943. messages: ApiMessage[],
  3944. ): Array<
  3945. Anthropic.Messages.MessageParam | { type: "reasoning"; encrypted_content: string; id?: string; summary?: any[] }
  3946. > {
  3947. type ReasoningItemForRequest = {
  3948. type: "reasoning"
  3949. encrypted_content: string
  3950. id?: string
  3951. summary?: any[]
  3952. }
  3953. const cleanConversationHistory: (Anthropic.Messages.MessageParam | ReasoningItemForRequest)[] = []
  3954. for (const msg of messages) {
  3955. // Standalone reasoning: send encrypted, skip plain text
  3956. if (msg.type === "reasoning") {
  3957. if (msg.encrypted_content) {
  3958. cleanConversationHistory.push({
  3959. type: "reasoning",
  3960. summary: msg.summary,
  3961. encrypted_content: msg.encrypted_content!,
  3962. ...(msg.id ? { id: msg.id } : {}),
  3963. })
  3964. }
  3965. continue
  3966. }
  3967. // Preferred path: assistant message with embedded reasoning as first content block
  3968. if (msg.role === "assistant") {
  3969. const rawContent = msg.content
  3970. const contentArray: Anthropic.Messages.ContentBlockParam[] = Array.isArray(rawContent)
  3971. ? (rawContent as Anthropic.Messages.ContentBlockParam[])
  3972. : rawContent !== undefined
  3973. ? ([
  3974. { type: "text", text: rawContent } satisfies Anthropic.Messages.TextBlockParam,
  3975. ] as Anthropic.Messages.ContentBlockParam[])
  3976. : []
  3977. const [first, ...rest] = contentArray
  3978. // Check if this message has reasoning_details (OpenRouter format for Gemini 3, etc.)
  3979. const msgWithDetails = msg
  3980. if (msgWithDetails.reasoning_details && Array.isArray(msgWithDetails.reasoning_details)) {
  3981. // Build the assistant message with reasoning_details
  3982. let assistantContent: Anthropic.Messages.MessageParam["content"]
  3983. if (contentArray.length === 0) {
  3984. assistantContent = ""
  3985. } else if (contentArray.length === 1 && contentArray[0].type === "text") {
  3986. assistantContent = (contentArray[0] as Anthropic.Messages.TextBlockParam).text
  3987. } else {
  3988. assistantContent = contentArray
  3989. }
  3990. // Create message with reasoning_details property
  3991. cleanConversationHistory.push({
  3992. role: "assistant",
  3993. content: assistantContent,
  3994. reasoning_details: msgWithDetails.reasoning_details,
  3995. } as any)
  3996. continue
  3997. }
  3998. // Embedded reasoning: encrypted (send) or plain text (skip)
  3999. const hasEncryptedReasoning =
  4000. first && (first as any).type === "reasoning" && typeof (first as any).encrypted_content === "string"
  4001. const hasPlainTextReasoning =
  4002. first && (first as any).type === "reasoning" && typeof (first as any).text === "string"
  4003. if (hasEncryptedReasoning) {
  4004. const reasoningBlock = first as any
  4005. // Send as separate reasoning item (OpenAI Native)
  4006. cleanConversationHistory.push({
  4007. type: "reasoning",
  4008. summary: reasoningBlock.summary ?? [],
  4009. encrypted_content: reasoningBlock.encrypted_content,
  4010. ...(reasoningBlock.id ? { id: reasoningBlock.id } : {}),
  4011. })
  4012. // Send assistant message without reasoning
  4013. let assistantContent: Anthropic.Messages.MessageParam["content"]
  4014. if (rest.length === 0) {
  4015. assistantContent = ""
  4016. } else if (rest.length === 1 && rest[0].type === "text") {
  4017. assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
  4018. } else {
  4019. assistantContent = rest
  4020. }
  4021. cleanConversationHistory.push({
  4022. role: "assistant",
  4023. content: assistantContent,
  4024. } satisfies Anthropic.Messages.MessageParam)
  4025. continue
  4026. } else if (hasPlainTextReasoning) {
  4027. // Check if the model's preserveReasoning flag is set
  4028. // If true, include the reasoning block in API requests
  4029. // If false/undefined, strip it out (stored for history only, not sent back to API)
  4030. const shouldPreserveForApi = this.api.getModel().info.preserveReasoning === true
  4031. let assistantContent: Anthropic.Messages.MessageParam["content"]
  4032. if (shouldPreserveForApi) {
  4033. // Include reasoning block in the content sent to API
  4034. assistantContent = contentArray
  4035. } else {
  4036. // Strip reasoning out - stored for history only, not sent back to API
  4037. if (rest.length === 0) {
  4038. assistantContent = ""
  4039. } else if (rest.length === 1 && rest[0].type === "text") {
  4040. assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
  4041. } else {
  4042. assistantContent = rest
  4043. }
  4044. }
  4045. cleanConversationHistory.push({
  4046. role: "assistant",
  4047. content: assistantContent,
  4048. } satisfies Anthropic.Messages.MessageParam)
  4049. continue
  4050. }
  4051. }
  4052. // Default path for regular messages (no embedded reasoning)
  4053. if (msg.role) {
  4054. cleanConversationHistory.push({
  4055. role: msg.role,
  4056. content: msg.content as Anthropic.Messages.ContentBlockParam[] | string,
  4057. })
  4058. }
  4059. }
  4060. return cleanConversationHistory
  4061. }
  4062. public async checkpointRestore(options: CheckpointRestoreOptions) {
  4063. return checkpointRestore(this, options)
  4064. }
  4065. public async checkpointDiff(options: CheckpointDiffOptions) {
  4066. return checkpointDiff(this, options)
  4067. }
  4068. // Metrics
  4069. public combineMessages(messages: ClineMessage[]) {
  4070. return combineApiRequests(combineCommandSequences(messages))
  4071. }
  4072. public getTokenUsage(): TokenUsage {
  4073. return getApiMetrics(this.combineMessages(this.clineMessages.slice(1)))
  4074. }
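/**
 * Increments the attempt counter for a tool, creating its usage entry on
 * first use; recordToolError below does the same for failures and emits a
 * TaskToolFailed event when an error message is provided.
 */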
  4075. public recordToolUsage(toolName: ToolName) {
  4076. if (!this.toolUsage[toolName]) {
  4077. this.toolUsage[toolName] = { attempts: 0, failures: 0 }
  4078. }
  4079. this.toolUsage[toolName].attempts++
  4080. }
  4081. public recordToolError(toolName: ToolName, error?: string) {
  4082. if (!this.toolUsage[toolName]) {
  4083. this.toolUsage[toolName] = { attempts: 0, failures: 0 }
  4084. }
  4085. this.toolUsage[toolName].failures++
  4086. if (error) {
  4087. this.emit(RooCodeEventName.TaskToolFailed, this.taskId, toolName, error)
  4088. }
  4089. }
  4090. // Getters
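// Status is derived from the pending ask, in priority order:
// interactive > resumable > idle; with no pending ask the task is Running.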
  4091. public get taskStatus(): TaskStatus {
  4092. if (this.interactiveAsk) {
  4093. return TaskStatus.Interactive
  4094. }
  4095. if (this.resumableAsk) {
  4096. return TaskStatus.Resumable
  4097. }
  4098. if (this.idleAsk) {
  4099. return TaskStatus.Idle
  4100. }
  4101. return TaskStatus.Running
  4102. }
  4103. public get taskAsk(): ClineMessage | undefined {
  4104. return this.idleAsk || this.resumableAsk || this.interactiveAsk
  4105. }
  4106. public get queuedMessages(): QueuedMessage[] {
  4107. return this.messageQueueService.messages
  4108. }
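/**
 * Cached variant of getTokenUsage(): returns the existing snapshot when one
 * has been taken, otherwise computes a fresh snapshot and stamps it with the
 * timestamp of the latest cline message.
 */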
  4109. public get tokenUsage(): TokenUsage | undefined {
  4110. if (this.tokenUsageSnapshot && this.tokenUsageSnapshotAt) {
  4111. return this.tokenUsageSnapshot
  4112. }
  4113. this.tokenUsageSnapshot = this.getTokenUsage()
  4114. this.tokenUsageSnapshotAt = this.clineMessages.at(-1)?.ts
  4115. return this.tokenUsageSnapshot
  4116. }
  4117. public get cwd() {
  4118. return this.workspacePath
  4119. }
  4120. /**
  4121. * Provides convenient access to high-level message operations.
  4122. * Uses lazy initialization - the MessageManager is only created when first accessed.
  4123. * Subsequent accesses return the same cached instance.
  4124. *
  4125. * ## Important: Single Coordination Point
  4126. *
  4127. * **All MessageManager operations must go through this getter** rather than
  4128. * instantiating `new MessageManager(task)` directly. This ensures:
  4129. * - A single shared instance for consistent behavior
  4130. * - Centralized coordination of all rewind/message operations
  4131. * - Ability to add internal state or instrumentation in the future
  4132. *
  4133. * @example
  4134. * ```typescript
  4135. * // Correct: Use the getter
  4136. * await task.messageManager.rewindToTimestamp(ts)
  4137. *
  4138. * // Incorrect: Do NOT create new instances directly
  4139. * // const manager = new MessageManager(task) // Don't do this!
  4140. * ```
  4141. */
  4142. get messageManager(): MessageManager {
  4143. if (!this._messageManager) {
  4144. this._messageManager = new MessageManager(this)
  4145. }
  4146. return this._messageManager
  4147. }
  4148. /**
  4149. * Process any queued messages by dequeuing and submitting them.
  4150. * This ensures that queued user messages are sent when appropriate,
  4151. * preventing them from getting stuck in the queue.
  4152. *
4153. * Messages are dequeued one at a time and resubmitted asynchronously.
4154. */
  4155. public processQueuedMessages(): void {
  4156. try {
  4157. if (!this.messageQueueService.isEmpty()) {
  4158. const queued = this.messageQueueService.dequeueMessage()
  4159. if (queued) {
  4160. setTimeout(() => {
  4161. this.submitUserMessage(queued.text, queued.images).catch((err) =>
  4162. console.error(`[Task] Failed to submit queued message:`, err),
  4163. )
  4164. }, 0)
  4165. }
  4166. }
  4167. } catch (e) {
  4168. console.error(`[Task] Queue processing error:`, e)
  4169. }
  4170. }
  4171. }