Task.ts

import * as path from "path"
import * as vscode from "vscode"
import os from "os"
import crypto from "crypto"
import { v7 as uuidv7 } from "uuid"
import EventEmitter from "events"
import { AskIgnoredError } from "./AskIgnoredError"
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import debounce from "lodash.debounce"
import delay from "delay"
import pWaitFor from "p-wait-for"
import { serializeError } from "serialize-error"
import { Package } from "../../shared/package"
import { formatToolInvocation } from "../tools/helpers/toolResultFormatting"
import {
	type TaskLike,
	type TaskMetadata,
	type TaskEvents,
	type ProviderSettings,
	type TokenUsage,
	type ToolUsage,
	type ToolName,
	type ContextCondense,
	type ContextTruncation,
	type ClineMessage,
	type ClineSay,
	type ClineAsk,
	type ToolProgressStatus,
	type HistoryItem,
	type CreateTaskOptions,
	type ModelInfo,
	type ClineApiReqCancelReason,
	type ClineApiReqInfo,
	RooCodeEventName,
	TelemetryEventName,
	TaskStatus,
	TodoItem,
	getApiProtocol,
	getModelId,
	isIdleAsk,
	isInteractiveAsk,
	isResumableAsk,
	QueuedMessage,
	DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
	DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
	MAX_CHECKPOINT_TIMEOUT_SECONDS,
	MIN_CHECKPOINT_TIMEOUT_SECONDS,
	ConsecutiveMistakeError,
	MAX_MCP_TOOLS_THRESHOLD,
	countEnabledMcpTools,
} from "@roo-code/types"
import { TelemetryService } from "@roo-code/telemetry"
import { CloudService, BridgeOrchestrator } from "@roo-code/cloud"
// api
import { ApiHandler, ApiHandlerCreateMessageMetadata, buildApiHandler } from "../../api"
import { ApiStream, GroundingSource } from "../../api/transform/stream"
import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
// shared
import { findLastIndex } from "../../shared/array"
import { combineApiRequests } from "../../shared/combineApiRequests"
import { combineCommandSequences } from "../../shared/combineCommandSequences"
import { t } from "../../i18n"
import { getApiMetrics, hasTokenUsageChanged, hasToolUsageChanged } from "../../shared/getApiMetrics"
import { ClineAskResponse } from "../../shared/WebviewMessage"
import { defaultModeSlug, getModeBySlug, getGroupName } from "../../shared/modes"
import { DiffStrategy, type ToolUse, type ToolParamName, toolParamNames } from "../../shared/tools"
import { getModelMaxOutputTokens } from "../../shared/api"
// services
import { UrlContentFetcher } from "../../services/browser/UrlContentFetcher"
import { BrowserSession } from "../../services/browser/BrowserSession"
import { McpHub } from "../../services/mcp/McpHub"
import { McpServerManager } from "../../services/mcp/McpServerManager"
import { RepoPerTaskCheckpointService } from "../../services/checkpoints"
// integrations
import { DiffViewProvider } from "../../integrations/editor/DiffViewProvider"
import { findToolName } from "../../integrations/misc/export-markdown"
import { RooTerminalProcess } from "../../integrations/terminal/types"
import { TerminalRegistry } from "../../integrations/terminal/TerminalRegistry"
import { OutputInterceptor } from "../../integrations/terminal/OutputInterceptor"
// utils
import { calculateApiCostAnthropic, calculateApiCostOpenAI } from "../../shared/cost"
import { getWorkspacePath } from "../../utils/path"
import { sanitizeToolUseId } from "../../utils/tool-id"
import { getTaskDirectoryPath } from "../../utils/storage"
// prompts
import { formatResponse } from "../prompts/responses"
import { SYSTEM_PROMPT } from "../prompts/system"
import { buildNativeToolsArrayWithRestrictions } from "./build-tools"
// core modules
import { ToolRepetitionDetector } from "../tools/ToolRepetitionDetector"
import { restoreTodoListForTask } from "../tools/UpdateTodoListTool"
import { FileContextTracker } from "../context-tracking/FileContextTracker"
import { RooIgnoreController } from "../ignore/RooIgnoreController"
import { RooProtectedController } from "../protect/RooProtectedController"
import { type AssistantMessageContent, presentAssistantMessage } from "../assistant-message"
import { NativeToolCallParser } from "../assistant-message/NativeToolCallParser"
import { manageContext, willManageContext } from "../context-management"
import { ClineProvider } from "../webview/ClineProvider"
import { MultiSearchReplaceDiffStrategy } from "../diff/strategies/multi-search-replace"
import {
	type ApiMessage,
	readApiMessages,
	saveApiMessages,
	readTaskMessages,
	saveTaskMessages,
	taskMetadata,
} from "../task-persistence"
import { getEnvironmentDetails } from "../environment/getEnvironmentDetails"
import { checkContextWindowExceededError } from "../context/context-management/context-error-handling"
import {
	type CheckpointDiffOptions,
	type CheckpointRestoreOptions,
	getCheckpointService,
	checkpointSave,
	checkpointRestore,
	checkpointDiff,
} from "../checkpoints"
import { processUserContentMentions } from "../mentions/processUserContentMentions"
import { getMessagesSinceLastSummary, summarizeConversation, getEffectiveApiHistory } from "../condense"
import { MessageQueueService } from "../message-queue/MessageQueueService"
import { AutoApprovalHandler, checkAutoApproval } from "../auto-approval"
import { MessageManager } from "../message-manager"
import { validateAndFixToolResultIds } from "./validateToolResultIds"
import { mergeConsecutiveApiMessages } from "./mergeConsecutiveApiMessages"
const MAX_EXPONENTIAL_BACKOFF_SECONDS = 600 // 10 minutes
const DEFAULT_USAGE_COLLECTION_TIMEOUT_MS = 5000 // 5 seconds
const FORCED_CONTEXT_REDUCTION_PERCENT = 75 // Keep 75% of context (remove 25%) on context window errors
const MAX_CONTEXT_WINDOW_RETRIES = 3 // Maximum retries for context window errors
export interface TaskOptions extends CreateTaskOptions {
	provider: ClineProvider
	apiConfiguration: ProviderSettings
	enableCheckpoints?: boolean
	checkpointTimeout?: number
	enableBridge?: boolean
	consecutiveMistakeLimit?: number
	task?: string
	images?: string[]
	historyItem?: HistoryItem
	experiments?: Record<string, boolean>
	startTask?: boolean
	rootTask?: Task
	parentTask?: Task
	taskNumber?: number
	onCreated?: (task: Task) => void
	initialTodos?: TodoItem[]
	workspacePath?: string
	/** Initial status for the task's history item (e.g., "active" for child tasks) */
	initialStatus?: "active" | "delegated" | "completed"
}
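/*
 * Illustrative sketch (not part of the original file): a minimal `TaskOptions`
 * value as consumed by the constructor below. `someClineProvider` and
 * `someProviderSettings` are assumed placeholders for a live ClineProvider and
 * a valid provider profile held by the host extension.
 *
 * ```typescript
 * const options: TaskOptions = {
 * 	provider: someClineProvider, // assumption: an existing ClineProvider instance
 * 	apiConfiguration: someProviderSettings, // assumption: an existing ProviderSettings profile
 * 	task: "Summarize the repository structure",
 * 	enableCheckpoints: true,
 * 	startTask: false, // defer startTask()/resumeTaskFromHistory() to the caller
 * }
 * ```
 */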
export class Task extends EventEmitter<TaskEvents> implements TaskLike {
	readonly taskId: string
	readonly rootTaskId?: string
	readonly parentTaskId?: string
	childTaskId?: string
	pendingNewTaskToolCallId?: string
	readonly instanceId: string
	readonly metadata: TaskMetadata
	todoList?: TodoItem[]
	readonly rootTask: Task | undefined = undefined
	readonly parentTask: Task | undefined = undefined
	readonly taskNumber: number
	readonly workspacePath: string
	/**
	 * The mode associated with this task. Persisted across sessions
	 * to maintain user context when reopening tasks from history.
	 *
	 * ## Lifecycle
	 *
	 * ### For new tasks:
	 * 1. Initially `undefined` during construction
	 * 2. Asynchronously initialized from provider state via `initializeTaskMode()`
	 * 3. Falls back to `defaultModeSlug` if provider state is unavailable
	 *
	 * ### For history items:
	 * 1. Immediately set from `historyItem.mode` during construction
	 * 2. Falls back to `defaultModeSlug` if mode is not stored in history
	 *
	 * ## Important
	 * This property should NOT be accessed directly until `taskModeReady` promise resolves.
	 * Use `getTaskMode()` for async access or `taskMode` getter for sync access after initialization.
	 *
	 * @private
	 * @see {@link getTaskMode} - For safe async access
	 * @see {@link taskMode} - For sync access after initialization
	 * @see {@link waitForModeInitialization} - To ensure initialization is complete
	 */
	private _taskMode: string | undefined
	/**
	 * Promise that resolves when the task mode has been initialized.
	 * This ensures async mode initialization completes before the task is used.
	 *
	 * ## Purpose
	 * - Prevents race conditions when accessing task mode
	 * - Ensures provider state is properly loaded before mode-dependent operations
	 * - Provides a synchronization point for async initialization
	 *
	 * ## Resolution timing
	 * - For history items: Resolves immediately (sync initialization)
	 * - For new tasks: Resolves after provider state is fetched (async initialization)
	 *
	 * @private
	 * @see {@link waitForModeInitialization} - Public method to await this promise
	 */
	private taskModeReady: Promise<void>
	/**
	 * The API configuration name (provider profile) associated with this task.
	 * Persisted across sessions to maintain the provider profile when reopening tasks from history.
	 *
	 * ## Lifecycle
	 *
	 * ### For new tasks:
	 * 1. Initially `undefined` during construction
	 * 2. Asynchronously initialized from provider state via `initializeTaskApiConfigName()`
	 * 3. Falls back to "default" if provider state is unavailable
	 *
	 * ### For history items:
	 * 1. Immediately set from `historyItem.apiConfigName` during construction
	 * 2. Falls back to undefined if not stored in history (for backward compatibility)
	 *
	 * ## Important
	 * If you need a non-`undefined` provider profile (e.g., for profile-dependent operations),
	 * wait for `taskApiConfigReady` first (or use `getTaskApiConfigName()`).
	 * The sync `taskApiConfigName` getter may return `undefined` for backward compatibility.
	 *
	 * @private
	 * @see {@link getTaskApiConfigName} - For safe async access
	 * @see {@link taskApiConfigName} - For sync access after initialization
	 */
	private _taskApiConfigName: string | undefined
	/**
	 * Promise that resolves when the task API config name has been initialized.
	 * This ensures async API config name initialization completes before the task is used.
	 *
	 * ## Purpose
	 * - Prevents race conditions when accessing task API config name
	 * - Ensures provider state is properly loaded before profile-dependent operations
	 * - Provides a synchronization point for async initialization
	 *
	 * ## Resolution timing
	 * - For history items: Resolves immediately (sync initialization)
	 * - For new tasks: Resolves after provider state is fetched (async initialization)
	 *
	 * @private
	 */
	private taskApiConfigReady: Promise<void>
	providerRef: WeakRef<ClineProvider>
	private readonly globalStoragePath: string
	abort: boolean = false
	currentRequestAbortController?: AbortController
	skipPrevResponseIdOnce: boolean = false
	// TaskStatus
	idleAsk?: ClineMessage
	resumableAsk?: ClineMessage
	interactiveAsk?: ClineMessage
	didFinishAbortingStream = false
	abandoned = false
	abortReason?: ClineApiReqCancelReason
	isInitialized = false
	isPaused: boolean = false
	// API
	apiConfiguration: ProviderSettings
	api: ApiHandler
	private static lastGlobalApiRequestTime?: number
	private autoApprovalHandler: AutoApprovalHandler
	/**
	 * Reset the global API request timestamp. This should only be used for testing.
	 * @internal
	 */
	static resetGlobalApiRequestTime(): void {
		Task.lastGlobalApiRequestTime = undefined
	}
	toolRepetitionDetector: ToolRepetitionDetector
	rooIgnoreController?: RooIgnoreController
	rooProtectedController?: RooProtectedController
	fileContextTracker: FileContextTracker
	urlContentFetcher: UrlContentFetcher
	terminalProcess?: RooTerminalProcess
	// Computer User
	browserSession: BrowserSession
	// Editing
	diffViewProvider: DiffViewProvider
	diffStrategy?: DiffStrategy
	didEditFile: boolean = false
	// LLM Messages & Chat Messages
	apiConversationHistory: ApiMessage[] = []
	clineMessages: ClineMessage[] = []
	// Ask
	private askResponse?: ClineAskResponse
	private askResponseText?: string
	private askResponseImages?: string[]
	public lastMessageTs?: number
	private autoApprovalTimeoutRef?: NodeJS.Timeout
	// Tool Use
	consecutiveMistakeCount: number = 0
	consecutiveMistakeLimit: number
	consecutiveMistakeCountForApplyDiff: Map<string, number> = new Map()
	consecutiveMistakeCountForEditFile: Map<string, number> = new Map()
	consecutiveNoToolUseCount: number = 0
	consecutiveNoAssistantMessagesCount: number = 0
	toolUsage: ToolUsage = {}
	// Checkpoints
	enableCheckpoints: boolean
	checkpointTimeout: number
	checkpointService?: RepoPerTaskCheckpointService
	checkpointServiceInitializing = false
	// Task Bridge
	enableBridge: boolean
	// Message Queue Service
	public readonly messageQueueService: MessageQueueService
	private messageQueueStateChangedHandler: (() => void) | undefined
	// Streaming
	isWaitingForFirstChunk = false
	isStreaming = false
	currentStreamingContentIndex = 0
	currentStreamingDidCheckpoint = false
	assistantMessageContent: AssistantMessageContent[] = []
	presentAssistantMessageLocked = false
	presentAssistantMessageHasPendingUpdates = false
	userMessageContent: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam | Anthropic.ToolResultBlockParam)[] = []
	userMessageContentReady = false
	/**
	 * Flag indicating whether the assistant message for the current streaming session
	 * has been saved to API conversation history.
	 *
	 * This is critical for parallel tool calling: tools should NOT execute until
	 * the assistant message is saved. Otherwise, if a tool like `new_task` triggers
	 * `flushPendingToolResultsToHistory()`, the user message with tool_results would
	 * appear BEFORE the assistant message with tool_uses, causing API errors.
	 *
	 * Reset to `false` at the start of each API request.
	 * Set to `true` after the assistant message is saved in `recursivelyMakeClineRequests`.
	 */
	assistantMessageSavedToHistory = false
	/**
	 * Push a tool_result block to userMessageContent, preventing duplicates.
	 * Duplicate tool_use_ids cause API errors.
	 *
	 * @param toolResult - The tool_result block to add
	 * @returns true if added, false if duplicate was skipped
	 */
	public pushToolResultToUserContent(toolResult: Anthropic.ToolResultBlockParam): boolean {
		const existingResult = this.userMessageContent.find(
			(block): block is Anthropic.ToolResultBlockParam =>
				block.type === "tool_result" && block.tool_use_id === toolResult.tool_use_id,
		)
		if (existingResult) {
			console.warn(
				`[Task#pushToolResultToUserContent] Skipping duplicate tool_result for tool_use_id: ${toolResult.tool_use_id}`,
			)
			return false
		}
		this.userMessageContent.push(toolResult)
		return true
	}
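	/*
	 * Illustrative usage sketch (not part of the original file): how a caller that
	 * has finished executing a tool might record its result via
	 * pushToolResultToUserContent. `task`, `toolUseId`, and the result text are
	 * assumed/hypothetical values.
	 *
	 * ```typescript
	 * const added = task.pushToolResultToUserContent({
	 * 	type: "tool_result",
	 * 	tool_use_id: toolUseId, // id from the matching tool_use block
	 * 	content: [{ type: "text", text: "File written successfully." }],
	 * })
	 * if (!added) {
	 * 	// A result for this tool_use_id was already queued; nothing more to do.
	 * }
	 * ```
	 */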
	/**
	 * Handle a tool call streaming event (tool_call_start, tool_call_delta, or tool_call_end).
	 * This is used both for processing events from NativeToolCallParser (legacy providers)
	 * and for direct AI SDK events (DeepSeek, Moonshot, etc.).
	 *
	 * @param event - The tool call event to process
	 */
	private handleToolCallEvent(
		event:
			| { type: "tool_call_start"; id: string; name: string }
			| { type: "tool_call_delta"; id: string; delta: string }
			| { type: "tool_call_end"; id: string },
	): void {
		if (event.type === "tool_call_start") {
			// Guard against duplicate tool_call_start events for the same tool ID.
			// This can occur due to stream retry, reconnection, or API quirks.
			// Without this check, duplicate tool_use blocks with the same ID would
			// be added to assistantMessageContent, causing API 400 errors:
			// "tool_use ids must be unique"
			if (this.streamingToolCallIndices.has(event.id)) {
				console.warn(
					`[Task#${this.taskId}] Ignoring duplicate tool_call_start for ID: ${event.id} (tool: ${event.name})`,
				)
				return
			}
			// Initialize streaming in NativeToolCallParser
			NativeToolCallParser.startStreamingToolCall(event.id, event.name as ToolName)
			// Before adding a new tool, finalize any preceding text block
			// This prevents the text block from blocking tool presentation
			const lastBlock = this.assistantMessageContent[this.assistantMessageContent.length - 1]
			if (lastBlock?.type === "text" && lastBlock.partial) {
				lastBlock.partial = false
			}
			// Track the index where this tool will be stored
			const toolUseIndex = this.assistantMessageContent.length
			this.streamingToolCallIndices.set(event.id, toolUseIndex)
			// Create initial partial tool use
			const partialToolUse: ToolUse = {
				type: "tool_use",
				name: event.name as ToolName,
				params: {},
				partial: true,
			}
			// Store the ID for native protocol
			;(partialToolUse as any).id = event.id
			// Add to content and present
			this.assistantMessageContent.push(partialToolUse)
			this.userMessageContentReady = false
			presentAssistantMessage(this)
		} else if (event.type === "tool_call_delta") {
			// Process chunk using streaming JSON parser
			const partialToolUse = NativeToolCallParser.processStreamingChunk(event.id, event.delta)
			if (partialToolUse) {
				// Get the index for this tool call
				const toolUseIndex = this.streamingToolCallIndices.get(event.id)
				if (toolUseIndex !== undefined) {
					// Store the ID for native protocol
					;(partialToolUse as any).id = event.id
					// Update the existing tool use with new partial data
					this.assistantMessageContent[toolUseIndex] = partialToolUse
					// Present updated tool use
					presentAssistantMessage(this)
				}
			}
		} else if (event.type === "tool_call_end") {
			// Finalize the streaming tool call
			const finalToolUse = NativeToolCallParser.finalizeStreamingToolCall(event.id)
			// Get the index for this tool call
			const toolUseIndex = this.streamingToolCallIndices.get(event.id)
			if (finalToolUse) {
				// Store the tool call ID
				;(finalToolUse as any).id = event.id
				// Get the index and replace partial with final
				if (toolUseIndex !== undefined) {
					this.assistantMessageContent[toolUseIndex] = finalToolUse
				}
				// Clean up tracking
				this.streamingToolCallIndices.delete(event.id)
				// Mark that we have new content to process
				this.userMessageContentReady = false
				// Present the finalized tool call
				presentAssistantMessage(this)
			} else if (toolUseIndex !== undefined) {
				// finalizeStreamingToolCall returned null (malformed JSON or missing args)
				// Mark the tool as non-partial so it's presented as complete, but execution
				// will be short-circuited in presentAssistantMessage with a structured tool_result.
				const existingToolUse = this.assistantMessageContent[toolUseIndex]
				if (existingToolUse && existingToolUse.type === "tool_use") {
					existingToolUse.partial = false
					// Ensure it has the ID for native protocol
					;(existingToolUse as any).id = event.id
				}
				// Clean up tracking
				this.streamingToolCallIndices.delete(event.id)
				// Mark that we have new content to process
				this.userMessageContentReady = false
				// Present the tool call - validation will handle missing params
				presentAssistantMessage(this)
			}
		}
	}
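	/*
	 * Illustrative sketch (not part of the original file): the kind of dispatch
	 * this private method expects from a streaming loop elsewhere in the class.
	 * The `stream` variable and the chunk discriminants shown here are assumptions
	 * about the provider stream, not the actual ApiStream contract.
	 *
	 * ```typescript
	 * for await (const chunk of stream) {
	 * 	if (
	 * 		chunk.type === "tool_call_start" ||
	 * 		chunk.type === "tool_call_delta" ||
	 * 		chunk.type === "tool_call_end"
	 * 	) {
	 * 		this.handleToolCallEvent(chunk)
	 * 	}
	 * }
	 * ```
	 */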
	didRejectTool = false
	didAlreadyUseTool = false
	didToolFailInCurrentTurn = false
	didCompleteReadingStream = false
	// No streaming parser is required.
	assistantMessageParser?: undefined
	private providerProfileChangeListener?: (config: { name: string; provider?: string }) => void
	// Native tool call streaming state (track which index each tool is at)
	private streamingToolCallIndices: Map<string, number> = new Map()
	// Cached model info for current streaming session (set at start of each API request)
	// This prevents excessive getModel() calls during tool execution
	cachedStreamingModel?: { id: string; info: ModelInfo }
	// Token Usage Cache
	private tokenUsageSnapshot?: TokenUsage
	private tokenUsageSnapshotAt?: number
	// Tool Usage Cache
	private toolUsageSnapshot?: ToolUsage
	// Token Usage Throttling - Debounced emit function
	private readonly TOKEN_USAGE_EMIT_INTERVAL_MS = 2000 // 2 seconds
	private debouncedEmitTokenUsage: ReturnType<typeof debounce>
	// Cloud Sync Tracking
	private cloudSyncedMessageTimestamps: Set<number> = new Set()
	// Initial status for the task's history item (set at creation time to avoid race conditions)
	private readonly initialStatus?: "active" | "delegated" | "completed"
	// MessageManager for high-level message operations (lazy initialized)
	private _messageManager?: MessageManager
	constructor({
		provider,
		apiConfiguration,
		enableCheckpoints = true,
		checkpointTimeout = DEFAULT_CHECKPOINT_TIMEOUT_SECONDS,
		enableBridge = false,
		consecutiveMistakeLimit = DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
		task,
		images,
		historyItem,
		experiments: experimentsConfig,
		startTask = true,
		rootTask,
		parentTask,
		taskNumber = -1,
		onCreated,
		initialTodos,
		workspacePath,
		initialStatus,
	}: TaskOptions) {
		super()
		if (startTask && !task && !images && !historyItem) {
			throw new Error("Either historyItem or task/images must be provided")
		}
		if (
			!checkpointTimeout ||
			checkpointTimeout > MAX_CHECKPOINT_TIMEOUT_SECONDS ||
			checkpointTimeout < MIN_CHECKPOINT_TIMEOUT_SECONDS
		) {
			throw new Error(
				"checkpointTimeout must be between " +
					MIN_CHECKPOINT_TIMEOUT_SECONDS +
					" and " +
					MAX_CHECKPOINT_TIMEOUT_SECONDS +
					" seconds",
			)
		}
		this.taskId = historyItem ? historyItem.id : uuidv7()
		this.rootTaskId = historyItem ? historyItem.rootTaskId : rootTask?.taskId
		this.parentTaskId = historyItem ? historyItem.parentTaskId : parentTask?.taskId
		this.childTaskId = undefined
		this.metadata = {
			task: historyItem ? historyItem.task : task,
			images: historyItem ? [] : images,
		}
		// Normal use-case is usually retry similar history task with new workspace.
		this.workspacePath = parentTask
			? parentTask.workspacePath
			: (workspacePath ?? getWorkspacePath(path.join(os.homedir(), "Desktop")))
		this.instanceId = crypto.randomUUID().slice(0, 8)
		this.taskNumber = -1
		this.rooIgnoreController = new RooIgnoreController(this.cwd)
		this.rooProtectedController = new RooProtectedController(this.cwd)
		this.fileContextTracker = new FileContextTracker(provider, this.taskId)
		this.rooIgnoreController.initialize().catch((error) => {
			console.error("Failed to initialize RooIgnoreController:", error)
		})
		this.apiConfiguration = apiConfiguration
		this.api = buildApiHandler(this.apiConfiguration)
		this.autoApprovalHandler = new AutoApprovalHandler()
		this.urlContentFetcher = new UrlContentFetcher(provider.context)
		this.browserSession = new BrowserSession(provider.context, (isActive: boolean) => {
			// Add a message to indicate browser session status change
			this.say("browser_session_status", isActive ? "Browser session opened" : "Browser session closed")
			// Broadcast to browser panel
			this.broadcastBrowserSessionUpdate()
			// When a browser session becomes active, automatically open/reveal the Browser Session tab
			if (isActive) {
				try {
					// Lazy-load to avoid circular imports at module load time
					const { BrowserSessionPanelManager } = require("../webview/BrowserSessionPanelManager")
					const providerRef = this.providerRef.deref()
					if (providerRef) {
						BrowserSessionPanelManager.getInstance(providerRef)
							.show()
							.catch(() => {})
					}
				} catch (err) {
					console.error("[Task] Failed to auto-open Browser Session panel:", err)
				}
			}
		})
		this.consecutiveMistakeLimit = consecutiveMistakeLimit ?? DEFAULT_CONSECUTIVE_MISTAKE_LIMIT
		this.providerRef = new WeakRef(provider)
		this.globalStoragePath = provider.context.globalStorageUri.fsPath
		this.diffViewProvider = new DiffViewProvider(this.cwd, this)
		this.enableCheckpoints = enableCheckpoints
		this.checkpointTimeout = checkpointTimeout
		this.enableBridge = enableBridge
		this.parentTask = parentTask
		this.taskNumber = taskNumber
		this.initialStatus = initialStatus
		// Store the task's mode and API config name when it's created.
		// For history items, use the stored values; for new tasks, we'll set them
		// after getting state.
		if (historyItem) {
			this._taskMode = historyItem.mode || defaultModeSlug
			this._taskApiConfigName = historyItem.apiConfigName
			this.taskModeReady = Promise.resolve()
			this.taskApiConfigReady = Promise.resolve()
			TelemetryService.instance.captureTaskRestarted(this.taskId)
		} else {
			// For new tasks, don't set the mode/apiConfigName yet - wait for async initialization.
			this._taskMode = undefined
			this._taskApiConfigName = undefined
			this.taskModeReady = this.initializeTaskMode(provider)
			this.taskApiConfigReady = this.initializeTaskApiConfigName(provider)
			TelemetryService.instance.captureTaskCreated(this.taskId)
		}
		this.assistantMessageParser = undefined
		this.messageQueueService = new MessageQueueService()
		this.messageQueueStateChangedHandler = () => {
			this.emit(RooCodeEventName.TaskUserMessage, this.taskId)
			this.emit(RooCodeEventName.QueuedMessagesUpdated, this.taskId, this.messageQueueService.messages)
			this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
		}
		this.messageQueueService.on("stateChanged", this.messageQueueStateChangedHandler)
		// Listen for provider profile changes to update parser state
		this.setupProviderProfileChangeListener(provider)
		// Set up diff strategy
		this.diffStrategy = new MultiSearchReplaceDiffStrategy()
		this.toolRepetitionDetector = new ToolRepetitionDetector(this.consecutiveMistakeLimit)
		// Initialize todo list if provided
		if (initialTodos && initialTodos.length > 0) {
			this.todoList = initialTodos
		}
		// Initialize debounced token usage emit function
		// Uses debounce with maxWait to achieve throttle-like behavior:
		// - leading: true - Emit immediately on first call
		// - trailing: true - Emit final state when updates stop
		// - maxWait - Ensures at most one emit per interval during rapid updates (throttle behavior)
		this.debouncedEmitTokenUsage = debounce(
			(tokenUsage: TokenUsage, toolUsage: ToolUsage) => {
				const tokenChanged = hasTokenUsageChanged(tokenUsage, this.tokenUsageSnapshot)
				const toolChanged = hasToolUsageChanged(toolUsage, this.toolUsageSnapshot)
				if (tokenChanged || toolChanged) {
					this.emit(RooCodeEventName.TaskTokenUsageUpdated, this.taskId, tokenUsage, toolUsage)
					this.tokenUsageSnapshot = tokenUsage
					this.tokenUsageSnapshotAt = this.clineMessages.at(-1)?.ts
					// Deep copy tool usage for snapshot
					this.toolUsageSnapshot = JSON.parse(JSON.stringify(toolUsage))
				}
			},
			this.TOKEN_USAGE_EMIT_INTERVAL_MS,
			{ leading: true, trailing: true, maxWait: this.TOKEN_USAGE_EMIT_INTERVAL_MS },
		)
		onCreated?.(this)
		if (startTask) {
			if (task || images) {
				this.startTask(task, images)
			} else if (historyItem) {
				this.resumeTaskFromHistory()
			} else {
				throw new Error("Either historyItem or task/images must be provided")
			}
		}
	}
	/**
	 * Initialize the task mode from the provider state.
	 * This method handles async initialization with proper error handling.
	 *
	 * ## Flow
	 * 1. Attempts to fetch the current mode from provider state
	 * 2. Sets `_taskMode` to the fetched mode or `defaultModeSlug` if unavailable
	 * 3. Handles errors gracefully by falling back to default mode
	 * 4. Logs any initialization errors for debugging
	 *
	 * ## Error handling
	 * - Network failures when fetching provider state
	 * - Provider not yet initialized
	 * - Invalid state structure
	 *
	 * All errors result in fallback to `defaultModeSlug` to ensure task can proceed.
	 *
	 * @private
	 * @param provider - The ClineProvider instance to fetch state from
	 * @returns Promise that resolves when initialization is complete
	 */
	private async initializeTaskMode(provider: ClineProvider): Promise<void> {
		try {
			const state = await provider.getState()
			this._taskMode = state?.mode || defaultModeSlug
		} catch (error) {
			// If there's an error getting state, use the default mode
			this._taskMode = defaultModeSlug
			// Use the provider's log method for better error visibility
			const errorMessage = `Failed to initialize task mode: ${error instanceof Error ? error.message : String(error)}`
			provider.log(errorMessage)
		}
	}
	/**
	 * Initialize the task API config name from the provider state.
	 * This method handles async initialization with proper error handling.
	 *
	 * ## Flow
	 * 1. Attempts to fetch the current API config name from provider state
	 * 2. Sets `_taskApiConfigName` to the fetched name or "default" if unavailable
	 * 3. Handles errors gracefully by falling back to "default"
	 * 4. Logs any initialization errors for debugging
	 *
	 * ## Error handling
	 * - Network failures when fetching provider state
	 * - Provider not yet initialized
	 * - Invalid state structure
	 *
	 * All errors result in fallback to "default" to ensure task can proceed.
	 *
	 * @private
	 * @param provider - The ClineProvider instance to fetch state from
	 * @returns Promise that resolves when initialization is complete
	 */
	private async initializeTaskApiConfigName(provider: ClineProvider): Promise<void> {
		try {
			const state = await provider.getState()
			// Avoid clobbering a newer value that may have been set while awaiting provider state
			// (e.g., user switches provider profile immediately after task creation).
			if (this._taskApiConfigName === undefined) {
				this._taskApiConfigName = state?.currentApiConfigName ?? "default"
			}
		} catch (error) {
			// If there's an error getting state, use the default profile (unless a newer value was set).
			if (this._taskApiConfigName === undefined) {
				this._taskApiConfigName = "default"
			}
			// Use the provider's log method for better error visibility
			const errorMessage = `Failed to initialize task API config name: ${error instanceof Error ? error.message : String(error)}`
			provider.log(errorMessage)
		}
	}
	/**
	 * Sets up a listener for provider profile changes.
	 *
	 * @private
	 * @param provider - The ClineProvider instance to listen to
	 */
	private setupProviderProfileChangeListener(provider: ClineProvider): void {
		// Only set up listener if provider has the on method (may not exist in test mocks)
		if (typeof provider.on !== "function") {
			return
		}
		this.providerProfileChangeListener = async () => {
			try {
				const newState = await provider.getState()
				if (newState?.apiConfiguration) {
					this.updateApiConfiguration(newState.apiConfiguration)
				}
			} catch (error) {
				console.error(
					`[Task#${this.taskId}.${this.instanceId}] Failed to update API configuration on profile change:`,
					error,
				)
			}
		}
		provider.on(RooCodeEventName.ProviderProfileChanged, this.providerProfileChangeListener)
	}
	/**
	 * Wait for the task mode to be initialized before proceeding.
	 * This method ensures that any operations depending on the task mode
	 * will have access to the correct mode value.
	 *
	 * ## When to use
	 * - Before accessing mode-specific configurations
	 * - When switching between tasks with different modes
	 * - Before operations that depend on mode-based permissions
	 *
	 * ## Example usage
	 * ```typescript
	 * // Wait for mode initialization before mode-dependent operations
	 * await task.waitForModeInitialization();
	 * const mode = task.taskMode; // Now safe to access synchronously
	 *
	 * // Or use with getTaskMode() for a one-liner
	 * const mode = await task.getTaskMode(); // Internally waits for initialization
	 * ```
	 *
	 * @returns Promise that resolves when the task mode is initialized
	 * @public
	 */
	public async waitForModeInitialization(): Promise<void> {
		return this.taskModeReady
	}
	/**
	 * Get the task mode asynchronously, ensuring it's properly initialized.
	 * This is the recommended way to access the task mode as it guarantees
	 * the mode is available before returning.
	 *
	 * ## Async behavior
	 * - Internally waits for `taskModeReady` promise to resolve
	 * - Returns the initialized mode or `defaultModeSlug` as fallback
	 * - Safe to call multiple times - subsequent calls return immediately if already initialized
	 *
	 * ## Example usage
	 * ```typescript
	 * // Safe async access
	 * const mode = await task.getTaskMode();
	 * console.log(`Task is running in ${mode} mode`);
	 *
	 * // Use in conditional logic
	 * if (await task.getTaskMode() === 'architect') {
	 *   // Perform architect-specific operations
	 * }
	 * ```
	 *
	 * @returns Promise resolving to the task mode string
	 * @public
	 */
	public async getTaskMode(): Promise<string> {
		await this.taskModeReady
		return this._taskMode || defaultModeSlug
	}
	/**
	 * Get the task mode synchronously. This should only be used when you're certain
	 * that the mode has already been initialized (e.g., after waitForModeInitialization).
	 *
	 * ## When to use
	 * - In synchronous contexts where async/await is not available
	 * - After explicitly waiting for initialization via `waitForModeInitialization()`
	 * - In event handlers or callbacks where mode is guaranteed to be initialized
	 *
	 * ## Example usage
	 * ```typescript
	 * // After ensuring initialization
	 * await task.waitForModeInitialization();
	 * const mode = task.taskMode; // Safe synchronous access
	 *
	 * // In an event handler after task is started
	 * task.on('taskStarted', () => {
	 *   console.log(`Task started in ${task.taskMode} mode`); // Safe here
	 * });
	 * ```
	 *
	 * @throws {Error} If the mode hasn't been initialized yet
	 * @returns The task mode string
	 * @public
	 */
	public get taskMode(): string {
		if (this._taskMode === undefined) {
			throw new Error("Task mode accessed before initialization. Use getTaskMode() or wait for taskModeReady.")
		}
		return this._taskMode
	}
	/**
	 * Wait for the task API config name to be initialized before proceeding.
	 * This method ensures that any operations depending on the task's provider profile
	 * will have access to the correct value.
	 *
	 * ## When to use
	 * - Before accessing provider profile-specific configurations
	 * - When switching between tasks with different provider profiles
	 * - Before operations that depend on the provider profile
	 *
	 * @returns Promise that resolves when the task API config name is initialized
	 * @public
	 */
	public async waitForApiConfigInitialization(): Promise<void> {
		return this.taskApiConfigReady
	}
	/**
	 * Get the task API config name asynchronously, ensuring it's properly initialized.
	 * This is the recommended way to access the task's provider profile as it guarantees
	 * the value is available before returning.
	 *
	 * ## Async behavior
	 * - Internally waits for `taskApiConfigReady` promise to resolve
	 * - Returns the initialized API config name or undefined as fallback
	 * - Safe to call multiple times - subsequent calls return immediately if already initialized
	 *
	 * @returns Promise resolving to the task API config name string or undefined
	 * @public
	 */
	public async getTaskApiConfigName(): Promise<string | undefined> {
		await this.taskApiConfigReady
		return this._taskApiConfigName
	}
  857. /**
  858. * Get the task API config name synchronously. This should only be used when you're certain
  859. * that the value has already been initialized (e.g., after waitForApiConfigInitialization).
  860. *
  861. * ## When to use
  862. * - In synchronous contexts where async/await is not available
  863. * - After explicitly waiting for initialization via `waitForApiConfigInitialization()`
  864. * - In event handlers or callbacks where API config name is guaranteed to be initialized
  865. *
  866. * Note: Unlike taskMode, this getter does not throw if uninitialized since the API config
  867. * name can legitimately be undefined (backward compatibility with tasks created before
  868. * this feature was added).
  869. *
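 * ## Example usage
 * Illustrative only:
 * ```typescript
 * await task.waitForApiConfigInitialization();
 * const profile = task.taskApiConfigName; // May legitimately be undefined for older tasks
 * ```
 *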
  870. * @returns The task API config name string or undefined
  871. * @public
  872. */
  873. public get taskApiConfigName(): string | undefined {
  874. return this._taskApiConfigName
  875. }
  876. /**
  877. * Update the task's API config name. This is called when the user switches
  878. * provider profiles while a task is active, allowing the task to remember
  879. * its new provider profile.
  880. *
  881. * @param apiConfigName - The new API config name to set
  882. * @internal
  883. */
  884. public setTaskApiConfigName(apiConfigName: string | undefined): void {
  885. this._taskApiConfigName = apiConfigName
  886. }
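/**
 * Convenience factory that constructs a Task (with `startTask: false`) and immediately
 * kicks it off: a fresh task when `task`/`images` are provided, or a history resume when
 * `historyItem` is provided. Throws if neither is supplied.
 *
 * Illustrative usage (the option values are examples; the shape comes from `TaskOptions`):
 * ```typescript
 * const [task, started] = Task.create({ ...baseOptions, task: "Summarize the open issues" });
 * await started;
 * ```
 */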
  887. static create(options: TaskOptions): [Task, Promise<void>] {
  888. const instance = new Task({ ...options, startTask: false })
  889. const { images, task, historyItem } = options
  890. let promise
  891. if (images || task) {
  892. promise = instance.startTask(task, images)
  893. } else if (historyItem) {
  894. promise = instance.resumeTaskFromHistory()
  895. } else {
  896. throw new Error("Either historyItem or task/images must be provided")
  897. }
  898. return [instance, promise]
  899. }
  900. // API Messages
  901. private async getSavedApiConversationHistory(): Promise<ApiMessage[]> {
  902. return readApiMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
  903. }
  904. private async addToApiConversationHistory(message: Anthropic.MessageParam, reasoning?: string) {
  905. // Capture the encrypted_content / thought signatures from the provider (e.g., OpenAI Responses API, Google GenAI) if present.
  906. // We only persist data reported by the current response body.
  907. const handler = this.api as ApiHandler & {
  908. getResponseId?: () => string | undefined
  909. getEncryptedContent?: () => { encrypted_content: string; id?: string } | undefined
  910. getThoughtSignature?: () => string | undefined
  911. getSummary?: () => any[] | undefined
  912. getReasoningDetails?: () => any[] | undefined
  913. getRedactedThinkingBlocks?: () => Array<{ type: "redacted_thinking"; data: string }> | undefined
  914. }
  915. if (message.role === "assistant") {
  916. const responseId = handler.getResponseId?.()
  917. const reasoningData = handler.getEncryptedContent?.()
  918. const thoughtSignature = handler.getThoughtSignature?.()
  919. const reasoningSummary = handler.getSummary?.()
  920. const reasoningDetails = handler.getReasoningDetails?.()
  921. // Only Anthropic's API expects/validates the special `thinking` content block signature.
  922. // Other providers (notably Gemini 3) use different signature semantics (e.g. `thoughtSignature`)
  923. // and require round-tripping the signature in their own format.
  924. const modelId = getModelId(this.apiConfiguration)
  925. const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId)
  926. const isAnthropicProtocol = apiProtocol === "anthropic"
  927. // Start from the original assistant message
  928. const messageWithTs: any = {
  929. ...message,
  930. ...(responseId ? { id: responseId } : {}),
  931. ts: Date.now(),
  932. }
  933. // Store reasoning_details array if present (for models like Gemini 3)
  934. if (reasoningDetails) {
  935. messageWithTs.reasoning_details = reasoningDetails
  936. }
  937. // Store reasoning: Anthropic thinking (with signature), plain text (most providers), or encrypted (OpenAI Native)
  938. // Skip if reasoning_details already contains the reasoning (to avoid duplication)
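// The branches below persist one of the following shapes (illustrative; field values are placeholders):
//   Anthropic extended thinking: { type: "thinking", thinking: "...", signature: "..." }
//   Plain-text reasoning:        { type: "reasoning", text: "...", summary: [] }
//   Encrypted reasoning:         { type: "reasoning", summary: [], encrypted_content: "...", id: "..." }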
  939. if (isAnthropicProtocol && reasoning && thoughtSignature && !reasoningDetails) {
  940. // Anthropic provider with extended thinking: Store as proper `thinking` block
  941. // This format passes through anthropic-filter.ts and is properly round-tripped
  942. // for interleaved thinking with tool use (required by Anthropic API)
  943. const thinkingBlock = {
  944. type: "thinking",
  945. thinking: reasoning,
  946. signature: thoughtSignature,
  947. }
  948. if (typeof messageWithTs.content === "string") {
  949. messageWithTs.content = [
  950. thinkingBlock,
  951. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  952. ]
  953. } else if (Array.isArray(messageWithTs.content)) {
  954. messageWithTs.content = [thinkingBlock, ...messageWithTs.content]
  955. } else if (!messageWithTs.content) {
  956. messageWithTs.content = [thinkingBlock]
  957. }
  958. // Also insert any redacted_thinking blocks after the thinking block.
  959. // Anthropic returns these when safety filters trigger on reasoning content.
  960. // They must be passed back verbatim for proper reasoning continuity.
  961. const redactedBlocks = handler.getRedactedThinkingBlocks?.()
  962. if (redactedBlocks && Array.isArray(messageWithTs.content)) {
  963. // Insert after the thinking block (index 1, right after thinking at index 0)
  964. messageWithTs.content.splice(1, 0, ...redactedBlocks)
  965. }
  966. } else if (reasoning && !reasoningDetails) {
  967. // Other providers (non-Anthropic): Store as generic reasoning block
  968. const reasoningBlock = {
  969. type: "reasoning",
  970. text: reasoning,
  971. summary: reasoningSummary ?? ([] as any[]),
  972. }
  973. if (typeof messageWithTs.content === "string") {
  974. messageWithTs.content = [
  975. reasoningBlock,
  976. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  977. ]
  978. } else if (Array.isArray(messageWithTs.content)) {
  979. messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
  980. } else if (!messageWithTs.content) {
  981. messageWithTs.content = [reasoningBlock]
  982. }
  983. } else if (reasoningData?.encrypted_content) {
  984. // OpenAI Native encrypted reasoning
  985. const reasoningBlock = {
  986. type: "reasoning",
  987. summary: [] as any[],
  988. encrypted_content: reasoningData.encrypted_content,
  989. ...(reasoningData.id ? { id: reasoningData.id } : {}),
  990. }
  991. if (typeof messageWithTs.content === "string") {
  992. messageWithTs.content = [
  993. reasoningBlock,
  994. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  995. ]
  996. } else if (Array.isArray(messageWithTs.content)) {
  997. messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
  998. } else if (!messageWithTs.content) {
  999. messageWithTs.content = [reasoningBlock]
  1000. }
  1001. }
  1002. // For non-Anthropic providers (e.g., Gemini 3), persist the thought signature as its own
  1003. // content block so converters can attach it back to the correct provider-specific fields.
  1004. // Note: For Anthropic extended thinking, the signature is already included in the thinking block above.
  1005. if (thoughtSignature && !isAnthropicProtocol) {
  1006. const thoughtSignatureBlock = {
  1007. type: "thoughtSignature",
  1008. thoughtSignature,
  1009. }
  1010. if (typeof messageWithTs.content === "string") {
  1011. messageWithTs.content = [
  1012. { type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
  1013. thoughtSignatureBlock,
  1014. ]
  1015. } else if (Array.isArray(messageWithTs.content)) {
  1016. messageWithTs.content = [...messageWithTs.content, thoughtSignatureBlock]
  1017. } else if (!messageWithTs.content) {
  1018. messageWithTs.content = [thoughtSignatureBlock]
  1019. }
  1020. }
  1021. this.apiConversationHistory.push(messageWithTs)
  1022. } else {
  1023. // For user messages, validate tool_result IDs ONLY when the immediately previous *effective* message
  1024. // is an assistant message.
  1025. //
  1026. // If the previous effective message is also a user message (e.g., summary + a new user message),
  1027. // validating against any earlier assistant message can incorrectly inject placeholder tool_results.
  1028. const effectiveHistoryForValidation = getEffectiveApiHistory(this.apiConversationHistory)
  1029. const lastEffective = effectiveHistoryForValidation[effectiveHistoryForValidation.length - 1]
  1030. const historyForValidation = lastEffective?.role === "assistant" ? effectiveHistoryForValidation : []
  1031. // If the previous effective message is NOT an assistant, convert tool_result blocks to text blocks.
  1032. // This prevents orphaned tool_results from being filtered out by getEffectiveApiHistory.
  1033. // This can happen when condensing occurs after the assistant sends tool_uses but before
  1034. // the user responds - the tool_use blocks get condensed away, leaving orphaned tool_results.
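// For example (illustrative values), an orphaned block such as
//   { type: "tool_result", tool_use_id: "abc123", content: "ok" }
// is stored as a plain text block instead:
//   { type: "text", text: "Tool result:\nok" }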
  1035. let messageToAdd = message
  1036. if (lastEffective?.role !== "assistant" && Array.isArray(message.content)) {
  1037. messageToAdd = {
  1038. ...message,
  1039. content: message.content.map((block) =>
  1040. block.type === "tool_result"
  1041. ? {
  1042. type: "text" as const,
  1043. text: `Tool result:\n${typeof block.content === "string" ? block.content : JSON.stringify(block.content)}`,
  1044. }
  1045. : block,
  1046. ),
  1047. }
  1048. }
  1049. const validatedMessage = validateAndFixToolResultIds(messageToAdd, historyForValidation)
  1050. const messageWithTs = { ...validatedMessage, ts: Date.now() }
  1051. this.apiConversationHistory.push(messageWithTs)
  1052. }
  1053. await this.saveApiConversationHistory()
  1054. }
  1055. // NOTE: We intentionally do NOT mutate stored messages to merge consecutive user turns.
  1056. // For API requests, consecutive same-role messages are merged via mergeConsecutiveApiMessages()
  1057. // so rewind/edit behavior can still reference original message boundaries.
  1058. async overwriteApiConversationHistory(newHistory: ApiMessage[]) {
  1059. this.apiConversationHistory = newHistory
  1060. await this.saveApiConversationHistory()
  1061. }
  1062. /**
  1063. * Flush any pending tool results to the API conversation history.
  1064. *
  1065. * This is critical when the task is about to be
  1066. * delegated (e.g., via new_task). Before delegation, if other tools were
  1067. * called in the same turn before new_task, their tool_result blocks are
  1068. * accumulated in `userMessageContent` but haven't been saved to the API
  1069. * history yet. If we don't flush them before the parent is disposed,
  1070. * the API conversation will be incomplete and cause 400 errors when
  1071. * the parent resumes (missing tool_result for tool_use blocks).
  1072. *
  1073. * NOTE: The assistant message is typically already in history by the time
  1074. * tools execute (added in recursivelyMakeClineRequests after streaming completes).
  1075. * So we usually only need to flush the pending user message with tool_results.
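 *
 * Illustrative call site (not prescriptive), e.g. just before delegating to a subtask:
 * ```typescript
 * await parentTask.flushPendingToolResultsToHistory();
 * // The parent's API history now contains the pending tool_result blocks and can be safely paused.
 * ```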
  1076. */
  1077. public async flushPendingToolResultsToHistory(): Promise<void> {
  1078. // Only flush if there's actually pending content to save
  1079. if (this.userMessageContent.length === 0) {
  1080. return
  1081. }
  1082. // CRITICAL: Wait for the assistant message to be saved to API history first.
  1083. // Without this, tool_result blocks would appear BEFORE tool_use blocks in the
  1084. // conversation history, causing API errors like:
  1085. // "unexpected `tool_use_id` found in `tool_result` blocks"
  1086. //
  1087. // This can happen when parallel tools are called (e.g., update_todo_list + new_task).
  1088. // Tools execute during streaming via presentAssistantMessage, BEFORE the assistant
  1089. // message is saved. When new_task triggers delegation, it calls this method to
  1090. // flush pending results - but the assistant message hasn't been saved yet.
  1091. //
  1092. // The assistantMessageSavedToHistory flag is:
  1093. // - Reset to false at the start of each API request
  1094. // - Set to true after the assistant message is saved in recursivelyMakeClineRequests
  1095. if (!this.assistantMessageSavedToHistory) {
  1096. await pWaitFor(() => this.assistantMessageSavedToHistory || this.abort, {
  1097. interval: 50,
  1098. timeout: 30_000, // 30 second timeout as safety net
  1099. }).catch(() => {
  1100. // If timeout or abort, log and proceed anyway to avoid hanging
  1101. console.warn(
  1102. `[Task#${this.taskId}] flushPendingToolResultsToHistory: timed out waiting for assistant message to be saved`,
  1103. )
  1104. })
  1105. }
  1106. // If task was aborted while waiting, don't flush
  1107. if (this.abort) {
  1108. return
  1109. }
  1110. // Save the user message with tool_result blocks
  1111. const userMessage: Anthropic.MessageParam = {
  1112. role: "user",
  1113. content: this.userMessageContent,
  1114. }
  1115. // Validate and fix tool_result IDs when the previous *effective* message is an assistant message.
  1116. const effectiveHistoryForValidation = getEffectiveApiHistory(this.apiConversationHistory)
  1117. const lastEffective = effectiveHistoryForValidation[effectiveHistoryForValidation.length - 1]
  1118. const historyForValidation = lastEffective?.role === "assistant" ? effectiveHistoryForValidation : []
  1119. const validatedMessage = validateAndFixToolResultIds(userMessage, historyForValidation)
  1120. const userMessageWithTs = { ...validatedMessage, ts: Date.now() }
  1121. this.apiConversationHistory.push(userMessageWithTs as ApiMessage)
  1122. await this.saveApiConversationHistory()
  1123. // Clear the pending content since it's now saved
  1124. this.userMessageContent = []
  1125. }
  1126. private async saveApiConversationHistory() {
  1127. try {
  1128. await saveApiMessages({
  1129. messages: this.apiConversationHistory,
  1130. taskId: this.taskId,
  1131. globalStoragePath: this.globalStoragePath,
  1132. })
  1133. } catch (error) {
// On the off chance this fails, we don't want to stop the task.
  1135. console.error("Failed to save API conversation history:", error)
  1136. }
  1137. }
  1138. // Cline Messages
  1139. private async getSavedClineMessages(): Promise<ClineMessage[]> {
  1140. return readTaskMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
  1141. }
  1142. private async addToClineMessages(message: ClineMessage) {
  1143. this.clineMessages.push(message)
  1144. const provider = this.providerRef.deref()
  1145. // Avoid resending large, mostly-static fields (notably taskHistory) on every chat message update.
  1146. // taskHistory is maintained in-memory in the webview and updated via taskHistoryItemUpdated.
  1147. await provider?.postStateToWebviewWithoutTaskHistory()
  1148. this.emit(RooCodeEventName.Message, { action: "created", message })
  1149. await this.saveClineMessages()
  1150. const shouldCaptureMessage = message.partial !== true && CloudService.isEnabled()
  1151. if (shouldCaptureMessage) {
  1152. CloudService.instance.captureEvent({
  1153. event: TelemetryEventName.TASK_MESSAGE,
  1154. properties: { taskId: this.taskId, message },
  1155. })
  1156. // Track that this message has been synced to cloud
  1157. this.cloudSyncedMessageTimestamps.add(message.ts)
  1158. }
  1159. }
  1160. public async overwriteClineMessages(newMessages: ClineMessage[]) {
  1161. this.clineMessages = newMessages
  1162. restoreTodoListForTask(this)
  1163. await this.saveClineMessages()
  1164. // When overwriting messages (e.g., during task resume), repopulate the cloud sync tracking Set
  1165. // with timestamps from all non-partial messages to prevent re-syncing previously synced messages
  1166. this.cloudSyncedMessageTimestamps.clear()
  1167. for (const msg of newMessages) {
  1168. if (msg.partial !== true) {
  1169. this.cloudSyncedMessageTimestamps.add(msg.ts)
  1170. }
  1171. }
  1172. }
  1173. private async updateClineMessage(message: ClineMessage) {
  1174. const provider = this.providerRef.deref()
  1175. await provider?.postMessageToWebview({ type: "messageUpdated", clineMessage: message })
  1176. this.emit(RooCodeEventName.Message, { action: "updated", message })
  1177. // Check if we should sync to cloud and haven't already synced this message
  1178. const shouldCaptureMessage = message.partial !== true && CloudService.isEnabled()
  1179. const hasNotBeenSynced = !this.cloudSyncedMessageTimestamps.has(message.ts)
  1180. if (shouldCaptureMessage && hasNotBeenSynced) {
  1181. CloudService.instance.captureEvent({
  1182. event: TelemetryEventName.TASK_MESSAGE,
  1183. properties: { taskId: this.taskId, message },
  1184. })
  1185. // Track that this message has been synced to cloud
  1186. this.cloudSyncedMessageTimestamps.add(message.ts)
  1187. }
  1188. }
  1189. private async saveClineMessages() {
  1190. try {
  1191. await saveTaskMessages({
  1192. messages: this.clineMessages,
  1193. taskId: this.taskId,
  1194. globalStoragePath: this.globalStoragePath,
  1195. })
  1196. if (this._taskApiConfigName === undefined) {
  1197. await this.taskApiConfigReady
  1198. }
  1199. const { historyItem, tokenUsage } = await taskMetadata({
  1200. taskId: this.taskId,
  1201. rootTaskId: this.rootTaskId,
  1202. parentTaskId: this.parentTaskId,
  1203. taskNumber: this.taskNumber,
  1204. messages: this.clineMessages,
  1205. globalStoragePath: this.globalStoragePath,
  1206. workspace: this.cwd,
  1207. mode: this._taskMode || defaultModeSlug, // Use the task's own mode, not the current provider mode.
  1208. apiConfigName: this._taskApiConfigName, // Use the task's own provider profile, not the current provider profile.
  1209. initialStatus: this.initialStatus,
  1210. })
  1211. // Emit token/tool usage updates using debounced function
  1212. // The debounce with maxWait ensures:
  1213. // - Immediate first emit (leading: true)
  1214. // - At most one emit per interval during rapid updates (maxWait)
  1215. // - Final state is emitted when updates stop (trailing: true)
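// For reference, a debounced emitter with these semantics could be created with a
// lodash-style debounce (illustrative values; the real configuration lives where
// `debouncedEmitTokenUsage` is initialized):
//   debounce(emitFn, 1_000, { leading: true, trailing: true, maxWait: 5_000 })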
  1216. this.debouncedEmitTokenUsage(tokenUsage, this.toolUsage)
  1217. await this.providerRef.deref()?.updateTaskHistory(historyItem)
  1218. } catch (error) {
  1219. console.error("Failed to save Roo messages:", error)
  1220. }
  1221. }
  1222. private findMessageByTimestamp(ts: number): ClineMessage | undefined {
  1223. for (let i = this.clineMessages.length - 1; i >= 0; i--) {
  1224. if (this.clineMessages[i].ts === ts) {
  1225. return this.clineMessages[i]
  1226. }
  1227. }
  1228. return undefined
  1229. }
// Note that `partial` has three valid states: true (partial message),
// false (completion of a partial message), and undefined (an individual
// complete message).
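// Typical call patterns (illustrative):
//   await this.ask("command", text, true)   // stream/update a partial ask (throws AskIgnoredError)
//   await this.ask("command", text, false)  // finalize the previously partial ask and await the response
//   await this.ask("followup", text)        // a single, already-complete ask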
  1233. async ask(
  1234. type: ClineAsk,
  1235. text?: string,
  1236. partial?: boolean,
  1237. progressStatus?: ToolProgressStatus,
  1238. isProtected?: boolean,
  1239. ): Promise<{ response: ClineAskResponse; text?: string; images?: string[] }> {
  1240. // If this Cline instance was aborted by the provider, then the only
  1241. // thing keeping us alive is a promise still running in the background,
  1242. // in which case we don't want to send its result to the webview as it
  1243. // is attached to a new instance of Cline now. So we can safely ignore
  1244. // the result of any active promises, and this class will be
  1245. // deallocated. (Although we set Cline = undefined in provider, that
  1246. // simply removes the reference to this instance, but the instance is
  1247. // still alive until this promise resolves or rejects.)
  1248. if (this.abort) {
  1249. throw new Error(`[RooCode#ask] task ${this.taskId}.${this.instanceId} aborted`)
  1250. }
  1251. let askTs: number
  1252. if (partial !== undefined) {
  1253. const lastMessage = this.clineMessages.at(-1)
  1254. const isUpdatingPreviousPartial =
  1255. lastMessage && lastMessage.partial && lastMessage.type === "ask" && lastMessage.ask === type
  1256. if (partial) {
  1257. if (isUpdatingPreviousPartial) {
  1258. // Existing partial message, so update it.
  1259. lastMessage.text = text
  1260. lastMessage.partial = partial
  1261. lastMessage.progressStatus = progressStatus
  1262. lastMessage.isProtected = isProtected
// TODO: Be more efficient about saving and posting: only send new
// data (or one whole message at a time), i.e. skip saves for partials
// and post just the changed parts of a partial message instead of the
// whole array to a new listener.
  1267. this.updateClineMessage(lastMessage)
  1268. // console.log("Task#ask: current ask promise was ignored (#1)")
  1269. throw new AskIgnoredError("updating existing partial")
  1270. } else {
  1271. // This is a new partial message, so add it with partial
  1272. // state.
  1273. askTs = Date.now()
  1274. this.lastMessageTs = askTs
  1275. await this.addToClineMessages({ ts: askTs, type: "ask", ask: type, text, partial, isProtected })
  1276. // console.log("Task#ask: current ask promise was ignored (#2)")
  1277. throw new AskIgnoredError("new partial")
  1278. }
  1279. } else {
  1280. if (isUpdatingPreviousPartial) {
  1281. // This is the complete version of a previously partial
  1282. // message, so replace the partial with the complete version.
  1283. this.askResponse = undefined
  1284. this.askResponseText = undefined
  1285. this.askResponseImages = undefined
  1286. // Bug for the history books:
  1287. // In the webview we use the ts as the chatrow key for the
  1288. // virtuoso list. Since we would update this ts right at the
// end of streaming, it would cause the view to flicker. The
// key prop has to be stable, otherwise React has trouble
// reconciling items between renders, causing components to be
// unmounted and remounted (flickering).
  1293. // The lesson here is if you see flickering when rendering
  1294. // lists, it's likely because the key prop is not stable.
  1295. // So in this case we must make sure that the message ts is
  1296. // never altered after first setting it.
  1297. askTs = lastMessage.ts
  1298. this.lastMessageTs = askTs
  1299. lastMessage.text = text
  1300. lastMessage.partial = false
  1301. lastMessage.progressStatus = progressStatus
  1302. lastMessage.isProtected = isProtected
  1303. await this.saveClineMessages()
  1304. this.updateClineMessage(lastMessage)
  1305. } else {
  1306. // This is a new and complete message, so add it like normal.
  1307. this.askResponse = undefined
  1308. this.askResponseText = undefined
  1309. this.askResponseImages = undefined
  1310. askTs = Date.now()
  1311. this.lastMessageTs = askTs
  1312. await this.addToClineMessages({ ts: askTs, type: "ask", ask: type, text, isProtected })
  1313. }
  1314. }
  1315. } else {
  1316. // This is a new non-partial message, so add it like normal.
  1317. this.askResponse = undefined
  1318. this.askResponseText = undefined
  1319. this.askResponseImages = undefined
  1320. askTs = Date.now()
  1321. this.lastMessageTs = askTs
  1322. await this.addToClineMessages({ ts: askTs, type: "ask", ask: type, text, isProtected })
  1323. }
  1324. let timeouts: NodeJS.Timeout[] = []
// Automatically approve or deny the ask according to the user's auto-approval settings.
  1326. const provider = this.providerRef.deref()
  1327. const state = provider ? await provider.getState() : undefined
  1328. const approval = await checkAutoApproval({ state, ask: type, text, isProtected })
  1329. if (approval.decision === "approve") {
  1330. this.approveAsk()
  1331. } else if (approval.decision === "deny") {
  1332. this.denyAsk()
  1333. } else if (approval.decision === "timeout") {
  1334. // Store the auto-approval timeout so it can be cancelled if user interacts
  1335. this.autoApprovalTimeoutRef = setTimeout(() => {
  1336. const { askResponse, text, images } = approval.fn()
  1337. this.handleWebviewAskResponse(askResponse, text, images)
  1338. this.autoApprovalTimeoutRef = undefined
  1339. }, approval.timeout)
  1340. timeouts.push(this.autoApprovalTimeoutRef)
  1341. }
  1342. // The state is mutable if the message is complete and the task will
  1343. // block (via the `pWaitFor`).
  1344. const isBlocking = !(this.askResponse !== undefined || this.lastMessageTs !== askTs)
  1345. const isMessageQueued = !this.messageQueueService.isEmpty()
  1346. const isStatusMutable = !partial && isBlocking && !isMessageQueued && approval.decision === "ask"
  1347. if (isStatusMutable) {
  1348. const statusMutationTimeout = 2_000
  1349. if (isInteractiveAsk(type)) {
  1350. timeouts.push(
  1351. setTimeout(() => {
  1352. const message = this.findMessageByTimestamp(askTs)
  1353. if (message) {
  1354. this.interactiveAsk = message
  1355. this.emit(RooCodeEventName.TaskInteractive, this.taskId)
  1356. provider?.postMessageToWebview({ type: "interactionRequired" })
  1357. }
  1358. }, statusMutationTimeout),
  1359. )
  1360. } else if (isResumableAsk(type)) {
  1361. timeouts.push(
  1362. setTimeout(() => {
  1363. const message = this.findMessageByTimestamp(askTs)
  1364. if (message) {
  1365. this.resumableAsk = message
  1366. this.emit(RooCodeEventName.TaskResumable, this.taskId)
  1367. }
  1368. }, statusMutationTimeout),
  1369. )
  1370. } else if (isIdleAsk(type)) {
  1371. timeouts.push(
  1372. setTimeout(() => {
  1373. const message = this.findMessageByTimestamp(askTs)
  1374. if (message) {
  1375. this.idleAsk = message
  1376. this.emit(RooCodeEventName.TaskIdle, this.taskId)
  1377. }
  1378. }, statusMutationTimeout),
  1379. )
  1380. }
  1381. } else if (isMessageQueued) {
  1382. const message = this.messageQueueService.dequeueMessage()
  1383. if (message) {
  1384. // Check if this is a tool approval ask that needs to be handled.
  1385. if (
  1386. type === "tool" ||
  1387. type === "command" ||
  1388. type === "browser_action_launch" ||
  1389. type === "use_mcp_server"
  1390. ) {
  1391. // For tool approvals, we need to approve first, then send
  1392. // the message if there's text/images.
  1393. this.handleWebviewAskResponse("yesButtonClicked", message.text, message.images)
  1394. } else {
  1395. // For other ask types (like followup or command_output), fulfill the ask
  1396. // directly.
  1397. this.handleWebviewAskResponse("messageResponse", message.text, message.images)
  1398. }
  1399. }
  1400. }
  1401. // Wait for askResponse to be set
  1402. await pWaitFor(
  1403. () => {
  1404. if (this.askResponse !== undefined || this.lastMessageTs !== askTs) {
  1405. return true
  1406. }
  1407. // If a queued message arrives while we're blocked on an ask (e.g. a follow-up
  1408. // suggestion click that was incorrectly queued due to UI state), consume it
  1409. // immediately so the task doesn't hang.
  1410. if (!this.messageQueueService.isEmpty()) {
  1411. const message = this.messageQueueService.dequeueMessage()
  1412. if (message) {
  1413. // If this is a tool approval ask, we need to approve first (yesButtonClicked)
  1414. // and include any queued text/images.
  1415. if (
  1416. type === "tool" ||
  1417. type === "command" ||
  1418. type === "browser_action_launch" ||
  1419. type === "use_mcp_server"
  1420. ) {
  1421. this.handleWebviewAskResponse("yesButtonClicked", message.text, message.images)
  1422. } else {
  1423. this.handleWebviewAskResponse("messageResponse", message.text, message.images)
  1424. }
  1425. }
  1426. }
  1427. return false
  1428. },
  1429. { interval: 100 },
  1430. )
  1431. if (this.lastMessageTs !== askTs) {
// This could happen if we send multiple asks in a row, e.g. with
// command_output. It's important that when we know an ask could
// fail, it is handled gracefully.
  1435. throw new AskIgnoredError("superseded")
  1436. }
  1437. const result = { response: this.askResponse!, text: this.askResponseText, images: this.askResponseImages }
  1438. this.askResponse = undefined
  1439. this.askResponseText = undefined
  1440. this.askResponseImages = undefined
  1441. // Cancel the timeouts if they are still running.
  1442. timeouts.forEach((timeout) => clearTimeout(timeout))
  1443. // Switch back to an active state.
  1444. if (this.idleAsk || this.resumableAsk || this.interactiveAsk) {
  1445. this.idleAsk = undefined
  1446. this.resumableAsk = undefined
  1447. this.interactiveAsk = undefined
  1448. this.emit(RooCodeEventName.TaskActive, this.taskId)
  1449. }
  1450. this.emit(RooCodeEventName.TaskAskResponded)
  1451. return result
  1452. }
  1453. handleWebviewAskResponse(askResponse: ClineAskResponse, text?: string, images?: string[]) {
  1454. // Clear any pending auto-approval timeout when user responds
  1455. this.cancelAutoApprovalTimeout()
  1456. this.askResponse = askResponse
  1457. this.askResponseText = text
  1458. this.askResponseImages = images
  1459. // Create a checkpoint whenever the user sends a message.
  1460. // Use allowEmpty=true to ensure a checkpoint is recorded even if there are no file changes.
  1461. // Suppress the checkpoint_saved chat row for this particular checkpoint to keep the timeline clean.
  1462. if (askResponse === "messageResponse") {
  1463. void this.checkpointSave(false, true)
  1464. }
  1465. // Mark the last follow-up question as answered
  1466. if (askResponse === "messageResponse" || askResponse === "yesButtonClicked") {
  1467. // Find the last unanswered follow-up message using findLastIndex
  1468. const lastFollowUpIndex = findLastIndex(
  1469. this.clineMessages,
  1470. (msg) => msg.type === "ask" && msg.ask === "followup" && !msg.isAnswered,
  1471. )
  1472. if (lastFollowUpIndex !== -1) {
  1473. // Mark this follow-up as answered
  1474. this.clineMessages[lastFollowUpIndex].isAnswered = true
  1475. // Save the updated messages
  1476. this.saveClineMessages().catch((error) => {
  1477. console.error("Failed to save answered follow-up state:", error)
  1478. })
  1479. }
  1480. }
  1481. }
  1482. /**
  1483. * Cancel any pending auto-approval timeout.
  1484. * Called when user interacts (types, clicks buttons, etc.) to prevent the timeout from firing.
  1485. */
  1486. public cancelAutoApprovalTimeout(): void {
  1487. if (this.autoApprovalTimeoutRef) {
  1488. clearTimeout(this.autoApprovalTimeoutRef)
  1489. this.autoApprovalTimeoutRef = undefined
  1490. }
  1491. }
  1492. public approveAsk({ text, images }: { text?: string; images?: string[] } = {}) {
  1493. this.handleWebviewAskResponse("yesButtonClicked", text, images)
  1494. }
  1495. public denyAsk({ text, images }: { text?: string; images?: string[] } = {}) {
  1496. this.handleWebviewAskResponse("noButtonClicked", text, images)
  1497. }
  1498. public supersedePendingAsk(): void {
  1499. this.lastMessageTs = Date.now()
  1500. }
  1501. /**
  1502. * Updates the API configuration and rebuilds the API handler.
  1503. * There is no tool-protocol switching or tool parser swapping.
  1504. *
  1505. * @param newApiConfiguration - The new API configuration to use
  1506. */
  1507. public updateApiConfiguration(newApiConfiguration: ProviderSettings): void {
  1508. // Update the configuration and rebuild the API handler
  1509. this.apiConfiguration = newApiConfiguration
  1510. this.api = buildApiHandler(this.apiConfiguration)
  1511. }
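/**
 * Programmatically submit a user message to this task, optionally switching the mode
 * and/or provider profile first. Empty input (no text and no images) is ignored. The
 * message is routed directly as a "messageResponse" to the pending ask instead of going
 * through the webview, which avoids a race with webview state hydration.
 *
 * Illustrative usage (the mode slug is only an example):
 * ```typescript
 * await task.submitUserMessage("Please continue with the refactor", [], "code");
 * ```
 */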
  1512. public async submitUserMessage(
  1513. text: string,
  1514. images?: string[],
  1515. mode?: string,
  1516. providerProfile?: string,
  1517. ): Promise<void> {
  1518. try {
  1519. text = (text ?? "").trim()
  1520. images = images ?? []
  1521. if (text.length === 0 && images.length === 0) {
  1522. return
  1523. }
  1524. const provider = this.providerRef.deref()
  1525. if (provider) {
  1526. if (mode) {
  1527. await provider.setMode(mode)
  1528. }
  1529. if (providerProfile) {
  1530. await provider.setProviderProfile(providerProfile)
  1531. // Update this task's API configuration to match the new profile
  1532. // This ensures the parser state is synchronized with the selected model
  1533. const newState = await provider.getState()
  1534. if (newState?.apiConfiguration) {
  1535. this.updateApiConfiguration(newState.apiConfiguration)
  1536. }
  1537. }
  1538. this.emit(RooCodeEventName.TaskUserMessage, this.taskId)
  1539. // Handle the message directly instead of routing through the webview.
  1540. // This avoids a race condition where the webview's message state hasn't
  1541. // hydrated yet, causing it to interpret the message as a new task request.
  1542. this.handleWebviewAskResponse("messageResponse", text, images)
  1543. } else {
  1544. console.error("[Task#submitUserMessage] Provider reference lost")
  1545. }
  1546. } catch (error) {
  1547. console.error("[Task#submitUserMessage] Failed to submit user message:", error)
  1548. }
  1549. }
  1550. async handleTerminalOperation(terminalOperation: "continue" | "abort") {
  1551. if (terminalOperation === "continue") {
  1552. this.terminalProcess?.continue()
  1553. } else if (terminalOperation === "abort") {
  1554. this.terminalProcess?.abort()
  1555. }
  1556. }
  1557. private async getFilesReadByRooSafely(context: string): Promise<string[] | undefined> {
  1558. try {
  1559. return await this.fileContextTracker.getFilesReadByRoo()
  1560. } catch (error) {
  1561. console.error(`[Task#${context}] Failed to get files read by Roo:`, error)
  1562. return undefined
  1563. }
  1564. }
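/**
 * Manually condense the conversation context.
 *
 * Flushes any pending tool results, rebuilds the system prompt and native tool metadata,
 * and asks `summarizeConversation` for a summary. On success the API conversation history
 * is overwritten with the condensed messages and a `condense_context` message is shown;
 * on failure a `condense_context_error` message is shown and the history is left untouched.
 *
 * Illustrative usage (typically triggered from a UI action):
 * ```typescript
 * await task.condenseContext();
 * ```
 */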
  1565. public async condenseContext(): Promise<void> {
  1566. // CRITICAL: Flush any pending tool results before condensing
  1567. // to ensure tool_use/tool_result pairs are complete in history
  1568. await this.flushPendingToolResultsToHistory()
  1569. const systemPrompt = await this.getSystemPrompt()
  1570. // Get condensing configuration
  1571. const state = await this.providerRef.deref()?.getState()
  1572. const customCondensingPrompt = state?.customSupportPrompts?.CONDENSE
  1573. const { mode, apiConfiguration } = state ?? {}
  1574. const { contextTokens: prevContextTokens } = this.getTokenUsage()
  1575. // Build tools for condensing metadata (same tools used for normal API calls)
  1576. const provider = this.providerRef.deref()
  1577. let allTools: import("openai").default.Chat.ChatCompletionTool[] = []
  1578. if (provider) {
  1579. const modelInfo = this.api.getModel().info
  1580. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  1581. provider,
  1582. cwd: this.cwd,
  1583. mode,
  1584. customModes: state?.customModes,
  1585. experiments: state?.experiments,
  1586. apiConfiguration,
  1587. browserToolEnabled: state?.browserToolEnabled ?? true,
  1588. disabledTools: state?.disabledTools,
  1589. modelInfo,
  1590. includeAllToolsWithRestrictions: false,
  1591. })
  1592. allTools = toolsResult.tools
  1593. }
  1594. // Build metadata with tools and taskId for the condensing API call
  1595. const metadata: ApiHandlerCreateMessageMetadata = {
  1596. mode,
  1597. taskId: this.taskId,
  1598. ...(allTools.length > 0
  1599. ? {
  1600. tools: allTools,
  1601. tool_choice: "auto",
  1602. parallelToolCalls: true,
  1603. }
  1604. : {}),
  1605. }
  1606. // Generate environment details to include in the condensed summary
  1607. const environmentDetails = await getEnvironmentDetails(this, true)
  1608. const filesReadByRoo = await this.getFilesReadByRooSafely("condenseContext")
  1609. const {
  1610. messages,
  1611. summary,
  1612. cost,
  1613. newContextTokens = 0,
  1614. error,
  1615. errorDetails,
  1616. condenseId,
  1617. } = await summarizeConversation({
  1618. messages: this.apiConversationHistory,
  1619. apiHandler: this.api,
  1620. systemPrompt,
  1621. taskId: this.taskId,
  1622. isAutomaticTrigger: false,
  1623. customCondensingPrompt,
  1624. metadata,
  1625. environmentDetails,
  1626. filesReadByRoo,
  1627. cwd: this.cwd,
  1628. rooIgnoreController: this.rooIgnoreController,
  1629. })
  1630. if (error) {
  1631. await this.say(
  1632. "condense_context_error",
  1633. error,
  1634. undefined /* images */,
  1635. false /* partial */,
  1636. undefined /* checkpoint */,
  1637. undefined /* progressStatus */,
  1638. { isNonInteractive: true } /* options */,
  1639. )
  1640. return
  1641. }
  1642. await this.overwriteApiConversationHistory(messages)
  1643. const contextCondense: ContextCondense = {
  1644. summary,
  1645. cost,
  1646. newContextTokens,
  1647. prevContextTokens,
  1648. condenseId: condenseId!,
  1649. }
  1650. await this.say(
  1651. "condense_context",
  1652. undefined /* text */,
  1653. undefined /* images */,
  1654. false /* partial */,
  1655. undefined /* checkpoint */,
  1656. undefined /* progressStatus */,
  1657. { isNonInteractive: true } /* options */,
  1658. contextCondense,
  1659. )
  1660. // Process any queued messages after condensing completes
  1661. this.processQueuedMessages()
  1662. }
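/**
 * Persist a "say" message and surface it in the webview.
 *
 * `partial` follows the same tri-state convention as `ask()`: `true` streams/updates a
 * partial message, `false` finalizes a previously partial message, and `undefined` adds a
 * single complete message. When `options.isNonInteractive` is set, the message does not
 * update `lastMessageTs`, so it cannot interrupt a pending ask.
 *
 * Illustrative usage:
 * ```typescript
 * await this.say("text", "Working on it...", undefined, undefined, undefined, undefined, { isNonInteractive: true });
 * ```
 *
 * @throws {Error} If the task has been aborted
 */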
  1663. async say(
  1664. type: ClineSay,
  1665. text?: string,
  1666. images?: string[],
  1667. partial?: boolean,
  1668. checkpoint?: Record<string, unknown>,
  1669. progressStatus?: ToolProgressStatus,
  1670. options: {
  1671. isNonInteractive?: boolean
  1672. } = {},
  1673. contextCondense?: ContextCondense,
  1674. contextTruncation?: ContextTruncation,
  1675. ): Promise<undefined> {
  1676. if (this.abort) {
  1677. throw new Error(`[RooCode#say] task ${this.taskId}.${this.instanceId} aborted`)
  1678. }
  1679. if (partial !== undefined) {
  1680. const lastMessage = this.clineMessages.at(-1)
  1681. const isUpdatingPreviousPartial =
  1682. lastMessage && lastMessage.partial && lastMessage.type === "say" && lastMessage.say === type
  1683. if (partial) {
  1684. if (isUpdatingPreviousPartial) {
  1685. // Existing partial message, so update it.
  1686. lastMessage.text = text
  1687. lastMessage.images = images
  1688. lastMessage.partial = partial
  1689. lastMessage.progressStatus = progressStatus
  1690. this.updateClineMessage(lastMessage)
  1691. } else {
  1692. // This is a new partial message, so add it with partial state.
  1693. const sayTs = Date.now()
  1694. if (!options.isNonInteractive) {
  1695. this.lastMessageTs = sayTs
  1696. }
  1697. await this.addToClineMessages({
  1698. ts: sayTs,
  1699. type: "say",
  1700. say: type,
  1701. text,
  1702. images,
  1703. partial,
  1704. contextCondense,
  1705. contextTruncation,
  1706. })
  1707. }
  1708. } else {
// We now have the complete version of a previously partial message,
// so replace the partial with the complete version.
  1712. if (isUpdatingPreviousPartial) {
  1713. if (!options.isNonInteractive) {
  1714. this.lastMessageTs = lastMessage.ts
  1715. }
  1716. lastMessage.text = text
  1717. lastMessage.images = images
  1718. lastMessage.partial = false
  1719. lastMessage.progressStatus = progressStatus
  1720. // Instead of streaming partialMessage events, we do a save
  1721. // and post like normal to persist to disk.
  1722. await this.saveClineMessages()
  1723. // More performant than an entire `postStateToWebview`.
  1724. this.updateClineMessage(lastMessage)
  1725. } else {
  1726. // This is a new and complete message, so add it like normal.
  1727. const sayTs = Date.now()
  1728. if (!options.isNonInteractive) {
  1729. this.lastMessageTs = sayTs
  1730. }
  1731. await this.addToClineMessages({
  1732. ts: sayTs,
  1733. type: "say",
  1734. say: type,
  1735. text,
  1736. images,
  1737. contextCondense,
  1738. contextTruncation,
  1739. })
  1740. }
  1741. }
  1742. } else {
  1743. // This is a new non-partial message, so add it like normal.
  1744. const sayTs = Date.now()
// A "non-interactive" message is one that the user does not need to
// respond to. We don't want these message types to trigger an update
// to `lastMessageTs` since they can be created asynchronously and
// could interrupt a pending ask.
  1749. if (!options.isNonInteractive) {
  1750. this.lastMessageTs = sayTs
  1751. }
  1752. await this.addToClineMessages({
  1753. ts: sayTs,
  1754. type: "say",
  1755. say: type,
  1756. text,
  1757. images,
  1758. checkpoint,
  1759. contextCondense,
  1760. contextTruncation,
  1761. })
  1762. }
  1763. // Broadcast browser session updates to panel when browser-related messages are added
  1764. if (type === "browser_action" || type === "browser_action_result" || type === "browser_session_status") {
  1765. this.broadcastBrowserSessionUpdate()
  1766. }
  1767. }
  1768. async sayAndCreateMissingParamError(toolName: ToolName, paramName: string, relPath?: string) {
  1769. await this.say(
  1770. "error",
`Roo tried to use ${toolName}${
relPath ? ` for '${relPath.toPosix()}'` : ""
} without a value for the required parameter '${paramName}'. Retrying...`,
  1774. )
  1775. return formatResponse.toolError(formatResponse.missingToolParameterError(paramName))
  1776. }
  1777. // Lifecycle
  1778. // Start / Resume / Abort / Dispose
  1779. /**
  1780. * Get enabled MCP tools count for this task.
  1781. * Returns the count along with the number of servers contributing.
  1782. *
  1783. * @returns Object with enabledToolCount and enabledServerCount
  1784. */
  1785. private async getEnabledMcpToolsCount(): Promise<{ enabledToolCount: number; enabledServerCount: number }> {
  1786. try {
  1787. const provider = this.providerRef.deref()
  1788. if (!provider) {
  1789. return { enabledToolCount: 0, enabledServerCount: 0 }
  1790. }
  1791. const { mcpEnabled } = (await provider.getState()) ?? {}
  1792. if (!(mcpEnabled ?? true)) {
  1793. return { enabledToolCount: 0, enabledServerCount: 0 }
  1794. }
  1795. const mcpHub = await McpServerManager.getInstance(provider.context, provider)
  1796. if (!mcpHub) {
  1797. return { enabledToolCount: 0, enabledServerCount: 0 }
  1798. }
  1799. const servers = mcpHub.getServers()
  1800. return countEnabledMcpTools(servers)
  1801. } catch (error) {
  1802. console.error("[Task#getEnabledMcpToolsCount] Error counting MCP tools:", error)
  1803. return { enabledToolCount: 0, enabledServerCount: 0 }
  1804. }
  1805. }
  1806. private async startTask(task?: string, images?: string[]): Promise<void> {
  1807. try {
  1808. if (this.enableBridge) {
  1809. try {
  1810. await BridgeOrchestrator.subscribeToTask(this)
  1811. } catch (error) {
  1812. console.error(
  1813. `[Task#startTask] BridgeOrchestrator.subscribeToTask() failed: ${error instanceof Error ? error.message : String(error)}`,
  1814. )
  1815. }
  1816. }
  1817. // `conversationHistory` (for API) and `clineMessages` (for webview)
  1818. // need to be in sync.
  1819. // If the extension process were killed, then on restart the
  1820. // `clineMessages` might not be empty, so we need to set it to [] when
  1821. // we create a new Cline client (otherwise webview would show stale
  1822. // messages from previous session).
  1823. this.clineMessages = []
  1824. this.apiConversationHistory = []
  1825. // The todo list is already set in the constructor if initialTodos were provided
  1826. // No need to add any messages - the todoList property is already set
  1827. await this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
  1828. await this.say("text", task, images)
  1829. // Check for too many MCP tools and warn the user
  1830. const { enabledToolCount, enabledServerCount } = await this.getEnabledMcpToolsCount()
  1831. if (enabledToolCount > MAX_MCP_TOOLS_THRESHOLD) {
  1832. await this.say(
  1833. "too_many_tools_warning",
  1834. JSON.stringify({
  1835. toolCount: enabledToolCount,
  1836. serverCount: enabledServerCount,
  1837. threshold: MAX_MCP_TOOLS_THRESHOLD,
  1838. }),
  1839. undefined,
  1840. undefined,
  1841. undefined,
  1842. undefined,
  1843. { isNonInteractive: true },
  1844. )
  1845. }
  1846. this.isInitialized = true
  1847. const imageBlocks: Anthropic.ImageBlockParam[] = formatResponse.imageBlocks(images)
  1848. // Task starting
  1849. await this.initiateTaskLoop([
  1850. {
  1851. type: "text",
  1852. text: `<user_message>\n${task}\n</user_message>`,
  1853. },
  1854. ...imageBlocks,
  1855. ]).catch((error) => {
  1856. // Swallow loop rejection when the task was intentionally abandoned/aborted
  1857. // during delegation or user cancellation to prevent unhandled rejections.
  1858. if (this.abandoned === true || this.abortReason === "user_cancelled") {
  1859. return
  1860. }
  1861. throw error
  1862. })
  1863. } catch (error) {
  1864. // In tests and some UX flows, tasks can be aborted while `startTask` is still
  1865. // initializing. Treat abort/abandon as expected and avoid unhandled rejections.
  1866. if (this.abandoned === true || this.abort === true || this.abortReason === "user_cancelled") {
  1867. return
  1868. }
  1869. throw error
  1870. }
  1871. }
  1872. private async resumeTaskFromHistory() {
  1873. if (this.enableBridge) {
  1874. try {
  1875. await BridgeOrchestrator.subscribeToTask(this)
  1876. } catch (error) {
  1877. console.error(
  1878. `[Task#resumeTaskFromHistory] BridgeOrchestrator.subscribeToTask() failed: ${error instanceof Error ? error.message : String(error)}`,
  1879. )
  1880. }
  1881. }
  1882. const modifiedClineMessages = await this.getSavedClineMessages()
  1883. // Remove any resume messages that may have been added before.
  1884. const lastRelevantMessageIndex = findLastIndex(
  1885. modifiedClineMessages,
  1886. (m) => !(m.ask === "resume_task" || m.ask === "resume_completed_task"),
  1887. )
  1888. if (lastRelevantMessageIndex !== -1) {
  1889. modifiedClineMessages.splice(lastRelevantMessageIndex + 1)
  1890. }
  1891. // Remove any trailing reasoning-only UI messages that were not part of the persisted API conversation
  1892. while (modifiedClineMessages.length > 0) {
  1893. const last = modifiedClineMessages[modifiedClineMessages.length - 1]
  1894. if (last.type === "say" && last.say === "reasoning") {
  1895. modifiedClineMessages.pop()
  1896. } else {
  1897. break
  1898. }
  1899. }
// Since we don't use `api_req_finished` anymore, we need to check if the
// last `api_req_started` has a cost value. If it doesn't, and there is no
// cancellation reason to present, we remove it since it indicates an API
// request without any partial content streamed.
  1904. const lastApiReqStartedIndex = findLastIndex(
  1905. modifiedClineMessages,
  1906. (m) => m.type === "say" && m.say === "api_req_started",
  1907. )
  1908. if (lastApiReqStartedIndex !== -1) {
  1909. const lastApiReqStarted = modifiedClineMessages[lastApiReqStartedIndex]
  1910. const { cost, cancelReason }: ClineApiReqInfo = JSON.parse(lastApiReqStarted.text || "{}")
  1911. if (cost === undefined && cancelReason === undefined) {
  1912. modifiedClineMessages.splice(lastApiReqStartedIndex, 1)
  1913. }
  1914. }
  1915. await this.overwriteClineMessages(modifiedClineMessages)
  1916. this.clineMessages = await this.getSavedClineMessages()
// Now present the cline messages to the user and ask if they want to
// resume (NOTE: we previously ran into a bug where the
// apiConversationHistory wouldn't be initialized when opening an old
// task, because we were waiting for the resume ask).
// This is important in case the user deletes messages without resuming
// the task first.
  1923. this.apiConversationHistory = await this.getSavedApiConversationHistory()
  1924. const lastClineMessage = this.clineMessages
  1925. .slice()
  1926. .reverse()
  1927. .find((m) => !(m.ask === "resume_task" || m.ask === "resume_completed_task")) // Could be multiple resume tasks.
  1928. let askType: ClineAsk
  1929. if (lastClineMessage?.ask === "completion_result") {
  1930. askType = "resume_completed_task"
  1931. } else {
  1932. askType = "resume_task"
  1933. }
  1934. this.isInitialized = true
  1935. const { response, text, images } = await this.ask(askType) // Calls `postStateToWebview`.
  1936. let responseText: string | undefined
  1937. let responseImages: string[] | undefined
  1938. if (response === "messageResponse") {
  1939. await this.say("user_feedback", text, images)
  1940. responseText = text
  1941. responseImages = images
  1942. }
  1943. // Make sure that the api conversation history can be resumed by the API,
  1944. // even if it goes out of sync with cline messages.
  1945. let existingApiConversationHistory: ApiMessage[] = await this.getSavedApiConversationHistory()
  1946. // Tool blocks are always preserved; native tool calling only.
// If the last message is an assistant message, we need to check whether it contains tool use, since every tool use has to have a tool response.
// If there's no tool use and only a text block, then we can just add a user message.
// (Note: this isn't relevant anymore since we use custom tool prompts instead of tool use blocks, but it is kept for legacy purposes in case users resume old tasks.)
// If the last message is a user message, we need to get the assistant message before it to see if it made tool calls, and if so, fill in the remaining tool responses with 'interrupted'.
  1951. let modifiedOldUserContent: Anthropic.Messages.ContentBlockParam[] // either the last message if its user message, or the user message before the last (assistant) message
  1952. let modifiedApiConversationHistory: ApiMessage[] // need to remove the last user message to replace with new modified user message
  1953. if (existingApiConversationHistory.length > 0) {
  1954. const lastMessage = existingApiConversationHistory[existingApiConversationHistory.length - 1]
  1955. if (lastMessage.role === "assistant") {
  1956. const content = Array.isArray(lastMessage.content)
  1957. ? lastMessage.content
  1958. : [{ type: "text", text: lastMessage.content }]
  1959. const hasToolUse = content.some((block) => block.type === "tool_use")
  1960. if (hasToolUse) {
  1961. const toolUseBlocks = content.filter(
  1962. (block) => block.type === "tool_use",
  1963. ) as Anthropic.Messages.ToolUseBlock[]
  1964. const toolResponses: Anthropic.ToolResultBlockParam[] = toolUseBlocks.map((block) => ({
  1965. type: "tool_result",
  1966. tool_use_id: block.id,
  1967. content: "Task was interrupted before this tool call could be completed.",
  1968. }))
  1969. modifiedApiConversationHistory = [...existingApiConversationHistory] // no changes
  1970. modifiedOldUserContent = [...toolResponses]
  1971. } else {
  1972. modifiedApiConversationHistory = [...existingApiConversationHistory]
  1973. modifiedOldUserContent = []
  1974. }
  1975. } else if (lastMessage.role === "user") {
  1976. const previousAssistantMessage: ApiMessage | undefined =
  1977. existingApiConversationHistory[existingApiConversationHistory.length - 2]
  1978. const existingUserContent: Anthropic.Messages.ContentBlockParam[] = Array.isArray(lastMessage.content)
  1979. ? lastMessage.content
  1980. : [{ type: "text", text: lastMessage.content }]
  1981. if (previousAssistantMessage && previousAssistantMessage.role === "assistant") {
  1982. const assistantContent = Array.isArray(previousAssistantMessage.content)
  1983. ? previousAssistantMessage.content
  1984. : [{ type: "text", text: previousAssistantMessage.content }]
  1985. const toolUseBlocks = assistantContent.filter(
  1986. (block) => block.type === "tool_use",
  1987. ) as Anthropic.Messages.ToolUseBlock[]
  1988. if (toolUseBlocks.length > 0) {
  1989. const existingToolResults = existingUserContent.filter(
  1990. (block) => block.type === "tool_result",
  1991. ) as Anthropic.ToolResultBlockParam[]
  1992. const missingToolResponses: Anthropic.ToolResultBlockParam[] = toolUseBlocks
  1993. .filter(
  1994. (toolUse) => !existingToolResults.some((result) => result.tool_use_id === toolUse.id),
  1995. )
  1996. .map((toolUse) => ({
  1997. type: "tool_result",
  1998. tool_use_id: toolUse.id,
  1999. content: "Task was interrupted before this tool call could be completed.",
  2000. }))
  2001. modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1) // removes the last user message
  2002. modifiedOldUserContent = [...existingUserContent, ...missingToolResponses]
  2003. } else {
  2004. modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1)
  2005. modifiedOldUserContent = [...existingUserContent]
  2006. }
  2007. } else {
  2008. modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1)
  2009. modifiedOldUserContent = [...existingUserContent]
  2010. }
  2011. } else {
  2012. throw new Error("Unexpected: Last message is not a user or assistant message")
  2013. }
  2014. } else {
  2015. throw new Error("Unexpected: No existing API conversation history")
  2016. }
  2017. let newUserContent: Anthropic.Messages.ContentBlockParam[] = [...modifiedOldUserContent]
  2018. const agoText = ((): string => {
  2019. const timestamp = lastClineMessage?.ts ?? Date.now()
  2020. const now = Date.now()
  2021. const diff = now - timestamp
  2022. const minutes = Math.floor(diff / 60000)
  2023. const hours = Math.floor(minutes / 60)
  2024. const days = Math.floor(hours / 24)
  2025. if (days > 0) {
  2026. return `${days} day${days > 1 ? "s" : ""} ago`
  2027. }
  2028. if (hours > 0) {
  2029. return `${hours} hour${hours > 1 ? "s" : ""} ago`
  2030. }
  2031. if (minutes > 0) {
  2032. return `${minutes} minute${minutes > 1 ? "s" : ""} ago`
  2033. }
  2034. return "just now"
  2035. })()
  2036. if (responseText) {
  2037. newUserContent.push({
  2038. type: "text",
  2039. text: `<user_message>\n${responseText}\n</user_message>`,
  2040. })
  2041. }
  2042. if (responseImages && responseImages.length > 0) {
  2043. newUserContent.push(...formatResponse.imageBlocks(responseImages))
  2044. }
  2045. // Ensure we have at least some content to send to the API.
  2046. // If newUserContent is empty, add a minimal resumption message.
  2047. if (newUserContent.length === 0) {
  2048. newUserContent.push({
  2049. type: "text",
  2050. text: "[TASK RESUMPTION] Resuming task...",
  2051. })
  2052. }
  2053. await this.overwriteApiConversationHistory(modifiedApiConversationHistory)
  2054. // Task resuming from history item.
  2055. await this.initiateTaskLoop(newUserContent)
  2056. }
  2057. /**
  2058. * Cancels the current HTTP request if one is in progress.
  2059. * This immediately aborts the underlying stream rather than waiting for the next chunk.
  2060. */
  2061. public cancelCurrentRequest(): void {
  2062. if (this.currentRequestAbortController) {
  2063. console.log(`[Task#${this.taskId}.${this.instanceId}] Aborting current HTTP request`)
  2064. this.currentRequestAbortController.abort()
  2065. this.currentRequestAbortController = undefined
  2066. }
  2067. }
  2068. /**
  2069. * Force emit a final token usage update, ignoring throttle.
  2070. * Called before task completion or abort to ensure final stats are captured.
 * Triggers the debounce with the current values and immediately flushes it so that the final values are emitted.
  2072. */
  2073. public emitFinalTokenUsageUpdate(): void {
  2074. const tokenUsage = this.getTokenUsage()
  2075. this.debouncedEmitTokenUsage(tokenUsage, this.toolUsage)
  2076. this.debouncedEmitTokenUsage.flush()
  2077. }
  2078. public async abortTask(isAbandoned = false) {
  2079. // Aborting task
  2080. // Will stop any autonomously running promises.
  2081. if (isAbandoned) {
  2082. this.abandoned = true
  2083. }
  2084. this.abort = true
  2085. // Reset consecutive error counters on abort (manual intervention)
  2086. this.consecutiveNoToolUseCount = 0
  2087. this.consecutiveNoAssistantMessagesCount = 0
  2088. // Force final token usage update before abort event
  2089. this.emitFinalTokenUsageUpdate()
  2090. this.emit(RooCodeEventName.TaskAborted)
  2091. try {
  2092. this.dispose() // Call the centralized dispose method
  2093. } catch (error) {
  2094. console.error(`Error during task ${this.taskId}.${this.instanceId} disposal:`, error)
  2095. // Don't rethrow - we want abort to always succeed
  2096. }
2097. // Persist any in-progress messages (e.g. the automatic-retry countdown) before tearing down.
2098. try {
  2100. await this.saveClineMessages()
  2101. } catch (error) {
  2102. console.error(`Error saving messages during abort for task ${this.taskId}.${this.instanceId}:`, error)
  2103. }
  2104. }
  2105. public dispose(): void {
  2106. console.log(`[Task#dispose] disposing task ${this.taskId}.${this.instanceId}`)
  2107. // Cancel any in-progress HTTP request
  2108. try {
  2109. this.cancelCurrentRequest()
  2110. } catch (error) {
  2111. console.error("Error cancelling current request:", error)
  2112. }
  2113. // Remove provider profile change listener
  2114. try {
  2115. if (this.providerProfileChangeListener) {
  2116. const provider = this.providerRef.deref()
  2117. if (provider) {
  2118. provider.off(RooCodeEventName.ProviderProfileChanged, this.providerProfileChangeListener)
  2119. }
  2120. this.providerProfileChangeListener = undefined
  2121. }
  2122. } catch (error) {
  2123. console.error("Error removing provider profile change listener:", error)
  2124. }
  2125. // Dispose message queue and remove event listeners.
  2126. try {
  2127. if (this.messageQueueStateChangedHandler) {
  2128. this.messageQueueService.removeListener("stateChanged", this.messageQueueStateChangedHandler)
  2129. this.messageQueueStateChangedHandler = undefined
  2130. }
  2131. this.messageQueueService.dispose()
  2132. } catch (error) {
  2133. console.error("Error disposing message queue:", error)
  2134. }
  2135. // Remove all event listeners to prevent memory leaks.
  2136. try {
  2137. this.removeAllListeners()
  2138. } catch (error) {
  2139. console.error("Error removing event listeners:", error)
  2140. }
  2141. if (this.enableBridge) {
  2142. BridgeOrchestrator.getInstance()
  2143. ?.unsubscribeFromTask(this.taskId)
  2144. .catch((error) =>
  2145. console.error(
  2146. `[Task#dispose] BridgeOrchestrator#unsubscribeFromTask() failed: ${error instanceof Error ? error.message : String(error)}`,
  2147. ),
  2148. )
  2149. }
  2150. // Release any terminals associated with this task.
  2151. try {
  2153. TerminalRegistry.releaseTerminalsForTask(this.taskId)
  2154. } catch (error) {
  2155. console.error("Error releasing terminals:", error)
  2156. }
  2157. // Cleanup command output artifacts
  2158. getTaskDirectoryPath(this.globalStoragePath, this.taskId)
  2159. .then((taskDir) => {
  2160. const outputDir = path.join(taskDir, "command-output")
  2161. return OutputInterceptor.cleanup(outputDir)
  2162. })
  2163. .catch((error) => {
  2164. console.error("Error cleaning up command output artifacts:", error)
  2165. })
  2166. try {
  2167. this.urlContentFetcher.closeBrowser()
  2168. } catch (error) {
  2169. console.error("Error closing URL content fetcher browser:", error)
  2170. }
  2171. try {
  2172. this.browserSession.closeBrowser()
  2173. } catch (error) {
  2174. console.error("Error closing browser session:", error)
  2175. }
  2176. // Also close the Browser Session panel when the task is disposed
  2177. try {
  2178. const provider = this.providerRef.deref()
  2179. if (provider) {
  2180. const { BrowserSessionPanelManager } = require("../webview/BrowserSessionPanelManager")
  2181. BrowserSessionPanelManager.getInstance(provider).dispose()
  2182. }
  2183. } catch (error) {
  2184. console.error("Error closing browser session panel:", error)
  2185. }
  2186. try {
  2187. if (this.rooIgnoreController) {
  2188. this.rooIgnoreController.dispose()
  2189. this.rooIgnoreController = undefined
  2190. }
  2191. } catch (error) {
  2192. console.error("Error disposing RooIgnoreController:", error)
2193. // Disposing the RooIgnoreController is the critical step for the memory-leak fix.
  2194. }
  2195. try {
  2196. this.fileContextTracker.dispose()
  2197. } catch (error) {
  2198. console.error("Error disposing file context tracker:", error)
  2199. }
  2200. try {
  2201. // If we're not streaming then `abortStream` won't be called.
  2202. if (this.isStreaming && this.diffViewProvider.isEditing) {
  2203. this.diffViewProvider.revertChanges().catch(console.error)
  2204. }
  2205. } catch (error) {
  2206. console.error("Error reverting diff changes:", error)
  2207. }
  2208. }
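// Note on the structure above: each cleanup step is wrapped in its own
// try/catch so that one failing resource (e.g. a browser that is already
// closed) cannot prevent the remaining listeners, terminals, and trackers
// from being released. A sketch of the same pattern, with a hypothetical
// helper name:
//   const safely = (label: string, fn: () => void) => {
//     try { fn() } catch (error) { console.error(`Error ${label}:`, error) }
//   }
//   safely("disposing file context tracker", () => this.fileContextTracker.dispose())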
  2209. // Subtasks
  2210. // Spawn / Wait / Complete
  2211. public async startSubtask(message: string, initialTodos: TodoItem[], mode: string) {
  2212. const provider = this.providerRef.deref()
  2213. if (!provider) {
  2214. throw new Error("Provider not available")
  2215. }
  2216. const child = await (provider as any).delegateParentAndOpenChild({
  2217. parentTaskId: this.taskId,
  2218. message,
  2219. initialTodos,
  2220. mode,
  2221. })
  2222. return child
  2223. }
  2224. /**
  2225. * Resume parent task after delegation completion without showing resume ask.
  2226. * Used in metadata-driven subtask flow.
  2227. *
  2228. * This method:
  2229. * - Clears any pending ask states
  2230. * - Resets abort and streaming flags
  2231. * - Ensures next API call includes full context
  2232. * - Immediately continues task loop without user interaction
  2233. */
  2234. public async resumeAfterDelegation(): Promise<void> {
  2235. // Clear any ask states that might have been set during history load
  2236. this.idleAsk = undefined
  2237. this.resumableAsk = undefined
  2238. this.interactiveAsk = undefined
  2239. // Reset abort and streaming state to ensure clean continuation
  2240. this.abort = false
  2241. this.abandoned = false
  2242. this.abortReason = undefined
  2243. this.didFinishAbortingStream = false
  2244. this.isStreaming = false
  2245. this.isWaitingForFirstChunk = false
  2246. // Ensure next API call includes full context after delegation
  2247. this.skipPrevResponseIdOnce = true
  2248. // Mark as initialized and active
  2249. this.isInitialized = true
  2250. this.emit(RooCodeEventName.TaskActive, this.taskId)
  2251. // Load conversation history if not already loaded
  2252. if (this.apiConversationHistory.length === 0) {
  2253. this.apiConversationHistory = await this.getSavedApiConversationHistory()
  2254. }
  2255. // Add environment details to the existing last user message (which contains the tool_result)
  2256. // This avoids creating a new user message which would cause consecutive user messages
  2257. const environmentDetails = await getEnvironmentDetails(this, true)
  2258. let lastUserMsgIndex = -1
  2259. for (let i = this.apiConversationHistory.length - 1; i >= 0; i--) {
  2260. if (this.apiConversationHistory[i].role === "user") {
  2261. lastUserMsgIndex = i
  2262. break
  2263. }
  2264. }
  2265. if (lastUserMsgIndex >= 0) {
  2266. const lastUserMsg = this.apiConversationHistory[lastUserMsgIndex]
  2267. if (Array.isArray(lastUserMsg.content)) {
  2268. // Remove any existing environment_details blocks before adding fresh ones
  2269. const contentWithoutEnvDetails = lastUserMsg.content.filter(
  2270. (block: Anthropic.Messages.ContentBlockParam) => {
  2271. if (block.type === "text" && typeof block.text === "string") {
  2272. const isEnvironmentDetailsBlock =
  2273. block.text.trim().startsWith("<environment_details>") &&
  2274. block.text.trim().endsWith("</environment_details>")
  2275. return !isEnvironmentDetailsBlock
  2276. }
  2277. return true
  2278. },
  2279. )
  2280. // Add fresh environment details
  2281. lastUserMsg.content = [...contentWithoutEnvDetails, { type: "text" as const, text: environmentDetails }]
  2282. }
  2283. }
  2284. // Save the updated history
  2285. await this.saveApiConversationHistory()
  2286. // Continue task loop - pass empty array to signal no new user content needed
  2287. // The initiateTaskLoop will handle this by skipping user message addition
  2288. await this.initiateTaskLoop([])
  2289. }
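// Note: passing an empty array to `initiateTaskLoop` acts as a sentinel for
// "no new user content". Combined with the `shouldAddUserMessage` check below
// (which skips adding a user message when the content is empty), this lets the
// parent task continue from the tool_result that is already in history without
// creating two consecutive user messages.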
  2290. // Task Loop
  2291. private async initiateTaskLoop(userContent: Anthropic.Messages.ContentBlockParam[]): Promise<void> {
  2292. // Kicks off the checkpoints initialization process in the background.
  2293. getCheckpointService(this)
  2294. let nextUserContent = userContent
  2295. let includeFileDetails = true
  2296. this.emit(RooCodeEventName.TaskStarted)
  2297. while (!this.abort) {
  2298. const didEndLoop = await this.recursivelyMakeClineRequests(nextUserContent, includeFileDetails)
  2299. includeFileDetails = false // We only need file details the first time.
2300. // The agentic loop works like this: the model is given a task and
2301. // then calls tools to complete it. Unless it calls attempt_completion,
2302. // we keep responding with the results of its tool calls until it
2303. // either calls attempt_completion or stops using tools. If it stops
2304. // using tools, we ask it to consider whether it has completed the
2305. // task and, if so, to call attempt_completion; otherwise it proceeds
2306. // with completing the task.
2307. // There is a MAX_REQUESTS_PER_TASK limit to prevent infinite
2308. // requests, but the model is prompted to finish the task as
2309. // efficiently as it can.
  2310. if (didEndLoop) {
  2311. // For now a task never 'completes'. This will only happen if
  2312. // the user hits max requests and denies resetting the count.
  2313. break
  2314. } else {
  2315. nextUserContent = [{ type: "text", text: formatResponse.noToolsUsed() }]
  2316. }
  2317. }
  2318. }
  2319. public async recursivelyMakeClineRequests(
  2320. userContent: Anthropic.Messages.ContentBlockParam[],
  2321. includeFileDetails: boolean = false,
  2322. ): Promise<boolean> {
  2323. interface StackItem {
  2324. userContent: Anthropic.Messages.ContentBlockParam[]
  2325. includeFileDetails: boolean
  2326. retryAttempt?: number
  2327. userMessageWasRemoved?: boolean // Track if user message was removed due to empty response
  2328. }
  2329. const stack: StackItem[] = [{ userContent, includeFileDetails, retryAttempt: 0 }]
  2330. while (stack.length > 0) {
  2331. const currentItem = stack.pop()!
  2332. const currentUserContent = currentItem.userContent
  2333. const currentIncludeFileDetails = currentItem.includeFileDetails
  2334. if (this.abort) {
2335. throw new Error(`[RooCode#recursivelyMakeClineRequests] task ${this.taskId}.${this.instanceId} aborted`)
  2336. }
  2337. if (this.consecutiveMistakeLimit > 0 && this.consecutiveMistakeCount >= this.consecutiveMistakeLimit) {
  2338. // Track consecutive mistake errors in telemetry via event and PostHog exception tracking.
  2339. // The reason is "no_tools_used" because this limit is reached via initiateTaskLoop
  2340. // which increments consecutiveMistakeCount when the model doesn't use any tools.
  2341. TelemetryService.instance.captureConsecutiveMistakeError(this.taskId)
  2342. TelemetryService.instance.captureException(
  2343. new ConsecutiveMistakeError(
  2344. `Task reached consecutive mistake limit (${this.consecutiveMistakeLimit})`,
  2345. this.taskId,
  2346. this.consecutiveMistakeCount,
  2347. this.consecutiveMistakeLimit,
  2348. "no_tools_used",
  2349. this.apiConfiguration.apiProvider,
  2350. getModelId(this.apiConfiguration),
  2351. ),
  2352. )
  2353. const { response, text, images } = await this.ask(
  2354. "mistake_limit_reached",
  2355. t("common:errors.mistake_limit_guidance"),
  2356. )
  2357. if (response === "messageResponse") {
  2358. currentUserContent.push(
  2359. ...[
  2360. { type: "text" as const, text: formatResponse.tooManyMistakes(text) },
  2361. ...formatResponse.imageBlocks(images),
  2362. ],
  2363. )
  2364. await this.say("user_feedback", text, images)
  2365. }
  2366. this.consecutiveMistakeCount = 0
  2367. }
2368. // Getting verbose details is an expensive operation: it uses ripgrep to
2369. // build a top-down file structure of the project, which for large
2370. // projects can take a few seconds. For the best UX we show a placeholder
2371. // api_req_started message with a loading spinner while this happens.
  2372. // Determine API protocol based on provider and model
  2373. const modelId = getModelId(this.apiConfiguration)
  2374. const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId)
  2375. // Respect user-configured provider rate limiting BEFORE we emit api_req_started.
  2376. // This prevents the UI from showing an "API Request..." spinner while we are
  2377. // intentionally waiting due to the rate limit slider.
  2378. //
  2379. // NOTE: We also set Task.lastGlobalApiRequestTime here to reserve this slot
  2380. // before we build environment details (which can take time).
  2381. // This ensures subsequent requests (including subtasks) still honour the
  2382. // provider rate-limit window.
  2383. await this.maybeWaitForProviderRateLimit(currentItem.retryAttempt ?? 0)
  2384. Task.lastGlobalApiRequestTime = performance.now()
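// Sketch of the gating that `maybeWaitForProviderRateLimit` is assumed to do
// (illustrative only; `rateLimitSeconds` and `delay` are assumed names):
//   const elapsedMs = performance.now() - Task.lastGlobalApiRequestTime
//   const waitMs = Math.max(0, rateLimitSeconds * 1000 - elapsedMs)
//   if (waitMs > 0) await delay(waitMs)
// Setting `lastGlobalApiRequestTime` right away reserves this slot, so later
// requests (including subtasks) measure their wait from this point even while
// the environment details below are still being built.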
  2385. await this.say(
  2386. "api_req_started",
  2387. JSON.stringify({
  2388. apiProtocol,
  2389. }),
  2390. )
  2391. const {
  2392. showRooIgnoredFiles = false,
  2393. includeDiagnosticMessages = true,
  2394. maxDiagnosticMessages = 50,
  2395. } = (await this.providerRef.deref()?.getState()) ?? {}
  2396. const { content: parsedUserContent, mode: slashCommandMode } = await processUserContentMentions({
  2397. userContent: currentUserContent,
  2398. cwd: this.cwd,
  2399. urlContentFetcher: this.urlContentFetcher,
  2400. fileContextTracker: this.fileContextTracker,
  2401. rooIgnoreController: this.rooIgnoreController,
  2402. showRooIgnoredFiles,
  2403. includeDiagnosticMessages,
  2404. maxDiagnosticMessages,
  2405. })
  2406. // Switch mode if specified in a slash command's frontmatter
  2407. if (slashCommandMode) {
  2408. const provider = this.providerRef.deref()
  2409. if (provider) {
  2410. const state = await provider.getState()
  2411. const targetMode = getModeBySlug(slashCommandMode, state?.customModes)
  2412. if (targetMode) {
  2413. await provider.handleModeSwitch(slashCommandMode)
  2414. }
  2415. }
  2416. }
  2417. const environmentDetails = await getEnvironmentDetails(this, currentIncludeFileDetails)
  2418. // Remove any existing environment_details blocks before adding fresh ones.
  2419. // This prevents duplicate environment details when resuming tasks,
  2420. // where the old user message content may already contain environment details from the previous session.
  2421. // We check for both opening and closing tags to ensure we're matching complete environment detail blocks,
  2422. // not just mentions of the tag in regular content.
  2423. const contentWithoutEnvDetails = parsedUserContent.filter((block) => {
  2424. if (block.type === "text" && typeof block.text === "string") {
  2425. // Check if this text block is a complete environment_details block
  2426. // by verifying it starts with the opening tag and ends with the closing tag
  2427. const isEnvironmentDetailsBlock =
  2428. block.text.trim().startsWith("<environment_details>") &&
  2429. block.text.trim().endsWith("</environment_details>")
  2430. return !isEnvironmentDetailsBlock
  2431. }
  2432. return true
  2433. })
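// Worked example (illustrative): a block whose trimmed text is
// "<environment_details># VSCode Open Tabs ...</environment_details>" is
// dropped here, while a block that merely mentions "<environment_details>"
// in the middle of other prose is kept, because only complete
// open-tag/close-tag blocks match.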
  2434. // Add environment details as its own text block, separate from tool
  2435. // results.
  2436. let finalUserContent = [...contentWithoutEnvDetails, { type: "text" as const, text: environmentDetails }]
  2437. // Only add user message to conversation history if:
  2438. // 1. This is the first attempt (retryAttempt === 0), AND
  2439. // 2. The original userContent was not empty (empty signals delegation resume where
  2440. // the user message with tool_result and env details is already in history), OR
  2441. // 3. The message was removed in a previous iteration (userMessageWasRemoved === true)
  2442. // This prevents consecutive user messages while allowing re-add when needed
  2443. const isEmptyUserContent = currentUserContent.length === 0
  2444. const shouldAddUserMessage =
  2445. ((currentItem.retryAttempt ?? 0) === 0 && !isEmptyUserContent) || currentItem.userMessageWasRemoved
  2446. if (shouldAddUserMessage) {
  2447. await this.addToApiConversationHistory({ role: "user", content: finalUserContent })
  2448. TelemetryService.instance.captureConversationMessage(this.taskId, "user")
  2449. }
  2450. // Since we sent off a placeholder api_req_started message to update the
  2451. // webview while waiting to actually start the API request (to load
  2452. // potential details for example), we need to update the text of that
  2453. // message.
  2454. const lastApiReqIndex = findLastIndex(this.clineMessages, (m) => m.say === "api_req_started")
  2455. this.clineMessages[lastApiReqIndex].text = JSON.stringify({
  2456. apiProtocol,
  2457. } satisfies ClineApiReqInfo)
  2458. await this.saveClineMessages()
  2459. await this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
  2460. try {
  2461. let cacheWriteTokens = 0
  2462. let cacheReadTokens = 0
  2463. let inputTokens = 0
  2464. let outputTokens = 0
  2465. let totalCost: number | undefined
  2466. // We can't use `api_req_finished` anymore since it's a unique case
  2467. // where it could come after a streaming message (i.e. in the middle
  2468. // of being updated or executed).
  2469. // Fortunately `api_req_finished` was always parsed out for the GUI
  2470. // anyways, so it remains solely for legacy purposes to keep track
  2471. // of prices in tasks from history (it's worth removing a few months
  2472. // from now).
  2473. const updateApiReqMsg = (cancelReason?: ClineApiReqCancelReason, streamingFailedMessage?: string) => {
  2474. if (lastApiReqIndex < 0 || !this.clineMessages[lastApiReqIndex]) {
  2475. return
  2476. }
  2477. const existingData = JSON.parse(this.clineMessages[lastApiReqIndex].text || "{}")
  2478. // Calculate total tokens and cost using provider-aware function
  2479. const modelId = getModelId(this.apiConfiguration)
  2480. const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId)
  2481. const costResult =
  2482. apiProtocol === "anthropic"
  2483. ? calculateApiCostAnthropic(
  2484. streamModelInfo,
  2485. inputTokens,
  2486. outputTokens,
  2487. cacheWriteTokens,
  2488. cacheReadTokens,
  2489. )
  2490. : calculateApiCostOpenAI(
  2491. streamModelInfo,
  2492. inputTokens,
  2493. outputTokens,
  2494. cacheWriteTokens,
  2495. cacheReadTokens,
  2496. )
  2497. this.clineMessages[lastApiReqIndex].text = JSON.stringify({
  2498. ...existingData,
  2499. tokensIn: costResult.totalInputTokens,
  2500. tokensOut: costResult.totalOutputTokens,
  2501. cacheWrites: cacheWriteTokens,
  2502. cacheReads: cacheReadTokens,
  2503. cost: totalCost ?? costResult.totalCost,
  2504. cancelReason,
  2505. streamingFailedMessage,
  2506. } satisfies ClineApiReqInfo)
  2507. }
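// Note: `cost: totalCost ?? costResult.totalCost` prefers a provider-reported
// cost (from a "usage" chunk) and only falls back to the locally calculated
// value. The protocol check simply selects which pricing formula to apply;
// both calculators take the same (modelInfo, input, output, cacheWrite,
// cacheRead) arguments, as shown above.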
  2508. const abortStream = async (cancelReason: ClineApiReqCancelReason, streamingFailedMessage?: string) => {
  2509. if (this.diffViewProvider.isEditing) {
  2510. await this.diffViewProvider.revertChanges() // closes diff view
  2511. }
  2512. // if last message is a partial we need to update and save it
  2513. const lastMessage = this.clineMessages.at(-1)
  2514. if (lastMessage && lastMessage.partial) {
  2515. // lastMessage.ts = Date.now() DO NOT update ts since it is used as a key for virtuoso list
  2516. lastMessage.partial = false
  2517. // instead of streaming partialMessage events, we do a save and post like normal to persist to disk
  2518. }
  2519. // Update `api_req_started` to have cancelled and cost, so that
  2520. // we can display the cost of the partial stream and the cancellation reason
  2521. updateApiReqMsg(cancelReason, streamingFailedMessage)
  2522. await this.saveClineMessages()
2523. // Signals to the provider that it can retrieve the saved messages
2524. // from disk, since abortTask cannot be awaited by its caller.
  2525. this.didFinishAbortingStream = true
  2526. }
  2527. // Reset streaming state for each new API request
  2528. this.currentStreamingContentIndex = 0
  2529. this.currentStreamingDidCheckpoint = false
  2530. this.assistantMessageContent = []
  2531. this.didCompleteReadingStream = false
  2532. this.userMessageContent = []
  2533. this.userMessageContentReady = false
  2534. this.didRejectTool = false
  2535. this.didAlreadyUseTool = false
  2536. this.assistantMessageSavedToHistory = false
  2537. // Reset tool failure flag for each new assistant turn - this ensures that tool failures
  2538. // only prevent attempt_completion within the same assistant message, not across turns
  2539. // (e.g., if a tool fails, then user sends a message saying "just complete anyway")
  2540. this.didToolFailInCurrentTurn = false
  2541. this.presentAssistantMessageLocked = false
  2542. this.presentAssistantMessageHasPendingUpdates = false
  2543. // No legacy text-stream tool parser.
  2544. this.streamingToolCallIndices.clear()
  2545. // Clear any leftover streaming tool call state from previous interrupted streams
  2546. NativeToolCallParser.clearAllStreamingToolCalls()
  2547. NativeToolCallParser.clearRawChunkState()
  2548. await this.diffViewProvider.reset()
  2549. // Cache model info once per API request to avoid repeated calls during streaming
  2550. // This is especially important for tools and background usage collection
  2551. this.cachedStreamingModel = this.api.getModel()
  2552. const streamModelInfo = this.cachedStreamingModel.info
  2553. const cachedModelId = this.cachedStreamingModel.id
  2554. // Yields only if the first chunk is successful, otherwise will
  2555. // allow the user to retry the request (most likely due to rate
  2556. // limit error, which gets thrown on the first chunk).
  2557. const stream = this.attemptApiRequest(currentItem.retryAttempt ?? 0, { skipProviderRateLimit: true })
  2558. let assistantMessage = ""
  2559. let reasoningMessage = ""
  2560. let pendingGroundingSources: GroundingSource[] = []
  2561. this.isStreaming = true
  2562. try {
  2563. const iterator = stream[Symbol.asyncIterator]()
  2564. // Helper to race iterator.next() with abort signal
  2565. const nextChunkWithAbort = async () => {
  2566. const nextPromise = iterator.next()
  2567. // If we have an abort controller, race it with the next chunk
  2568. if (this.currentRequestAbortController) {
  2569. const abortPromise = new Promise<never>((_, reject) => {
  2570. const signal = this.currentRequestAbortController!.signal
  2571. if (signal.aborted) {
  2572. reject(new Error("Request cancelled by user"))
  2573. } else {
  2574. signal.addEventListener("abort", () => {
  2575. reject(new Error("Request cancelled by user"))
  2576. })
  2577. }
  2578. })
  2579. return await Promise.race([nextPromise, abortPromise])
  2580. }
  2581. // No abort controller, just return the next chunk normally
  2582. return await nextPromise
  2583. }
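// Minimal sketch of the race used above (illustrative): the pending
// `iterator.next()` is raced against a promise that rejects when the
// AbortController fires, e.g.
//   await Promise.race([iterator.next(), rejectOnAbort(signal)])
// so a user cancellation surfaces immediately as an error here instead of
// waiting for the provider to emit (or fail to emit) another chunk.
// `rejectOnAbort` is a hypothetical name for the inline promise built above.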
  2584. let item = await nextChunkWithAbort()
  2585. while (!item.done) {
  2586. const chunk = item.value
  2587. item = await nextChunkWithAbort()
  2588. if (!chunk) {
2589. // Sometimes chunk is undefined. It's unclear what causes
2590. // this, but skipping the chunk seems to be a safe workaround.
  2591. continue
  2592. }
  2593. switch (chunk.type) {
  2594. case "reasoning": {
  2595. reasoningMessage += chunk.text
  2596. // Only apply formatting if the message contains sentence-ending punctuation followed by **
  2597. let formattedReasoning = reasoningMessage
  2598. if (reasoningMessage.includes("**")) {
  2599. // Add line breaks before **Title** patterns that appear after sentence endings
  2600. // This targets section headers like "...end of sentence.**Title Here**"
  2601. // Handles periods, exclamation marks, and question marks
  2602. formattedReasoning = reasoningMessage.replace(
  2603. /([.!?])\*\*([^*\n]+)\*\*/g,
  2604. "$1\n\n**$2**",
  2605. )
  2606. }
  2607. await this.say("reasoning", formattedReasoning, undefined, true)
  2608. break
  2609. }
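// Worked example (illustrative): the streamed reasoning text
// "...we are done.**Next Step** open the file" is displayed as
// "...we are done.\n\n**Next Step** open the file" — the regex inserts a blank
// line between a sentence-ending ".", "!", or "?" and an immediately following
// "**Title**" header, but leaves ordinary bold text inside a sentence alone.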
  2610. case "usage":
  2611. inputTokens += chunk.inputTokens
  2612. outputTokens += chunk.outputTokens
  2613. cacheWriteTokens += chunk.cacheWriteTokens ?? 0
  2614. cacheReadTokens += chunk.cacheReadTokens ?? 0
  2615. totalCost = chunk.totalCost
  2616. break
  2617. case "grounding":
  2618. // Handle grounding sources separately from regular content
  2619. // to prevent state persistence issues - store them separately
  2620. if (chunk.sources && chunk.sources.length > 0) {
  2621. pendingGroundingSources.push(...chunk.sources)
  2622. }
  2623. break
  2624. case "tool_call_partial": {
  2625. // Process raw tool call chunk through NativeToolCallParser
  2626. // which handles tracking, buffering, and emits events
  2627. const events = NativeToolCallParser.processRawChunk({
  2628. index: chunk.index,
  2629. id: chunk.id,
  2630. name: chunk.name,
  2631. arguments: chunk.arguments,
  2632. })
  2633. for (const event of events) {
  2634. this.handleToolCallEvent(event)
  2635. }
  2636. break
  2637. }
  2638. // Direct handlers for AI SDK tool streaming events (DeepSeek, Moonshot, etc.)
  2639. // These providers emit tool_call_start/delta/end directly instead of tool_call_partial
  2640. case "tool_call_start":
  2641. case "tool_call_delta":
  2642. case "tool_call_end":
  2643. this.handleToolCallEvent(chunk)
  2644. break
  2645. case "tool_call": {
  2646. // Legacy: Handle complete tool calls (for backward compatibility)
  2647. // Convert native tool call to ToolUse format
  2648. const toolUse = NativeToolCallParser.parseToolCall({
  2649. id: chunk.id,
  2650. name: chunk.name as ToolName,
  2651. arguments: chunk.arguments,
  2652. })
  2653. if (!toolUse) {
  2654. console.error(`Failed to parse tool call for task ${this.taskId}:`, chunk)
  2655. break
  2656. }
  2657. // Store the tool call ID on the ToolUse object for later reference
  2658. // This is needed to create tool_result blocks that reference the correct tool_use_id
  2659. toolUse.id = chunk.id
  2660. // Add the tool use to assistant message content
  2661. this.assistantMessageContent.push(toolUse)
  2662. // Mark that we have new content to process
  2663. this.userMessageContentReady = false
  2664. // Present the tool call to user - presentAssistantMessage will execute
  2665. // tools sequentially and accumulate all results in userMessageContent
  2666. presentAssistantMessage(this)
  2667. break
  2668. }
  2669. case "text": {
  2670. assistantMessage += chunk.text
  2671. // Native tool calling: text chunks are plain text.
  2672. // Create or update a text content block directly
  2673. const lastBlock = this.assistantMessageContent[this.assistantMessageContent.length - 1]
  2674. if (lastBlock?.type === "text" && lastBlock.partial) {
  2675. lastBlock.content = assistantMessage
  2676. } else {
  2677. this.assistantMessageContent.push({
  2678. type: "text",
  2679. content: assistantMessage,
  2680. partial: true,
  2681. })
  2682. this.userMessageContentReady = false
  2683. }
  2684. presentAssistantMessage(this)
  2685. break
  2686. }
  2687. }
  2688. if (this.abort) {
  2689. console.log(`aborting stream, this.abandoned = ${this.abandoned}`)
  2690. if (!this.abandoned) {
  2691. // Only need to gracefully abort if this instance
  2692. // isn't abandoned (sometimes OpenRouter stream
  2693. // hangs, in which case this would affect future
  2694. // instances of Cline).
  2695. await abortStream("user_cancelled")
  2696. }
  2697. break // Aborts the stream.
  2698. }
  2699. if (this.didRejectTool) {
  2700. // `userContent` has a tool rejection, so interrupt the
  2701. // assistant's response to present the user's feedback.
  2702. assistantMessage += "\n\n[Response interrupted by user feedback]"
  2703. // Instead of setting this preemptively, we allow the
  2704. // present iterator to finish and set
2705. // userMessageContentReady when it's ready.
  2706. // this.userMessageContentReady = true
  2707. break
  2708. }
  2709. if (this.didAlreadyUseTool) {
  2710. assistantMessage +=
  2711. "\n\n[Response interrupted by a tool use result. Only one tool may be used at a time and should be placed at the end of the message.]"
  2712. break
  2713. }
  2714. }
  2715. // Create a copy of current token values to avoid race conditions
  2716. const currentTokens = {
  2717. input: inputTokens,
  2718. output: outputTokens,
  2719. cacheWrite: cacheWriteTokens,
  2720. cacheRead: cacheReadTokens,
  2721. total: totalCost,
  2722. }
  2723. const drainStreamInBackgroundToFindAllUsage = async (apiReqIndex: number) => {
  2724. const timeoutMs = DEFAULT_USAGE_COLLECTION_TIMEOUT_MS
  2725. const startTime = performance.now()
  2726. const modelId = getModelId(this.apiConfiguration)
  2727. // Local variables to accumulate usage data without affecting the main flow
  2728. let bgInputTokens = currentTokens.input
  2729. let bgOutputTokens = currentTokens.output
  2730. let bgCacheWriteTokens = currentTokens.cacheWrite
  2731. let bgCacheReadTokens = currentTokens.cacheRead
  2732. let bgTotalCost = currentTokens.total
  2733. // Helper function to capture telemetry and update messages
  2734. const captureUsageData = async (
  2735. tokens: {
  2736. input: number
  2737. output: number
  2738. cacheWrite: number
  2739. cacheRead: number
  2740. total?: number
  2741. },
  2742. messageIndex: number = apiReqIndex,
  2743. ) => {
  2744. if (
  2745. tokens.input > 0 ||
  2746. tokens.output > 0 ||
  2747. tokens.cacheWrite > 0 ||
  2748. tokens.cacheRead > 0
  2749. ) {
  2750. // Update the shared variables atomically
  2751. inputTokens = tokens.input
  2752. outputTokens = tokens.output
  2753. cacheWriteTokens = tokens.cacheWrite
  2754. cacheReadTokens = tokens.cacheRead
  2755. totalCost = tokens.total
  2756. // Update the API request message with the latest usage data
  2757. updateApiReqMsg()
  2758. await this.saveClineMessages()
  2759. // Update the specific message in the webview
  2760. const apiReqMessage = this.clineMessages[messageIndex]
  2761. if (apiReqMessage) {
  2762. await this.updateClineMessage(apiReqMessage)
  2763. }
  2764. // Capture telemetry with provider-aware cost calculation
  2765. const modelId = getModelId(this.apiConfiguration)
  2766. const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId)
  2767. // Use the appropriate cost function based on the API protocol
  2768. const costResult =
  2769. apiProtocol === "anthropic"
  2770. ? calculateApiCostAnthropic(
  2771. streamModelInfo,
  2772. tokens.input,
  2773. tokens.output,
  2774. tokens.cacheWrite,
  2775. tokens.cacheRead,
  2776. )
  2777. : calculateApiCostOpenAI(
  2778. streamModelInfo,
  2779. tokens.input,
  2780. tokens.output,
  2781. tokens.cacheWrite,
  2782. tokens.cacheRead,
  2783. )
  2784. TelemetryService.instance.captureLlmCompletion(this.taskId, {
  2785. inputTokens: costResult.totalInputTokens,
  2786. outputTokens: costResult.totalOutputTokens,
  2787. cacheWriteTokens: tokens.cacheWrite,
  2788. cacheReadTokens: tokens.cacheRead,
  2789. cost: tokens.total ?? costResult.totalCost,
  2790. })
  2791. }
  2792. }
  2793. try {
  2794. // Continue processing the original stream from where the main loop left off
  2795. let usageFound = false
  2796. let chunkCount = 0
  2797. // Use the same iterator that the main loop was using
  2798. while (!item.done) {
  2799. // Check for timeout
  2800. if (performance.now() - startTime > timeoutMs) {
  2801. console.warn(
  2802. `[Background Usage Collection] Timed out after ${timeoutMs}ms for model: ${modelId}, processed ${chunkCount} chunks`,
  2803. )
  2804. // Clean up the iterator before breaking
  2805. if (iterator.return) {
  2806. await iterator.return(undefined)
  2807. }
  2808. break
  2809. }
  2810. const chunk = item.value
  2811. item = await iterator.next()
  2812. chunkCount++
  2813. if (chunk && chunk.type === "usage") {
  2814. usageFound = true
  2815. bgInputTokens += chunk.inputTokens
  2816. bgOutputTokens += chunk.outputTokens
  2817. bgCacheWriteTokens += chunk.cacheWriteTokens ?? 0
  2818. bgCacheReadTokens += chunk.cacheReadTokens ?? 0
  2819. bgTotalCost = chunk.totalCost
  2820. }
  2821. }
  2822. if (
  2823. usageFound ||
  2824. bgInputTokens > 0 ||
  2825. bgOutputTokens > 0 ||
  2826. bgCacheWriteTokens > 0 ||
  2827. bgCacheReadTokens > 0
  2828. ) {
  2829. // We have usage data either from a usage chunk or accumulated tokens
  2830. await captureUsageData(
  2831. {
  2832. input: bgInputTokens,
  2833. output: bgOutputTokens,
  2834. cacheWrite: bgCacheWriteTokens,
  2835. cacheRead: bgCacheReadTokens,
  2836. total: bgTotalCost,
  2837. },
  2838. lastApiReqIndex,
  2839. )
  2840. } else {
  2841. console.warn(
  2842. `[Background Usage Collection] Suspicious: request ${apiReqIndex} is complete, but no usage info was found. Model: ${modelId}`,
  2843. )
  2844. }
  2845. } catch (error) {
  2846. console.error("Error draining stream for usage data:", error)
  2847. // Still try to capture whatever usage data we have collected so far
  2848. if (
  2849. bgInputTokens > 0 ||
  2850. bgOutputTokens > 0 ||
  2851. bgCacheWriteTokens > 0 ||
  2852. bgCacheReadTokens > 0
  2853. ) {
  2854. await captureUsageData(
  2855. {
  2856. input: bgInputTokens,
  2857. output: bgOutputTokens,
  2858. cacheWrite: bgCacheWriteTokens,
  2859. cacheRead: bgCacheReadTokens,
  2860. total: bgTotalCost,
  2861. },
  2862. lastApiReqIndex,
  2863. )
  2864. }
  2865. }
  2866. }
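// Why this runs in the background (summarising the flow above): many providers
// send the final "usage" chunk after the last content chunk, so the main loop
// exits as soon as the assistant content is done and this task keeps consuming
// the same iterator until usage arrives, the stream ends, or the
// DEFAULT_USAGE_COLLECTION_TIMEOUT_MS timeout fires. Token totals accumulate in
// the local bg* variables and are only written back to the shared counters
// inside captureUsageData.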
  2867. // Start the background task and handle any errors
  2868. drainStreamInBackgroundToFindAllUsage(lastApiReqIndex).catch((error) => {
  2869. console.error("Background usage collection failed:", error)
  2870. })
  2871. } catch (error) {
2872. // Abandoned happens when the extension is no longer waiting for the
2873. // Cline instance to finish aborting (the error is thrown here when
2874. // any function in the streaming loop throws due to this.abort).
  2875. if (!this.abandoned) {
  2876. // Determine cancellation reason
  2877. const cancelReason: ClineApiReqCancelReason = this.abort ? "user_cancelled" : "streaming_failed"
  2878. const rawErrorMessage = error.message ?? JSON.stringify(serializeError(error), null, 2)
  2879. const streamingFailedMessage = this.abort
  2880. ? undefined
  2881. : `${t("common:interruption.streamTerminatedByProvider")}: ${rawErrorMessage}`
  2882. // Clean up partial state
  2883. await abortStream(cancelReason, streamingFailedMessage)
  2884. if (this.abort) {
  2885. // User cancelled - abort the entire task
  2886. this.abortReason = cancelReason
  2887. await this.abortTask()
  2888. } else {
  2889. // Stream failed - log the error and retry with the same content
  2890. // The existing rate limiting will prevent rapid retries
  2891. console.error(
  2892. `[Task#${this.taskId}.${this.instanceId}] Stream failed, will retry: ${streamingFailedMessage}`,
  2893. )
  2894. // Apply exponential backoff similar to first-chunk errors when auto-resubmit is enabled
  2895. const stateForBackoff = await this.providerRef.deref()?.getState()
  2896. if (stateForBackoff?.autoApprovalEnabled) {
  2897. await this.backoffAndAnnounce(currentItem.retryAttempt ?? 0, error)
  2898. // Check if task was aborted during the backoff
  2899. if (this.abort) {
  2900. console.log(
  2901. `[Task#${this.taskId}.${this.instanceId}] Task aborted during mid-stream retry backoff`,
  2902. )
  2903. // Abort the entire task
  2904. this.abortReason = "user_cancelled"
  2905. await this.abortTask()
  2906. break
  2907. }
  2908. }
  2909. // Push the same content back onto the stack to retry, incrementing the retry attempt counter
  2910. stack.push({
  2911. userContent: currentUserContent,
  2912. includeFileDetails: false,
  2913. retryAttempt: (currentItem.retryAttempt ?? 0) + 1,
  2914. })
  2915. // Continue to retry the request
  2916. continue
  2917. }
  2918. }
  2919. } finally {
  2920. this.isStreaming = false
  2921. // Clean up the abort controller when streaming completes
  2922. this.currentRequestAbortController = undefined
  2923. }
2924. // Check again here in case the stream was aborted part-way through.
  2925. if (this.abort || this.abandoned) {
  2926. throw new Error(
2927. `[RooCode#recursivelyMakeClineRequests] task ${this.taskId}.${this.instanceId} aborted`,
  2928. )
  2929. }
  2930. this.didCompleteReadingStream = true
  2931. // Set any blocks to be complete to allow `presentAssistantMessage`
  2932. // to finish and set `userMessageContentReady` to true.
  2933. // (Could be a text block that had no subsequent tool uses, or a
  2934. // text block at the very end, or an invalid tool use, etc. Whatever
2935. // the case, `presentAssistantMessage` relies on these blocks either
2936. // being completed or being rejected by the user in order to proceed
2937. // and eventually set userMessageContentReady to true.)
  2938. // Finalize any remaining streaming tool calls that weren't explicitly ended
  2939. // This is critical for MCP tools which need tool_call_end events to be properly
  2940. // converted from ToolUse to McpToolUse via finalizeStreamingToolCall()
  2941. const finalizeEvents = NativeToolCallParser.finalizeRawChunks()
  2942. for (const event of finalizeEvents) {
  2943. if (event.type === "tool_call_end") {
  2944. // Finalize the streaming tool call
  2945. const finalToolUse = NativeToolCallParser.finalizeStreamingToolCall(event.id)
  2946. // Get the index for this tool call
  2947. const toolUseIndex = this.streamingToolCallIndices.get(event.id)
  2948. if (finalToolUse) {
  2949. // Store the tool call ID
  2950. ;(finalToolUse as any).id = event.id
  2951. // Get the index and replace partial with final
  2952. if (toolUseIndex !== undefined) {
  2953. this.assistantMessageContent[toolUseIndex] = finalToolUse
  2954. }
  2955. // Clean up tracking
  2956. this.streamingToolCallIndices.delete(event.id)
  2957. // Mark that we have new content to process
  2958. this.userMessageContentReady = false
  2959. // Present the finalized tool call
  2960. presentAssistantMessage(this)
  2961. } else if (toolUseIndex !== undefined) {
  2962. // finalizeStreamingToolCall returned null (malformed JSON or missing args)
  2963. // We still need to mark the tool as non-partial so it gets executed
  2964. // The tool's validation will catch any missing required parameters
  2965. const existingToolUse = this.assistantMessageContent[toolUseIndex]
  2966. if (existingToolUse && existingToolUse.type === "tool_use") {
  2967. existingToolUse.partial = false
  2968. // Ensure it has the ID for native protocol
  2969. ;(existingToolUse as any).id = event.id
  2970. }
  2971. // Clean up tracking
  2972. this.streamingToolCallIndices.delete(event.id)
  2973. // Mark that we have new content to process
  2974. this.userMessageContentReady = false
  2975. // Present the tool call - validation will handle missing params
  2976. presentAssistantMessage(this)
  2977. }
  2978. }
  2979. }
  2980. // IMPORTANT: Capture partialBlocks AFTER finalizeRawChunks() to avoid double-presentation.
  2981. // Tools finalized above are already presented, so we only want blocks still partial after finalization.
  2982. const partialBlocks = this.assistantMessageContent.filter((block) => block.partial)
  2983. partialBlocks.forEach((block) => (block.partial = false))
  2984. // Can't just do this b/c a tool could be in the middle of executing.
  2985. // this.assistantMessageContent.forEach((e) => (e.partial = false))
  2986. // No legacy streaming parser to finalize.
  2987. // Note: updateApiReqMsg() is now called from within drainStreamInBackgroundToFindAllUsage
  2988. // to ensure usage data is captured even when the stream is interrupted. The background task
  2989. // uses local variables to accumulate usage data before atomically updating the shared state.
  2990. // Complete the reasoning message if it exists
  2991. // We can't use say() here because the reasoning message may not be the last message
  2992. // (other messages like text blocks or tool uses may have been added after it during streaming)
  2993. if (reasoningMessage) {
  2994. const lastReasoningIndex = findLastIndex(
  2995. this.clineMessages,
  2996. (m) => m.type === "say" && m.say === "reasoning",
  2997. )
  2998. if (lastReasoningIndex !== -1 && this.clineMessages[lastReasoningIndex].partial) {
  2999. this.clineMessages[lastReasoningIndex].partial = false
  3000. await this.updateClineMessage(this.clineMessages[lastReasoningIndex])
  3001. }
  3002. }
  3003. await this.saveClineMessages()
  3004. await this.providerRef.deref()?.postStateToWebviewWithoutTaskHistory()
  3005. // No legacy text-stream tool parser state to reset.
  3006. // CRITICAL: Save assistant message to API history BEFORE executing tools.
  3007. // This ensures that when new_task triggers delegation and calls flushPendingToolResultsToHistory(),
  3008. // the assistant message is already in history. Otherwise, tool_result blocks would appear
  3009. // BEFORE their corresponding tool_use blocks, causing API errors.
  3010. // Check if we have any content to process (text or tool uses)
  3011. const hasTextContent = assistantMessage.length > 0
  3012. const hasToolUses = this.assistantMessageContent.some(
  3013. (block) => block.type === "tool_use" || block.type === "mcp_tool_use",
  3014. )
  3015. if (hasTextContent || hasToolUses) {
  3016. // Reset counter when we get a successful response with content
  3017. this.consecutiveNoAssistantMessagesCount = 0
  3018. // Display grounding sources to the user if they exist
  3019. if (pendingGroundingSources.length > 0) {
  3020. const citationLinks = pendingGroundingSources.map((source, i) => `[${i + 1}](${source.url})`)
  3021. const sourcesText = `${t("common:gemini.sources")} ${citationLinks.join(", ")}`
  3022. await this.say("text", sourcesText, undefined, false, undefined, undefined, {
  3023. isNonInteractive: true,
  3024. })
  3025. }
  3026. // Build the assistant message content array
  3027. const assistantContent: Array<Anthropic.TextBlockParam | Anthropic.ToolUseBlockParam> = []
  3028. // Add text content if present
  3029. if (assistantMessage) {
  3030. assistantContent.push({
  3031. type: "text" as const,
  3032. text: assistantMessage,
  3033. })
  3034. }
  3035. // Add tool_use blocks with their IDs for native protocol
  3036. // This handles both regular ToolUse and McpToolUse types
  3037. // IMPORTANT: Track seen IDs to prevent duplicates in the API request.
  3038. // Duplicate tool_use IDs cause Anthropic API 400 errors:
  3039. // "tool_use ids must be unique"
  3040. const seenToolUseIds = new Set<string>()
  3041. const toolUseBlocks = this.assistantMessageContent.filter(
  3042. (block) => block.type === "tool_use" || block.type === "mcp_tool_use",
  3043. )
  3044. for (const block of toolUseBlocks) {
  3045. if (block.type === "mcp_tool_use") {
  3046. // McpToolUse already has the original tool name (e.g., "mcp_serverName_toolName")
  3047. // The arguments are the raw tool arguments (matching the simplified schema)
  3048. const mcpBlock = block as import("../../shared/tools").McpToolUse
  3049. if (mcpBlock.id) {
  3050. const sanitizedId = sanitizeToolUseId(mcpBlock.id)
  3051. // Pre-flight deduplication: Skip if we've already added this ID
  3052. if (seenToolUseIds.has(sanitizedId)) {
  3053. console.warn(
  3054. `[Task#${this.taskId}] Pre-flight deduplication: Skipping duplicate MCP tool_use ID: ${sanitizedId} (tool: ${mcpBlock.name})`,
  3055. )
  3056. continue
  3057. }
  3058. seenToolUseIds.add(sanitizedId)
  3059. assistantContent.push({
  3060. type: "tool_use" as const,
  3061. id: sanitizedId,
  3062. name: mcpBlock.name, // Original dynamic name
  3063. input: mcpBlock.arguments, // Direct tool arguments
  3064. })
  3065. }
  3066. } else {
  3067. // Regular ToolUse
  3068. const toolUse = block as import("../../shared/tools").ToolUse
  3069. const toolCallId = toolUse.id
  3070. if (toolCallId) {
  3071. const sanitizedId = sanitizeToolUseId(toolCallId)
  3072. // Pre-flight deduplication: Skip if we've already added this ID
  3073. if (seenToolUseIds.has(sanitizedId)) {
  3074. console.warn(
  3075. `[Task#${this.taskId}] Pre-flight deduplication: Skipping duplicate tool_use ID: ${sanitizedId} (tool: ${toolUse.name})`,
  3076. )
  3077. continue
  3078. }
  3079. seenToolUseIds.add(sanitizedId)
  3080. // nativeArgs is already in the correct API format for all tools
  3081. const input = toolUse.nativeArgs || toolUse.params
  3082. // Use originalName (alias) if present for API history consistency.
  3083. // When tool aliases are used (e.g., "edit_file" -> "search_and_replace"),
  3084. // we want the alias name in the conversation history to match what the model
  3085. // was told the tool was named, preventing confusion in multi-turn conversations.
  3086. const toolNameForHistory = toolUse.originalName ?? toolUse.name
  3087. assistantContent.push({
  3088. type: "tool_use" as const,
  3089. id: sanitizedId,
  3090. name: toolNameForHistory,
  3091. input,
  3092. })
  3093. }
  3094. }
  3095. }
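// Worked example (illustrative): if the stream produced two tool_use blocks
// that both sanitise to the id "toolu_123", only the first is added to
// assistantContent; the duplicate is skipped with a warning, avoiding the
// Anthropic 400 error "tool_use ids must be unique" mentioned above.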
  3096. // Enforce new_task isolation: if new_task is called alongside other tools,
  3097. // truncate any tools that come after it and inject error tool_results.
  3098. // This prevents orphaned tools when delegation disposes the parent task.
  3099. const newTaskIndex = assistantContent.findIndex(
  3100. (block) => block.type === "tool_use" && block.name === "new_task",
  3101. )
  3102. if (newTaskIndex !== -1 && newTaskIndex < assistantContent.length - 1) {
  3103. // new_task found but not last - truncate subsequent tools
  3104. const truncatedTools = assistantContent.slice(newTaskIndex + 1)
  3105. assistantContent.length = newTaskIndex + 1 // Truncate API history array
  3106. // ALSO truncate the execution array (assistantMessageContent) to prevent
  3107. // tools after new_task from being executed by presentAssistantMessage().
  3108. // Find new_task index in assistantMessageContent (may differ from assistantContent
  3109. // due to text blocks being structured differently).
  3110. const executionNewTaskIndex = this.assistantMessageContent.findIndex(
  3111. (block) => block.type === "tool_use" && block.name === "new_task",
  3112. )
  3113. if (executionNewTaskIndex !== -1) {
  3114. this.assistantMessageContent.length = executionNewTaskIndex + 1
  3115. }
  3116. // Pre-inject error tool_results for truncated tools
  3117. for (const tool of truncatedTools) {
  3118. if (tool.type === "tool_use" && (tool as Anthropic.ToolUseBlockParam).id) {
  3119. this.pushToolResultToUserContent({
  3120. type: "tool_result",
  3121. tool_use_id: (tool as Anthropic.ToolUseBlockParam).id,
  3122. content:
  3123. "This tool was not executed because new_task was called in the same message turn. The new_task tool must be the last tool in a message.",
  3124. is_error: true,
  3125. })
  3126. }
  3127. }
  3128. }
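// Worked example (illustrative): if the assistant message contained
// [text, new_task, read_file], the read_file block is dropped from both the
// API-history array and the execution array, and an error tool_result is
// pre-injected for it explaining that new_task must be the last tool in the
// message, so the follow-up request still has a result for every tool_use id.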
  3129. // Save assistant message BEFORE executing tools
  3130. // This is critical for new_task: when it triggers delegation, flushPendingToolResultsToHistory()
  3131. // will save the user message with tool_results. The assistant message must already be in history
  3132. // so that tool_result blocks appear AFTER their corresponding tool_use blocks.
  3133. await this.addToApiConversationHistory(
  3134. { role: "assistant", content: assistantContent },
  3135. reasoningMessage || undefined,
  3136. )
  3137. this.assistantMessageSavedToHistory = true
  3138. TelemetryService.instance.captureConversationMessage(this.taskId, "assistant")
  3139. }
  3140. // Present any partial blocks that were just completed.
  3141. // Tool calls are typically presented during streaming via tool_call_partial events,
  3142. // but we still present here if any partial blocks remain (e.g., malformed streams).
  3143. // NOTE: This MUST happen AFTER saving the assistant message to API history.
  3144. // When new_task is in the batch, it triggers delegation which calls flushPendingToolResultsToHistory().
  3145. // If the assistant message isn't saved yet, tool_results would appear before tool_use blocks.
  3146. if (partialBlocks.length > 0) {
  3147. // If there is content to update then it will complete and
  3148. // update `this.userMessageContentReady` to true, which we
  3149. // `pWaitFor` before making the next request.
  3150. presentAssistantMessage(this)
  3151. }
  3152. if (hasTextContent || hasToolUses) {
  3153. // NOTE: This comment is here for future reference - this was a
3154. // workaround for `userMessageContentReady` not getting set to true.
  3155. // It was due to it not recursively calling for partial blocks
  3156. // when `didRejectTool`, so it would get stuck waiting for a
  3157. // partial block to complete before it could continue.
  3158. // In case the content blocks finished it may be the api stream
  3159. // finished after the last parsed content block was executed, so
  3160. // we are able to detect out of bounds and set
  3161. // `userMessageContentReady` to true (note you should not call
3162. // `presentAssistantMessage` since if the last block is
3163. // completed it will be presented again).
  3164. // const completeBlocks = this.assistantMessageContent.filter((block) => !block.partial) // If there are any partial blocks after the stream ended we can consider them invalid.
  3165. // if (this.currentStreamingContentIndex >= completeBlocks.length) {
  3166. // this.userMessageContentReady = true
  3167. // }
  3168. await pWaitFor(() => this.userMessageContentReady)
3169. // If the model did not use a tool, then we need to tell it to
3170. // either use a tool or call attempt_completion.
  3171. const didToolUse = this.assistantMessageContent.some(
  3172. (block) => block.type === "tool_use" || block.type === "mcp_tool_use",
  3173. )
  3174. if (!didToolUse) {
  3175. // Increment consecutive no-tool-use counter
  3176. this.consecutiveNoToolUseCount++
  3177. // Only show error and count toward mistake limit after 2 consecutive failures
  3178. if (this.consecutiveNoToolUseCount >= 2) {
  3179. await this.say("error", "MODEL_NO_TOOLS_USED")
  3180. // Only count toward mistake limit after second consecutive failure
  3181. this.consecutiveMistakeCount++
  3182. }
  3183. // Use the task's locked protocol for consistent behavior
  3184. this.userMessageContent.push({
  3185. type: "text",
  3186. text: formatResponse.noToolsUsed(),
  3187. })
  3188. } else {
  3189. // Reset counter when tools are used successfully
  3190. this.consecutiveNoToolUseCount = 0
  3191. }
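// Example of the counter behaviour (illustrative): the first response with no
// tool call only pushes the noToolsUsed() nudge back to the model; a second
// consecutive one additionally shows the MODEL_NO_TOOLS_USED error and counts
// toward consecutiveMistakeCount (and therefore the mistake-limit ask above).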
  3192. // Push to stack if there's content OR if we're paused waiting for a subtask.
  3193. // When paused, we push an empty item so the loop continues to the pause check.
  3194. if (this.userMessageContent.length > 0 || this.isPaused) {
  3195. stack.push({
  3196. userContent: [...this.userMessageContent], // Create a copy to avoid mutation issues
  3197. includeFileDetails: false, // Subsequent iterations don't need file details
  3198. })
  3199. // Add periodic yielding to prevent blocking
  3200. await new Promise((resolve) => setImmediate(resolve))
  3201. }
  3202. continue
  3203. } else {
3204. // If there is no assistant response, that means we got no text
3205. // or tool_use content blocks from the API, which we should treat
3206. // as an error.
  3207. // Increment consecutive no-assistant-messages counter
  3208. this.consecutiveNoAssistantMessagesCount++
  3209. // Only show error and count toward mistake limit after 2 consecutive failures
  3210. // This provides a "grace retry" - first failure retries silently
  3211. if (this.consecutiveNoAssistantMessagesCount >= 2) {
  3212. await this.say("error", "MODEL_NO_ASSISTANT_MESSAGES")
  3213. }
3214. // IMPORTANT: We already added the user message to apiConversationHistory
3215. // above (when shouldAddUserMessage was true). Since the assistant failed to
3216. // respond, we need to remove that message before retrying to avoid two
3217. // consecutive user messages (which would cause tool_result validation errors).
  3218. let state = await this.providerRef.deref()?.getState()
  3219. if (this.apiConversationHistory.length > 0) {
  3220. const lastMessage = this.apiConversationHistory[this.apiConversationHistory.length - 1]
  3221. if (lastMessage.role === "user") {
  3222. // Remove the last user message that we added earlier
  3223. this.apiConversationHistory.pop()
  3224. }
  3225. }
  3226. // Check if we should auto-retry or prompt the user
  3227. // Reuse the state variable from above
  3228. if (state?.autoApprovalEnabled) {
  3229. // Auto-retry with backoff - don't persist failure message when retrying
  3230. await this.backoffAndAnnounce(
  3231. currentItem.retryAttempt ?? 0,
  3232. new Error(
  3233. "Unexpected API Response: The language model did not provide any assistant messages. This may indicate an issue with the API or the model's output.",
  3234. ),
  3235. )
  3236. // Check if task was aborted during the backoff
  3237. if (this.abort) {
  3238. console.log(
  3239. `[Task#${this.taskId}.${this.instanceId}] Task aborted during empty-assistant retry backoff`,
  3240. )
  3241. break
  3242. }
  3243. // Push the same content back onto the stack to retry, incrementing the retry attempt counter
  3244. // Mark that user message was removed so it gets re-added on retry
  3245. stack.push({
  3246. userContent: currentUserContent,
  3247. includeFileDetails: false,
  3248. retryAttempt: (currentItem.retryAttempt ?? 0) + 1,
  3249. userMessageWasRemoved: true,
  3250. })
  3251. // Continue to retry the request
  3252. continue
  3253. } else {
  3254. // Prompt the user for retry decision
  3255. const { response } = await this.ask(
  3256. "api_req_failed",
  3257. "The model returned no assistant messages. This may indicate an issue with the API or the model's output.",
  3258. )
  3259. if (response === "yesButtonClicked") {
  3260. await this.say("api_req_retried")
  3261. // Push the same content back to retry
  3262. stack.push({
  3263. userContent: currentUserContent,
  3264. includeFileDetails: false,
  3265. retryAttempt: (currentItem.retryAttempt ?? 0) + 1,
  3266. })
  3267. // Continue to retry the request
  3268. continue
  3269. } else {
  3270. // User declined to retry
  3271. // Re-add the user message we removed.
  3272. await this.addToApiConversationHistory({
  3273. role: "user",
  3274. content: currentUserContent,
  3275. })
  3276. await this.say(
  3277. "error",
  3278. "Unexpected API Response: The language model did not provide any assistant messages. This may indicate an issue with the API or the model's output.",
  3279. )
  3280. await this.addToApiConversationHistory({
  3281. role: "assistant",
  3282. content: [{ type: "text", text: "Failure: I did not provide a response." }],
  3283. })
  3284. }
  3285. }
  3286. }
  3287. // If we reach here without continuing, return false (will always be false for now)
  3288. return false
  3289. } catch (error) {
3290. // This should never happen, since the only thing that can throw an
3291. // error is attemptApiRequest, which is wrapped in a try/catch that
3292. // sends an ask; if the user clicks "No", the current task is cleared
3293. // and this instance is destroyed. However, to avoid an unhandled
3294. // promise rejection, we end this loop, which ends execution of this
3295. // instance (see `startTask`).
  3296. return true // Needs to be true so parent loop knows to end task.
  3297. }
  3298. }
  3299. // If we exit the while loop normally (stack is empty), return false
  3300. return false
  3301. }
  3302. private async getSystemPrompt(): Promise<string> {
  3303. const { mcpEnabled } = (await this.providerRef.deref()?.getState()) ?? {}
  3304. let mcpHub: McpHub | undefined
  3305. if (mcpEnabled ?? true) {
  3306. const provider = this.providerRef.deref()
  3307. if (!provider) {
  3308. throw new Error("Provider reference lost during view transition")
  3309. }
  3310. // Wait for MCP hub initialization through McpServerManager
  3311. mcpHub = await McpServerManager.getInstance(provider.context, provider)
  3312. if (!mcpHub) {
  3313. throw new Error("Failed to get MCP hub from server manager")
  3314. }
  3315. // Wait for MCP servers to be connected before generating system prompt
  3316. await pWaitFor(() => !mcpHub!.isConnecting, { timeout: 10_000 }).catch(() => {
  3317. console.error("MCP servers failed to connect in time")
  3318. })
  3319. }
  3320. const rooIgnoreInstructions = this.rooIgnoreController?.getInstructions()
  3321. const state = await this.providerRef.deref()?.getState()
  3322. const {
  3323. browserViewportSize,
  3324. mode,
  3325. customModes,
  3326. customModePrompts,
  3327. customInstructions,
  3328. experiments,
  3329. browserToolEnabled,
  3330. language,
  3331. apiConfiguration,
  3332. enableSubfolderRules,
  3333. } = state ?? {}
  3334. return await (async () => {
  3335. const provider = this.providerRef.deref()
  3336. if (!provider) {
  3337. throw new Error("Provider not available")
  3338. }
  3339. // Align browser tool enablement with generateSystemPrompt: require model image support,
  3340. // mode to include the browser group, and the user setting to be enabled.
  3341. const modeConfig = getModeBySlug(mode ?? defaultModeSlug, customModes)
  3342. const modeSupportsBrowser = modeConfig?.groups.some((group) => getGroupName(group) === "browser") ?? false
  3343. // Check if model supports browser capability (images)
  3344. const modelInfo = this.api.getModel().info
  3345. const modelSupportsBrowser = (modelInfo as any)?.supportsImages === true
  3346. const canUseBrowserTool = modelSupportsBrowser && modeSupportsBrowser && (browserToolEnabled ?? true)
  3347. return SYSTEM_PROMPT(
  3348. provider.context,
  3349. this.cwd,
  3350. canUseBrowserTool,
  3351. mcpHub,
  3352. this.diffStrategy,
  3353. browserViewportSize ?? "900x600",
  3354. mode ?? defaultModeSlug,
  3355. customModePrompts,
  3356. customModes,
  3357. customInstructions,
  3358. experiments,
  3359. language,
  3360. rooIgnoreInstructions,
  3361. {
  3362. todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
  3363. browserToolEnabled: browserToolEnabled ?? true,
  3364. useAgentRules:
  3365. vscode.workspace.getConfiguration(Package.name).get<boolean>("useAgentRules") ?? true,
  3366. enableSubfolderRules: enableSubfolderRules ?? false,
  3367. newTaskRequireTodos: vscode.workspace
  3368. .getConfiguration(Package.name)
  3369. .get<boolean>("newTaskRequireTodos", false),
  3370. isStealthModel: modelInfo?.isStealthModel,
  3371. },
  3372. undefined, // todoList
  3373. this.api.getModel().id,
  3374. provider.getSkillsManager(),
  3375. )
  3376. })()
  3377. }
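/**
* Resolves the id of the currently selected API configuration profile.
*
* A minimal sketch of the lookup, assuming a hypothetical provider state shape (only
* listApiConfigMeta and currentApiConfigName are actually read; the real state type
* lives in the provider):
*
* ```typescript
* // Hypothetical state, for illustration only
* const state = {
*     currentApiConfigName: "work",
*     listApiConfigMeta: [
*         { id: "abc123", name: "work" },
*         { id: "def456", name: "personal" },
*     ],
* }
* // getCurrentProfileId(state)     -> "abc123"
* // getCurrentProfileId(undefined) -> "default"
* ```
*/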
  3378. private getCurrentProfileId(state: any): string {
  3379. return (
  3380. state?.listApiConfigMeta?.find((profile: any) => profile.name === state?.currentApiConfigName)?.id ??
  3381. "default"
  3382. )
  3383. }
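/**
* Recovery path for "context window exceeded" errors from the provider.
*
* Forces an aggressive context reduction: it posts an in-progress indicator to the webview,
* rebuilds the native tools array so the condensing request carries the same tool definitions
* as normal API calls, runs manageContext with autoCondenseContextPercent set to
* FORCED_CONTEXT_REDUCTION_PERCENT, and then surfaces either a condense_context or a
* sliding_window_truncation message depending on which strategy ran. The completion
* notification is sent in a finally block so the webview spinner is always dismissed.
*/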
  3384. private async handleContextWindowExceededError(): Promise<void> {
  3385. const state = await this.providerRef.deref()?.getState()
  3386. const { profileThresholds = {}, mode, apiConfiguration } = state ?? {}
  3387. const { contextTokens } = this.getTokenUsage()
  3388. const modelInfo = this.api.getModel().info
  3389. const maxTokens = getModelMaxOutputTokens({
  3390. modelId: this.api.getModel().id,
  3391. model: modelInfo,
  3392. settings: this.apiConfiguration,
  3393. })
  3394. const contextWindow = modelInfo.contextWindow
  3395. // Get the current profile ID using the helper method
  3396. const currentProfileId = this.getCurrentProfileId(state)
  3397. // Log the context window error for debugging
  3398. console.warn(
  3399. `[Task#${this.taskId}] Context window exceeded for model ${this.api.getModel().id}. ` +
  3400. `Current tokens: ${contextTokens}, Context window: ${contextWindow}. ` +
  3401. `Forcing truncation to ${FORCED_CONTEXT_REDUCTION_PERCENT}% of current context.`,
  3402. )
  3403. // Send condenseTaskContextStarted to show in-progress indicator
  3404. await this.providerRef.deref()?.postMessageToWebview({ type: "condenseTaskContextStarted", text: this.taskId })
  3405. // Build tools for condensing metadata (same tools used for normal API calls)
  3406. const provider = this.providerRef.deref()
  3407. let allTools: import("openai").default.Chat.ChatCompletionTool[] = []
  3408. if (provider) {
  3409. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  3410. provider,
  3411. cwd: this.cwd,
  3412. mode,
  3413. customModes: state?.customModes,
  3414. experiments: state?.experiments,
  3415. apiConfiguration,
  3416. browserToolEnabled: state?.browserToolEnabled ?? true,
  3417. disabledTools: state?.disabledTools,
  3418. modelInfo,
  3419. includeAllToolsWithRestrictions: false,
  3420. })
  3421. allTools = toolsResult.tools
  3422. }
  3423. // Build metadata with tools and taskId for the condensing API call
  3424. const metadata: ApiHandlerCreateMessageMetadata = {
  3425. mode,
  3426. taskId: this.taskId,
  3427. ...(allTools.length > 0
  3428. ? {
  3429. tools: allTools,
  3430. tool_choice: "auto",
  3431. parallelToolCalls: true,
  3432. }
  3433. : {}),
  3434. }
  3435. try {
  3436. // Generate environment details to include in the condensed summary
  3437. const environmentDetails = await getEnvironmentDetails(this, true)
// Force aggressive truncation by keeping only FORCED_CONTEXT_REDUCTION_PERCENT% of the current context
  3439. const truncateResult = await manageContext({
  3440. messages: this.apiConversationHistory,
  3441. totalTokens: contextTokens || 0,
  3442. maxTokens,
  3443. contextWindow,
  3444. apiHandler: this.api,
  3445. autoCondenseContext: true,
  3446. autoCondenseContextPercent: FORCED_CONTEXT_REDUCTION_PERCENT,
  3447. systemPrompt: await this.getSystemPrompt(),
  3448. taskId: this.taskId,
  3449. profileThresholds,
  3450. currentProfileId,
  3451. metadata,
  3452. environmentDetails,
  3453. })
  3454. if (truncateResult.messages !== this.apiConversationHistory) {
  3455. await this.overwriteApiConversationHistory(truncateResult.messages)
  3456. }
  3457. if (truncateResult.summary) {
  3458. const { summary, cost, prevContextTokens, newContextTokens = 0 } = truncateResult
  3459. const contextCondense: ContextCondense = { summary, cost, newContextTokens, prevContextTokens }
  3460. await this.say(
  3461. "condense_context",
  3462. undefined /* text */,
  3463. undefined /* images */,
  3464. false /* partial */,
  3465. undefined /* checkpoint */,
  3466. undefined /* progressStatus */,
  3467. { isNonInteractive: true } /* options */,
  3468. contextCondense,
  3469. )
  3470. } else if (truncateResult.truncationId) {
  3471. // Sliding window truncation occurred (fallback when condensing fails or is disabled)
  3472. const contextTruncation: ContextTruncation = {
  3473. truncationId: truncateResult.truncationId,
  3474. messagesRemoved: truncateResult.messagesRemoved ?? 0,
  3475. prevContextTokens: truncateResult.prevContextTokens,
  3476. newContextTokens: truncateResult.newContextTokensAfterTruncation ?? 0,
  3477. }
  3478. await this.say(
  3479. "sliding_window_truncation",
  3480. undefined /* text */,
  3481. undefined /* images */,
  3482. false /* partial */,
  3483. undefined /* checkpoint */,
  3484. undefined /* progressStatus */,
  3485. { isNonInteractive: true } /* options */,
  3486. undefined /* contextCondense */,
  3487. contextTruncation,
  3488. )
  3489. }
  3490. } finally {
  3491. // Notify webview that context management is complete (removes in-progress spinner)
  3492. // IMPORTANT: Must always be sent to dismiss the spinner, even on error
  3493. await this.providerRef
  3494. .deref()
  3495. ?.postMessageToWebview({ type: "condenseTaskContextResponse", text: this.taskId })
  3496. }
  3497. }
  3498. /**
  3499. * Enforce the user-configured provider rate limit.
  3500. *
  3501. * NOTE: This is intentionally treated as expected behavior and is surfaced via
  3502. * the `api_req_rate_limit_wait` say type (not an error).
  3503. */
  3504. private async maybeWaitForProviderRateLimit(retryAttempt: number): Promise<void> {
  3505. const state = await this.providerRef.deref()?.getState()
  3506. const rateLimitSeconds =
  3507. state?.apiConfiguration?.rateLimitSeconds ?? this.apiConfiguration?.rateLimitSeconds ?? 0
  3508. if (rateLimitSeconds <= 0 || !Task.lastGlobalApiRequestTime) {
  3509. return
  3510. }
  3511. const now = performance.now()
  3512. const timeSinceLastRequest = now - Task.lastGlobalApiRequestTime
  3513. const rateLimitDelay = Math.ceil(
  3514. Math.min(rateLimitSeconds, Math.max(0, rateLimitSeconds * 1000 - timeSinceLastRequest) / 1000),
  3515. )
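// Illustrative arithmetic (hypothetical numbers): with rateLimitSeconds = 10 and ~6500ms
// elapsed since the last request, the remaining window is 3500ms, so
// rateLimitDelay = ceil(min(10, 3.5)) = 4 seconds of countdown.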
  3516. // Only show the countdown UX on the first attempt. Retry flows have their own delay messaging.
  3517. if (rateLimitDelay > 0 && retryAttempt === 0) {
  3518. for (let i = rateLimitDelay; i > 0; i--) {
  3519. // Send structured JSON data for i18n-safe transport
  3520. const delayMessage = JSON.stringify({ seconds: i })
  3521. await this.say("api_req_rate_limit_wait", delayMessage, undefined, true)
  3522. await delay(1000)
  3523. }
  3524. // Finalize the partial message so the UI doesn't keep rendering an in-progress spinner.
  3525. await this.say("api_req_rate_limit_wait", undefined, undefined, false)
  3526. }
  3527. }
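/**
* Starts a single API request and yields the resulting stream chunks.
*
* Handles provider rate limiting, automatic context condensing/truncation, tool metadata,
* and first-chunk error recovery (context-window retries, exponential backoff, or an
* api_req_failed ask) before delegating the rest of the stream to the caller.
*
* A minimal consumption sketch (illustrative only; the generator is typically driven by the
* main request loop, see recursivelyMakeClineRequests, and real chunk handling is more involved):
*
* ```typescript
* for await (const chunk of this.attemptApiRequest()) {
*     // Process each streamed chunk here; the chunk shape is defined by ApiStream.
* }
* ```
*/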
  3528. public async *attemptApiRequest(
  3529. retryAttempt: number = 0,
  3530. options: { skipProviderRateLimit?: boolean } = {},
  3531. ): ApiStream {
  3532. const state = await this.providerRef.deref()?.getState()
  3533. const {
  3534. apiConfiguration,
  3535. autoApprovalEnabled,
  3536. requestDelaySeconds,
  3537. mode,
  3538. autoCondenseContext = true,
  3539. autoCondenseContextPercent = 100,
  3540. profileThresholds = {},
  3541. } = state ?? {}
  3542. // Get condensing configuration for automatic triggers.
  3543. const customCondensingPrompt = state?.customSupportPrompts?.CONDENSE
  3544. if (!options.skipProviderRateLimit) {
  3545. await this.maybeWaitForProviderRateLimit(retryAttempt)
  3546. }
  3547. // Update last request time right before making the request so that subsequent
  3548. // requests — even from new subtasks — will honour the provider's rate-limit.
  3549. //
  3550. // NOTE: When recursivelyMakeClineRequests handles rate limiting, it sets the
  3551. // timestamp earlier to include the environment details build. We still set it
  3552. // here for direct callers (tests) and for the case where we didn't rate-limit
  3553. // in the caller.
  3554. Task.lastGlobalApiRequestTime = performance.now()
  3555. const systemPrompt = await this.getSystemPrompt()
  3556. const { contextTokens } = this.getTokenUsage()
  3557. if (contextTokens) {
  3558. const modelInfo = this.api.getModel().info
  3559. const maxTokens = getModelMaxOutputTokens({
  3560. modelId: this.api.getModel().id,
  3561. model: modelInfo,
  3562. settings: this.apiConfiguration,
  3563. })
  3564. const contextWindow = modelInfo.contextWindow
  3565. // Get the current profile ID using the helper method
  3566. const currentProfileId = this.getCurrentProfileId(state)
  3567. // Check if context management will likely run (threshold check)
  3568. // This allows us to show an in-progress indicator to the user
  3569. // We use the centralized willManageContext helper to avoid duplicating threshold logic
  3570. const lastMessage = this.apiConversationHistory[this.apiConversationHistory.length - 1]
  3571. const lastMessageContent = lastMessage?.content
  3572. let lastMessageTokens = 0
  3573. if (lastMessageContent) {
  3574. lastMessageTokens = Array.isArray(lastMessageContent)
  3575. ? await this.api.countTokens(lastMessageContent)
  3576. : await this.api.countTokens([{ type: "text", text: lastMessageContent as string }])
  3577. }
  3578. const contextManagementWillRun = willManageContext({
  3579. totalTokens: contextTokens,
  3580. contextWindow,
  3581. maxTokens,
  3582. autoCondenseContext,
  3583. autoCondenseContextPercent,
  3584. profileThresholds,
  3585. currentProfileId,
  3586. lastMessageTokens,
  3587. })
  3588. // Send condenseTaskContextStarted BEFORE manageContext to show in-progress indicator
  3589. // This notification must be sent here (not earlier) because the early check uses stale token count
  3590. // (before user message is added to history), which could incorrectly skip showing the indicator
  3591. if (contextManagementWillRun && autoCondenseContext) {
  3592. await this.providerRef
  3593. .deref()
  3594. ?.postMessageToWebview({ type: "condenseTaskContextStarted", text: this.taskId })
  3595. }
  3596. // Build tools for condensing metadata (same tools used for normal API calls)
  3597. // This ensures the condensing API call includes tool definitions for providers that need them
  3598. let contextMgmtTools: import("openai").default.Chat.ChatCompletionTool[] = []
  3599. {
  3600. const provider = this.providerRef.deref()
  3601. if (provider) {
  3602. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  3603. provider,
  3604. cwd: this.cwd,
  3605. mode,
  3606. customModes: state?.customModes,
  3607. experiments: state?.experiments,
  3608. apiConfiguration,
  3609. browserToolEnabled: state?.browserToolEnabled ?? true,
  3610. disabledTools: state?.disabledTools,
  3611. modelInfo,
  3612. includeAllToolsWithRestrictions: false,
  3613. })
  3614. contextMgmtTools = toolsResult.tools
  3615. }
  3616. }
  3617. // Build metadata with tools and taskId for the condensing API call
  3618. const contextMgmtMetadata: ApiHandlerCreateMessageMetadata = {
  3619. mode,
  3620. taskId: this.taskId,
  3621. ...(contextMgmtTools.length > 0
  3622. ? {
  3623. tools: contextMgmtTools,
  3624. tool_choice: "auto",
  3625. parallelToolCalls: true,
  3626. }
  3627. : {}),
  3628. }
  3629. // Only generate environment details when context management will actually run.
  3630. // getEnvironmentDetails(this, true) triggers a recursive workspace listing which
  3631. // adds overhead - avoid this for the common case where context is below threshold.
  3632. const contextMgmtEnvironmentDetails = contextManagementWillRun
  3633. ? await getEnvironmentDetails(this, true)
  3634. : undefined
  3635. // Get files read by Roo for code folding - only when context management will run
  3636. const contextMgmtFilesReadByRoo =
  3637. contextManagementWillRun && autoCondenseContext
  3638. ? await this.getFilesReadByRooSafely("attemptApiRequest")
  3639. : undefined
  3640. try {
  3641. const truncateResult = await manageContext({
  3642. messages: this.apiConversationHistory,
  3643. totalTokens: contextTokens,
  3644. maxTokens,
  3645. contextWindow,
  3646. apiHandler: this.api,
  3647. autoCondenseContext,
  3648. autoCondenseContextPercent,
  3649. systemPrompt,
  3650. taskId: this.taskId,
  3651. customCondensingPrompt,
  3652. profileThresholds,
  3653. currentProfileId,
  3654. metadata: contextMgmtMetadata,
  3655. environmentDetails: contextMgmtEnvironmentDetails,
  3656. filesReadByRoo: contextMgmtFilesReadByRoo,
  3657. cwd: this.cwd,
  3658. rooIgnoreController: this.rooIgnoreController,
  3659. })
  3660. if (truncateResult.messages !== this.apiConversationHistory) {
  3661. await this.overwriteApiConversationHistory(truncateResult.messages)
  3662. }
  3663. if (truncateResult.error) {
  3664. await this.say("condense_context_error", truncateResult.error)
  3665. }
  3666. if (truncateResult.summary) {
  3667. const { summary, cost, prevContextTokens, newContextTokens = 0, condenseId } = truncateResult
  3668. const contextCondense: ContextCondense = {
  3669. summary,
  3670. cost,
  3671. newContextTokens,
  3672. prevContextTokens,
  3673. condenseId,
  3674. }
  3675. await this.say(
  3676. "condense_context",
  3677. undefined /* text */,
  3678. undefined /* images */,
  3679. false /* partial */,
  3680. undefined /* checkpoint */,
  3681. undefined /* progressStatus */,
  3682. { isNonInteractive: true } /* options */,
  3683. contextCondense,
  3684. )
  3685. } else if (truncateResult.truncationId) {
  3686. // Sliding window truncation occurred (fallback when condensing fails or is disabled)
  3687. const contextTruncation: ContextTruncation = {
  3688. truncationId: truncateResult.truncationId,
  3689. messagesRemoved: truncateResult.messagesRemoved ?? 0,
  3690. prevContextTokens: truncateResult.prevContextTokens,
  3691. newContextTokens: truncateResult.newContextTokensAfterTruncation ?? 0,
  3692. }
  3693. await this.say(
  3694. "sliding_window_truncation",
  3695. undefined /* text */,
  3696. undefined /* images */,
  3697. false /* partial */,
  3698. undefined /* checkpoint */,
  3699. undefined /* progressStatus */,
  3700. { isNonInteractive: true } /* options */,
  3701. undefined /* contextCondense */,
  3702. contextTruncation,
  3703. )
  3704. }
  3705. } finally {
  3706. // Notify webview that context management is complete (sets isCondensing = false)
  3707. // This removes the in-progress spinner and allows the completed result to show
  3708. // IMPORTANT: Must always be sent to dismiss the spinner, even on error
  3709. if (contextManagementWillRun && autoCondenseContext) {
  3710. await this.providerRef
  3711. .deref()
  3712. ?.postMessageToWebview({ type: "condenseTaskContextResponse", text: this.taskId })
  3713. }
  3714. }
  3715. }
  3716. // Get the effective API history by filtering out condensed messages
  3717. // This allows non-destructive condensing where messages are tagged but not deleted,
  3718. // enabling accurate rewind operations while still sending condensed history to the API.
  3719. const effectiveHistory = getEffectiveApiHistory(this.apiConversationHistory)
  3720. const messagesSinceLastSummary = getMessagesSinceLastSummary(effectiveHistory)
  3721. // For API only: merge consecutive user messages (excludes summary messages per
  3722. // mergeConsecutiveApiMessages implementation) without mutating stored history.
  3723. const mergedForApi = mergeConsecutiveApiMessages(messagesSinceLastSummary, { roles: ["user"] })
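// Illustrative example of the merge above (content shapes simplified, assuming two consecutive
// user turns exist in the effective history): the API-bound copy combines
// [{ role: "user", content: "A" }, { role: "user", content: "B" }] into a single user message,
// while the stored apiConversationHistory keeps both messages separate.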
  3724. const messagesWithoutImages = maybeRemoveImageBlocks(mergedForApi, this.api)
  3725. const cleanConversationHistory = this.buildCleanConversationHistory(messagesWithoutImages as ApiMessage[])
  3726. // Check auto-approval limits
  3727. const approvalResult = await this.autoApprovalHandler.checkAutoApprovalLimits(
  3728. state,
  3729. this.combineMessages(this.clineMessages.slice(1)),
  3730. async (type, data) => this.ask(type, data),
  3731. )
  3732. if (!approvalResult.shouldProceed) {
  3733. // User did not approve, task should be aborted
  3734. throw new Error("Auto-approval limit reached and user did not approve continuation")
  3735. }
  3736. // Whether we include tools is determined by whether we have any tools to send.
  3737. const modelInfo = this.api.getModel().info
// Build the complete tools array: native tools + dynamic MCP tools.
// When includeAllToolsWithRestrictions is true, buildNativeToolsArrayWithRestrictions returns
// every tool definition but also provides allowedFunctionNames, so a provider that needs to
// see all tool definitions in history (currently only Gemini) can still restrict which tools
// are callable in the current mode. Other providers (Anthropic, OpenAI, etc.) don't support
// this yet and continue to receive only the tools filtered for the current mode.
let allTools: OpenAI.Chat.ChatCompletionTool[] = []
let allowedFunctionNames: string[] | undefined
const supportsAllowedFunctionNames = apiConfiguration?.apiProvider === "gemini"
  3750. {
  3751. const provider = this.providerRef.deref()
  3752. if (!provider) {
  3753. throw new Error("Provider reference lost during tool building")
  3754. }
  3755. const toolsResult = await buildNativeToolsArrayWithRestrictions({
  3756. provider,
  3757. cwd: this.cwd,
  3758. mode,
  3759. customModes: state?.customModes,
  3760. experiments: state?.experiments,
  3761. apiConfiguration,
  3762. browserToolEnabled: state?.browserToolEnabled ?? true,
  3763. disabledTools: state?.disabledTools,
  3764. modelInfo,
  3765. includeAllToolsWithRestrictions: supportsAllowedFunctionNames,
  3766. })
  3767. allTools = toolsResult.tools
  3768. allowedFunctionNames = toolsResult.allowedFunctionNames
  3769. }
  3770. const shouldIncludeTools = allTools.length > 0
  3771. const metadata: ApiHandlerCreateMessageMetadata = {
  3772. mode: mode,
  3773. taskId: this.taskId,
  3774. suppressPreviousResponseId: this.skipPrevResponseIdOnce,
  3775. // Include tools whenever they are present.
  3776. ...(shouldIncludeTools
  3777. ? {
  3778. tools: allTools,
  3779. tool_choice: "auto",
  3780. parallelToolCalls: true,
  3781. // When mode restricts tools, provide allowedFunctionNames so providers
  3782. // like Gemini can see all tools in history but only call allowed ones
  3783. ...(allowedFunctionNames ? { allowedFunctionNames } : {}),
  3784. }
  3785. : {}),
  3786. }
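// Illustrative shape of the resulting metadata for a Gemini profile in a restricted mode
// (all field values and tool names below are hypothetical placeholders):
// {
//     mode: "code",
//     taskId: "task-123",
//     suppressPreviousResponseId: false,
//     tools: [/* all tool definitions */],
//     tool_choice: "auto",
//     parallelToolCalls: true,
//     allowedFunctionNames: ["read_file", "write_to_file"],
// }
// For non-Gemini providers, allowedFunctionNames is omitted and tools contains only the
// mode-filtered set.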
  3787. // Create an AbortController to allow cancelling the request mid-stream
  3788. this.currentRequestAbortController = new AbortController()
  3789. const abortSignal = this.currentRequestAbortController.signal
  3790. // Reset the flag after using it
  3791. this.skipPrevResponseIdOnce = false
  3792. // The provider accepts reasoning items alongside standard messages; cast to the expected parameter type.
  3793. const stream = this.api.createMessage(
  3794. systemPrompt,
  3795. cleanConversationHistory as unknown as Anthropic.Messages.MessageParam[],
  3796. metadata,
  3797. )
  3798. const iterator = stream[Symbol.asyncIterator]()
  3799. // Set up abort handling - when the signal is aborted, clean up the controller reference
  3800. abortSignal.addEventListener("abort", () => {
  3801. console.log(`[Task#${this.taskId}.${this.instanceId}] AbortSignal triggered for current request`)
  3802. this.currentRequestAbortController = undefined
  3803. })
  3804. try {
  3805. // Awaiting first chunk to see if it will throw an error.
  3806. this.isWaitingForFirstChunk = true
  3807. // Race between the first chunk and the abort signal
  3808. const firstChunkPromise = iterator.next()
  3809. const abortPromise = new Promise<never>((_, reject) => {
  3810. if (abortSignal.aborted) {
  3811. reject(new Error("Request cancelled by user"))
  3812. } else {
  3813. abortSignal.addEventListener("abort", () => {
  3814. reject(new Error("Request cancelled by user"))
  3815. })
  3816. }
  3817. })
  3818. const firstChunk = await Promise.race([firstChunkPromise, abortPromise])
  3819. yield firstChunk.value
  3820. this.isWaitingForFirstChunk = false
  3821. } catch (error) {
  3822. this.isWaitingForFirstChunk = false
  3823. this.currentRequestAbortController = undefined
  3824. const isContextWindowExceededError = checkContextWindowExceededError(error)
  3825. // If it's a context window error and we haven't exceeded max retries for this error type
  3826. if (isContextWindowExceededError && retryAttempt < MAX_CONTEXT_WINDOW_RETRIES) {
  3827. console.warn(
  3828. `[Task#${this.taskId}] Context window exceeded for model ${this.api.getModel().id}. ` +
  3829. `Retry attempt ${retryAttempt + 1}/${MAX_CONTEXT_WINDOW_RETRIES}. ` +
  3830. `Attempting automatic truncation...`,
  3831. )
  3832. await this.handleContextWindowExceededError()
  3833. // Retry the request after handling the context window error
  3834. yield* this.attemptApiRequest(retryAttempt + 1)
  3835. return
  3836. }
// Note that this api_req_failed ask is unique: we only present the retry option if the API
// hasn't streamed any content yet (i.e., it failed on the first chunk), since only then is it
// safe to let the user hit a retry button. If the API fails mid-stream, it could be in any
// arbitrary state where some tools may already have executed, so that error is handled
// differently and requires cancelling the task entirely.
  3838. if (autoApprovalEnabled) {
  3839. // Apply shared exponential backoff and countdown UX
  3840. await this.backoffAndAnnounce(retryAttempt, error)
  3841. // CRITICAL: Check if task was aborted during the backoff countdown
  3842. // This prevents infinite loops when users cancel during auto-retry
  3843. // Without this check, the recursive call below would continue even after abort
  3844. if (this.abort) {
  3845. throw new Error(
  3846. `[Task#attemptApiRequest] task ${this.taskId}.${this.instanceId} aborted during retry`,
  3847. )
  3848. }
  3849. // Delegate generator output from the recursive call with
  3850. // incremented retry count.
  3851. yield* this.attemptApiRequest(retryAttempt + 1)
  3852. return
  3853. } else {
  3854. const { response } = await this.ask(
  3855. "api_req_failed",
  3856. error.message ?? JSON.stringify(serializeError(error), null, 2),
  3857. )
  3858. if (response !== "yesButtonClicked") {
// In practice this is unreachable: if the user clicked "no", the current task would
// already have been cleared, aborting this instance before we reach this point.
  3861. throw new Error("API request failed")
  3862. }
  3863. await this.say("api_req_retried")
  3864. // Delegate generator output from the recursive call.
  3865. yield* this.attemptApiRequest()
  3866. return
  3867. }
  3868. }
// No error, so we can continue to yield all remaining chunks.
// (This needs to be placed outside of the try/catch because we want the caller to handle
// errors, not api_req_failed, which is reserved for first-chunk failures only.)
// `yield* iterator` delegates to the underlying iterator: it yields all remaining values,
// effectively passing along every subsequent chunk from the original stream.
  3877. yield* iterator
  3878. }
  3879. // Shared exponential backoff for retries (first-chunk and mid-stream)
  3880. private async backoffAndAnnounce(retryAttempt: number, error: any): Promise<void> {
  3881. try {
  3882. const state = await this.providerRef.deref()?.getState()
  3883. const baseDelay = state?.requestDelaySeconds || 5
  3884. let exponentialDelay = Math.min(
  3885. Math.ceil(baseDelay * Math.pow(2, retryAttempt)),
  3886. MAX_EXPONENTIAL_BACKOFF_SECONDS,
  3887. )
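// Illustrative arithmetic (hypothetical values): with baseDelay = 5s, retryAttempt 0/1/2/3
// yields 5s/10s/20s/40s before capping at MAX_EXPONENTIAL_BACKOFF_SECONDS.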
  3888. // Respect provider rate limit window
  3889. let rateLimitDelay = 0
  3890. const rateLimit = (state?.apiConfiguration ?? this.apiConfiguration)?.rateLimitSeconds || 0
  3891. if (Task.lastGlobalApiRequestTime && rateLimit > 0) {
  3892. const elapsed = performance.now() - Task.lastGlobalApiRequestTime
  3893. rateLimitDelay = Math.ceil(Math.min(rateLimit, Math.max(0, rateLimit * 1000 - elapsed) / 1000))
  3894. }
  3895. // Prefer RetryInfo on 429 if present
  3896. if (error?.status === 429) {
  3897. const retryInfo = error?.errorDetails?.find(
  3898. (d: any) => d["@type"] === "type.googleapis.com/google.rpc.RetryInfo",
  3899. )
  3900. const match = retryInfo?.retryDelay?.match?.(/^(\d+)s$/)
  3901. if (match) {
  3902. exponentialDelay = Number(match[1]) + 1
  3903. }
  3904. }
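// Example: a RetryInfo detail with retryDelay "30s" replaces the exponential delay with
// 31 seconds (the code adds one second on top of the server-provided delay).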
  3905. const finalDelay = Math.max(exponentialDelay, rateLimitDelay)
  3906. if (finalDelay <= 0) {
  3907. return
  3908. }
  3909. // Build header text; fall back to error message if none provided
  3910. let headerText
  3911. if (error.status) {
  3912. // Include both status code (for ChatRow parsing) and detailed message (for error details)
  3913. // Format: "<status>\n<message>" allows ChatRow to extract status via parseInt(text.substring(0,3))
  3914. // while preserving the full error message in errorDetails for debugging
  3915. const errorMessage = error?.message || "Unknown error"
  3916. headerText = `${error.status}\n${errorMessage}`
  3917. } else if (error?.message) {
  3918. headerText = error.message
  3919. } else {
  3920. headerText = "Unknown error"
  3921. }
  3922. headerText = headerText ? `${headerText}\n` : ""
  3923. // Show countdown timer with exponential backoff
  3924. for (let i = finalDelay; i > 0; i--) {
  3925. // Check abort flag during countdown to allow early exit
  3926. if (this.abort) {
  3927. throw new Error(`[Task#${this.taskId}] Aborted during retry countdown`)
  3928. }
  3929. await this.say("api_req_retry_delayed", `${headerText}<retry_timer>${i}</retry_timer>`, undefined, true)
  3930. await delay(1000)
  3931. }
  3932. await this.say("api_req_retry_delayed", headerText, undefined, false)
  3933. } catch (err) {
  3934. console.error("Exponential backoff failed:", err)
  3935. }
  3936. }
  3937. // Checkpoints
  3938. public async checkpointSave(force: boolean = false, suppressMessage: boolean = false) {
  3939. return checkpointSave(this, force, suppressMessage)
  3940. }
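/**
* Converts stored ApiMessages into the provider-facing conversation history.
*
* Reasoning handling, per the branches below:
* - standalone reasoning items are forwarded only when they carry encrypted_content
* - assistant messages with reasoning_details (OpenRouter-style) are passed through with that property attached
* - an embedded encrypted reasoning block is split out into a separate reasoning item
* - plain-text reasoning is kept only for models with preserveReasoning or AI SDK providers, otherwise it is stripped before sending
*
* A minimal sketch of the encrypted-reasoning split (field values are illustrative only):
*
* ```typescript
* // Input (stored history entry)
* // { role: "assistant", content: [
* //     { type: "reasoning", encrypted_content: "enc...", summary: [] },
* //     { type: "text", text: "Done." },
* // ] }
* //
* // Output (two entries sent to the provider)
* // { type: "reasoning", summary: [], encrypted_content: "enc..." }
* // { role: "assistant", content: "Done." }
* ```
*/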
  3941. private buildCleanConversationHistory(
  3942. messages: ApiMessage[],
  3943. ): Array<
  3944. Anthropic.Messages.MessageParam | { type: "reasoning"; encrypted_content: string; id?: string; summary?: any[] }
  3945. > {
  3946. type ReasoningItemForRequest = {
  3947. type: "reasoning"
  3948. encrypted_content: string
  3949. id?: string
  3950. summary?: any[]
  3951. }
  3952. const cleanConversationHistory: (Anthropic.Messages.MessageParam | ReasoningItemForRequest)[] = []
  3953. for (const msg of messages) {
  3954. // Standalone reasoning: send encrypted, skip plain text
  3955. if (msg.type === "reasoning") {
  3956. if (msg.encrypted_content) {
  3957. cleanConversationHistory.push({
  3958. type: "reasoning",
  3959. summary: msg.summary,
  3960. encrypted_content: msg.encrypted_content!,
  3961. ...(msg.id ? { id: msg.id } : {}),
  3962. })
  3963. }
  3964. continue
  3965. }
  3966. // Preferred path: assistant message with embedded reasoning as first content block
  3967. if (msg.role === "assistant") {
  3968. const rawContent = msg.content
  3969. const contentArray: Anthropic.Messages.ContentBlockParam[] = Array.isArray(rawContent)
  3970. ? (rawContent as Anthropic.Messages.ContentBlockParam[])
  3971. : rawContent !== undefined
  3972. ? ([
  3973. { type: "text", text: rawContent } satisfies Anthropic.Messages.TextBlockParam,
  3974. ] as Anthropic.Messages.ContentBlockParam[])
  3975. : []
  3976. const [first, ...rest] = contentArray
  3977. // Check if this message has reasoning_details (OpenRouter format for Gemini 3, etc.)
  3978. const msgWithDetails = msg
  3979. if (msgWithDetails.reasoning_details && Array.isArray(msgWithDetails.reasoning_details)) {
  3980. // Build the assistant message with reasoning_details
  3981. let assistantContent: Anthropic.Messages.MessageParam["content"]
  3982. if (contentArray.length === 0) {
  3983. assistantContent = ""
  3984. } else if (contentArray.length === 1 && contentArray[0].type === "text") {
  3985. assistantContent = (contentArray[0] as Anthropic.Messages.TextBlockParam).text
  3986. } else {
  3987. assistantContent = contentArray
  3988. }
  3989. // Create message with reasoning_details property
  3990. cleanConversationHistory.push({
  3991. role: "assistant",
  3992. content: assistantContent,
  3993. reasoning_details: msgWithDetails.reasoning_details,
  3994. } as any)
  3995. continue
  3996. }
  3997. // Embedded reasoning: encrypted (send) or plain text (skip)
  3998. const hasEncryptedReasoning =
  3999. first && (first as any).type === "reasoning" && typeof (first as any).encrypted_content === "string"
  4000. const hasPlainTextReasoning =
  4001. first && (first as any).type === "reasoning" && typeof (first as any).text === "string"
  4002. if (hasEncryptedReasoning) {
  4003. const reasoningBlock = first as any
  4004. // Send as separate reasoning item (OpenAI Native)
  4005. cleanConversationHistory.push({
  4006. type: "reasoning",
  4007. summary: reasoningBlock.summary ?? [],
  4008. encrypted_content: reasoningBlock.encrypted_content,
  4009. ...(reasoningBlock.id ? { id: reasoningBlock.id } : {}),
  4010. })
  4011. // Send assistant message without reasoning
  4012. let assistantContent: Anthropic.Messages.MessageParam["content"]
  4013. if (rest.length === 0) {
  4014. assistantContent = ""
  4015. } else if (rest.length === 1 && rest[0].type === "text") {
  4016. assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
  4017. } else {
  4018. assistantContent = rest
  4019. }
  4020. cleanConversationHistory.push({
  4021. role: "assistant",
  4022. content: assistantContent,
  4023. } satisfies Anthropic.Messages.MessageParam)
  4024. continue
  4025. } else if (hasPlainTextReasoning) {
  4026. // Preserve plain-text reasoning blocks for:
  4027. // - models explicitly opting in via preserveReasoning
  4028. // - AI SDK providers (provider packages decide what to include in the native request)
  4029. const shouldPreserveForApi =
  4030. this.api.getModel().info.preserveReasoning === true || this.api.isAiSdkProvider()
  4031. let assistantContent: Anthropic.Messages.MessageParam["content"]
  4032. if (shouldPreserveForApi) {
  4033. assistantContent = contentArray
  4034. } else {
  4035. // Strip reasoning out - stored for history only, not sent back to API
  4036. if (rest.length === 0) {
  4037. assistantContent = ""
  4038. } else if (rest.length === 1 && rest[0].type === "text") {
  4039. assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
  4040. } else {
  4041. assistantContent = rest
  4042. }
  4043. }
  4044. cleanConversationHistory.push({
  4045. role: "assistant",
  4046. content: assistantContent,
  4047. } satisfies Anthropic.Messages.MessageParam)
  4048. continue
  4049. }
  4050. }
  4051. // Default path for regular messages (no embedded reasoning)
  4052. if (msg.role) {
  4053. cleanConversationHistory.push({
  4054. role: msg.role,
  4055. content: msg.content as Anthropic.Messages.ContentBlockParam[] | string,
  4056. })
  4057. }
  4058. }
  4059. return cleanConversationHistory
  4060. }
  4061. public async checkpointRestore(options: CheckpointRestoreOptions) {
  4062. return checkpointRestore(this, options)
  4063. }
  4064. public async checkpointDiff(options: CheckpointDiffOptions) {
  4065. return checkpointDiff(this, options)
  4066. }
  4067. // Metrics
  4068. public combineMessages(messages: ClineMessage[]) {
  4069. return combineApiRequests(combineCommandSequences(messages))
  4070. }
  4071. public getTokenUsage(): TokenUsage {
  4072. return getApiMetrics(this.combineMessages(this.clineMessages.slice(1)))
  4073. }
  4074. public recordToolUsage(toolName: ToolName) {
  4075. if (!this.toolUsage[toolName]) {
  4076. this.toolUsage[toolName] = { attempts: 0, failures: 0 }
  4077. }
  4078. this.toolUsage[toolName].attempts++
  4079. }
  4080. public recordToolError(toolName: ToolName, error?: string) {
  4081. if (!this.toolUsage[toolName]) {
  4082. this.toolUsage[toolName] = { attempts: 0, failures: 0 }
  4083. }
  4084. this.toolUsage[toolName].failures++
  4085. if (error) {
  4086. this.emit(RooCodeEventName.TaskToolFailed, this.taskId, toolName, error)
  4087. }
  4088. }
  4089. // Getters
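/**
* Derives the task status from the currently pending ask, checked in priority order:
* an interactive ask wins over a resumable ask, which wins over an idle ask; with no
* pending ask the task is considered Running.
*/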
  4090. public get taskStatus(): TaskStatus {
  4091. if (this.interactiveAsk) {
  4092. return TaskStatus.Interactive
  4093. }
  4094. if (this.resumableAsk) {
  4095. return TaskStatus.Resumable
  4096. }
  4097. if (this.idleAsk) {
  4098. return TaskStatus.Idle
  4099. }
  4100. return TaskStatus.Running
  4101. }
  4102. public get taskAsk(): ClineMessage | undefined {
  4103. return this.idleAsk || this.resumableAsk || this.interactiveAsk
  4104. }
  4105. public get queuedMessages(): QueuedMessage[] {
  4106. return this.messageQueueService.messages
  4107. }
  4108. public get tokenUsage(): TokenUsage | undefined {
  4109. if (this.tokenUsageSnapshot && this.tokenUsageSnapshotAt) {
  4110. return this.tokenUsageSnapshot
  4111. }
  4112. this.tokenUsageSnapshot = this.getTokenUsage()
  4113. this.tokenUsageSnapshotAt = this.clineMessages.at(-1)?.ts
  4114. return this.tokenUsageSnapshot
  4115. }
  4116. public get cwd() {
  4117. return this.workspacePath
  4118. }
  4119. /**
  4120. * Provides convenient access to high-level message operations.
  4121. * Uses lazy initialization - the MessageManager is only created when first accessed.
  4122. * Subsequent accesses return the same cached instance.
  4123. *
  4124. * ## Important: Single Coordination Point
  4125. *
  4126. * **All MessageManager operations must go through this getter** rather than
  4127. * instantiating `new MessageManager(task)` directly. This ensures:
  4128. * - A single shared instance for consistent behavior
  4129. * - Centralized coordination of all rewind/message operations
  4130. * - Ability to add internal state or instrumentation in the future
  4131. *
  4132. * @example
  4133. * ```typescript
  4134. * // Correct: Use the getter
  4135. * await task.messageManager.rewindToTimestamp(ts)
  4136. *
  4137. * // Incorrect: Do NOT create new instances directly
  4138. * // const manager = new MessageManager(task) // Don't do this!
  4139. * ```
  4140. */
  4141. get messageManager(): MessageManager {
  4142. if (!this._messageManager) {
  4143. this._messageManager = new MessageManager(this)
  4144. }
  4145. return this._messageManager
  4146. }
  4147. /**
  4148. * Broadcast browser session updates to the browser panel (if open)
  4149. */
  4150. private broadcastBrowserSessionUpdate(): void {
  4151. const provider = this.providerRef.deref()
  4152. if (!provider) {
  4153. return
  4154. }
  4155. try {
  4156. const { BrowserSessionPanelManager } = require("../webview/BrowserSessionPanelManager")
  4157. const panelManager = BrowserSessionPanelManager.getInstance(provider)
  4158. // Get browser session messages
  4159. const browserSessionStartIndex = this.clineMessages.findIndex(
  4160. (m) =>
  4161. m.ask === "browser_action_launch" ||
  4162. (m.say === "browser_session_status" && m.text?.includes("opened")),
  4163. )
  4164. const browserSessionMessages =
  4165. browserSessionStartIndex !== -1 ? this.clineMessages.slice(browserSessionStartIndex) : []
  4166. const isBrowserSessionActive = this.browserSession?.isSessionActive() ?? false
  4167. // Update the panel asynchronously
  4168. panelManager.updateBrowserSession(browserSessionMessages, isBrowserSessionActive).catch((error: Error) => {
  4169. console.error("Failed to broadcast browser session update:", error)
  4170. })
  4171. } catch (error) {
  4172. // Silently fail if panel manager is not available
  4173. console.debug("Browser panel not available for update:", error)
  4174. }
  4175. }
/**
* Process any queued messages by dequeuing and submitting them.
* This ensures that queued user messages are sent when appropriate,
* preventing them from getting stuck in the queue.
*/
  4183. public processQueuedMessages(): void {
  4184. try {
  4185. if (!this.messageQueueService.isEmpty()) {
  4186. const queued = this.messageQueueService.dequeueMessage()
  4187. if (queued) {
  4188. setTimeout(() => {
  4189. this.submitUserMessage(queued.text, queued.images).catch((err) =>
  4190. console.error(`[Task] Failed to submit queued message:`, err),
  4191. )
  4192. }, 0)
  4193. }
  4194. }
  4195. } catch (e) {
  4196. console.error(`[Task] Queue processing error:`, e)
  4197. }
  4198. }
  4199. }