
Add reasoningEffort to provider settings schema (#2518)

Chris Estreich · 8 months ago · commit df01e7948b

evals/packages/types/src/roo-code-defaults.ts (+8 -5)

@@ -20,18 +20,21 @@ export const rooCodeDefaults: RooCodeSettings = {
 	// 	thinking: false,
 	// },
 
+	modelTemperature: null,
+	// reasoningEffort: "high",
+
 	pinnedApiConfigs: {},
-	lastShownAnnouncementId: "mar-20-2025-3-10",
+	lastShownAnnouncementId: "apr-04-2025-boomerang",
 
 	autoApprovalEnabled: true,
 	alwaysAllowReadOnly: true,
 	alwaysAllowReadOnlyOutsideWorkspace: false,
 	alwaysAllowWrite: true,
 	alwaysAllowWriteOutsideWorkspace: false,
-	writeDelayMs: 200,
+	writeDelayMs: 1000,
 	alwaysAllowBrowser: true,
 	alwaysApproveResubmit: true,
-	requestDelaySeconds: 5,
+	requestDelaySeconds: 10,
 	alwaysAllowMcp: true,
 	alwaysAllowModeSwitch: true,
 	alwaysAllowSubtasks: true,
@@ -40,8 +43,8 @@ export const rooCodeDefaults: RooCodeSettings = {
 
 	browserToolEnabled: false,
 	browserViewportSize: "900x600",
-	screenshotQuality: 38,
-	remoteBrowserEnabled: true,
+	screenshotQuality: 75,
+	remoteBrowserEnabled: false,
 
 	enableCheckpoints: false,
 	checkpointStorage: "task",

evals/packages/types/src/roo-code.ts (+9 -4)

@@ -96,7 +96,7 @@ export type TelemetrySetting = z.infer<typeof telemetrySettingsSchema>
  */
 
 export const modelInfoSchema = z.object({
-	maxTokens: z.number().optional(),
+	maxTokens: z.number().nullish(),
 	contextWindow: z.number(),
 	supportsImages: z.boolean().optional(),
 	supportsComputerUse: z.boolean().optional(),
@@ -373,11 +373,14 @@ export const providerSettingsSchema = z.object({
 	requestyApiKey: z.string().optional(),
 	requestyModelId: z.string().optional(),
 	requestyModelInfo: modelInfoSchema.optional(),
-	// Generic
+	// Claude 3.7 Sonnet Thinking
 	modelMaxTokens: z.number().optional(), // Currently only used by Anthropic hybrid thinking models.
 	modelMaxThinkingTokens: z.number().optional(), // Currently only used by Anthropic hybrid thinking models.
-	modelTemperature: z.number().nullish(),
+	// Generic
 	includeMaxTokens: z.boolean().optional(),
+	modelTemperature: z.number().nullish(),
+	reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+	rateLimitSeconds: z.number().optional(),
 	// Fake AI
 	fakeAi: z.unknown().optional(),
 })
@@ -457,11 +460,13 @@ const providerSettingsRecord: ProviderSettingsRecord = {
 	requestyModelId: undefined,
 	requestyModelInfo: undefined,
 	// Claude 3.7 Sonnet Thinking
-	modelTemperature: undefined,
 	modelMaxTokens: undefined,
 	modelMaxThinkingTokens: undefined,
 	// Generic
 	includeMaxTokens: undefined,
+	modelTemperature: undefined,
+	reasoningEffort: undefined,
+	rateLimitSeconds: undefined,
 	// Fake AI
 	fakeAi: undefined,
 }
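
For context, the new field flows through the updated Zod schema in evals/packages/types/src/roo-code.ts. A minimal sketch of validating a settings object against it, assuming a relative import from a sibling module and using only fields visible in this hunk:

	import { providerSettingsSchema } from "./roo-code"

	// Illustrative settings object; only fields touched by this commit are shown.
	const candidate = {
		modelTemperature: null,
		reasoningEffort: "high",
		rateLimitSeconds: 10,
	}

	// safeParse rejects any reasoningEffort outside "low" | "medium" | "high".
	const result = providerSettingsSchema.safeParse(candidate)
	if (!result.success) {
		console.error(result.error.issues)
	}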

src/exports/roo-code.d.ts (+2 -1)

@@ -175,10 +175,11 @@ type ProviderSettings = {
 				cachableFields?: string[] | undefined
 		  } | null)
 		| undefined
-	modelTemperature?: (number | null) | undefined
 	modelMaxTokens?: number | undefined
 	modelMaxThinkingTokens?: number | undefined
 	includeMaxTokens?: boolean | undefined
+	modelTemperature?: (number | null) | undefined
+	reasoningEffort?: ("low" | "medium" | "high") | undefined
 	rateLimitSeconds?: number | undefined
 	fakeAi?: unknown | undefined
 }

src/exports/types.ts (+2 -1)

@@ -176,10 +176,11 @@ type ProviderSettings = {
 				cachableFields?: string[] | undefined
 		  } | null)
 		| undefined
-	modelTemperature?: (number | null) | undefined
 	modelMaxTokens?: number | undefined
 	modelMaxThinkingTokens?: number | undefined
 	includeMaxTokens?: boolean | undefined
+	modelTemperature?: (number | null) | undefined
+	reasoningEffort?: ("low" | "medium" | "high") | undefined
 	rateLimitSeconds?: number | undefined
 	fakeAi?: unknown | undefined
 }
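
On the export side, the generated ProviderSettings type picks up the same union, so invalid values fail at compile time. A rough illustration, assuming the type is exported from src/exports/types.ts and that its remaining fields are optional:

	import type { ProviderSettings } from "./types"

	// "low" | "medium" | "high" is enforced by the type checker;
	// reasoningEffort: "max" would be a compile-time error.
	const settings: ProviderSettings = {
		modelTemperature: null,
		reasoningEffort: "medium",
		rateLimitSeconds: 10,
	}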

src/schemas/index.ts (+15 -3)

@@ -95,6 +95,16 @@ export const telemetrySettingsSchema = z.enum(telemetrySettings)
 
 export type TelemetrySetting = z.infer<typeof telemetrySettingsSchema>
 
+/**
+ * ReasoningEffort
+ */
+
+export const reasoningEfforts = ["low", "medium", "high"] as const
+
+export const reasoningEffortsSchema = z.enum(reasoningEfforts)
+
+export type ReasoningEffort = z.infer<typeof reasoningEffortsSchema>
+
 /**
  * ModelInfo
  */
@@ -110,7 +120,7 @@ export const modelInfoSchema = z.object({
 	cacheWritesPrice: z.number().optional(),
 	cacheReadsPrice: z.number().optional(),
 	description: z.string().optional(),
-	reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+	reasoningEffort: reasoningEffortsSchema.optional(),
 	thinking: z.boolean().optional(),
 	minTokensPerCachePoint: z.number().optional(),
 	maxCachePoints: z.number().optional(),
@@ -383,11 +393,12 @@ export const providerSettingsSchema = z.object({
 	requestyModelId: z.string().optional(),
 	requestyModelInfo: modelInfoSchema.nullish(),
 	// Claude 3.7 Sonnet Thinking
-	modelTemperature: z.number().nullish(),
 	modelMaxTokens: z.number().optional(),
 	modelMaxThinkingTokens: z.number().optional(),
 	// Generic
 	includeMaxTokens: z.boolean().optional(),
+	modelTemperature: z.number().nullish(),
+	reasoningEffort: reasoningEffortsSchema.optional(),
 	rateLimitSeconds: z.number().optional(),
 	// Fake AI
 	fakeAi: z.unknown().optional(),
@@ -470,11 +481,12 @@ const providerSettingsRecord: ProviderSettingsRecord = {
 	requestyModelId: undefined,
 	requestyModelInfo: undefined,
 	// Claude 3.7 Sonnet Thinking
-	modelTemperature: undefined,
 	modelMaxTokens: undefined,
 	modelMaxThinkingTokens: undefined,
 	// Generic
 	includeMaxTokens: undefined,
+	modelTemperature: undefined,
+	reasoningEffort: undefined,
 	rateLimitSeconds: undefined,
 	// Fake AI
 	fakeAi: undefined,
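
With the enum values and schema factored out in src/schemas/index.ts, the extension has a single source of truth for reasoning effort. A small sketch of how the exports might be reused (the toReasoningEffort helper and the "../schemas" import path are illustrative, not part of this commit):

	import { reasoningEfforts, reasoningEffortsSchema, type ReasoningEffort } from "../schemas"

	// Hypothetical helper: narrow an arbitrary value (e.g. read from persisted
	// settings) to the ReasoningEffort union, falling back to a default.
	function toReasoningEffort(value: unknown, fallback: ReasoningEffort = "medium"): ReasoningEffort {
		const parsed = reasoningEffortsSchema.safeParse(value)
		return parsed.success ? parsed.data : fallback
	}

	toReasoningEffort("high") // "high"
	toReasoningEffort("maximum") // "medium" (not an allowed value)

	// The readonly tuple can also drive UI option lists directly.
	for (const effort of reasoningEfforts) {
		console.log(effort) // "low", "medium", "high"
	}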