Browse Source

Add Nous Research provider (#7141)

* Added Nous Research provider

* Fix casing on import
canvrno 1 month ago
parent
commit
cc25833963

+ 5 - 0
.changeset/dull-steaks-wave.md

@@ -0,0 +1,5 @@
+---
+"claude-dev": patch
+---
+
+Added Nous Research provider

+ 1 - 0
cli/pkg/cli/auth/byo_quick_setup.go

@@ -176,6 +176,7 @@ func validateQuickSetupProvider(providerID string) (cline.ApiProvider, error) {
 		cline.ApiProvider_XAI:           true,
 		cline.ApiProvider_CEREBRAS:      true,
 		cline.ApiProvider_OLLAMA:        true,
+		cline.ApiProvider_NOUSRESEARCH:  true,
 	}
 
 	if !supportedProviders[provider] {

+ 3 - 0
cli/pkg/cli/auth/providers_byo.go

@@ -26,6 +26,7 @@ func GetBYOProviderList() []BYOProviderOption {
 		{Name: "Google Gemini", Provider: cline.ApiProvider_GEMINI},
 		{Name: "Ollama", Provider: cline.ApiProvider_OLLAMA},
 		{Name: "Cerebras", Provider: cline.ApiProvider_CEREBRAS},
+		{Name: "NousResearch", Provider: cline.ApiProvider_NOUSRESEARCH},
 		{Name: "Oracle Code Assist", Provider: cline.ApiProvider_OCA},
 	}
 }
@@ -100,6 +101,8 @@ func GetBYOProviderPlaceholder(provider cline.ApiProvider) string {
 		return "e.g., qwen3-coder:30b"
 	case cline.ApiProvider_CEREBRAS:
 		return "e.g., gpt-oss-120b"
+	case cline.ApiProvider_NOUSRESEARCH:
+		return "e.g., Hermes-4-405B"
 	case cline.ApiProvider_OCA:
 		return "e.g., oca/llama4"
 	default:

+ 8 - 0
cli/pkg/cli/auth/providers_list.go

@@ -111,6 +111,7 @@ func (r *ProviderListResult) GetAllReadyProviders() []*ProviderDisplay {
 		cline.ApiProvider_GEMINI,
 		cline.ApiProvider_OLLAMA,
 		cline.ApiProvider_CEREBRAS,
+		cline.ApiProvider_NOUSRESEARCH,
 		cline.ApiProvider_OCA,
 		cline.ApiProvider_HICAP,
 	}
@@ -241,6 +242,8 @@ func mapProviderStringToEnum(providerStr string) (cline.ApiProvider, bool) {
 		return cline.ApiProvider_OCA, true
 	case "hicap":
 		return cline.ApiProvider_HICAP, true
+	case "nousResearch":
+		return cline.ApiProvider_NOUSRESEARCH, true
 	default:
 		return cline.ApiProvider_ANTHROPIC, false // Return 0 value with false
 	}
@@ -274,6 +277,8 @@ func GetProviderIDForEnum(provider cline.ApiProvider) string {
 		return "oca"
 	case cline.ApiProvider_HICAP:
 		return "hicap"
+	case cline.ApiProvider_NOUSRESEARCH:
+		return "nousResearch"
 	default:
 		return ""
 	}
@@ -353,6 +358,8 @@ func GetProviderDisplayName(provider cline.ApiProvider) string {
 		return "Oracle Code Assist"
 	case cline.ApiProvider_HICAP:
 		return "Hicap"
+	case cline.ApiProvider_NOUSRESEARCH:
+		return "NousResearch"
 	default:
 		return "Unknown"
 	}
@@ -475,6 +482,7 @@ func DetectAllConfiguredProviders(ctx context.Context, manager *task.Manager) ([
 		{cline.ApiProvider_OLLAMA, "ollamaBaseUrl"}, // Ollama uses baseUrl instead of API key
 		{cline.ApiProvider_CEREBRAS, "cerebrasApiKey"},
 		{cline.ApiProvider_HICAP, "hicapApiKey"},
+		{cline.ApiProvider_NOUSRESEARCH, "nousResearchApiKey"},
 	}
 
 	for _, providerCheck := range providersToCheck {

+ 14 - 0
cli/pkg/cli/auth/update_api_configurations.go

@@ -163,6 +163,15 @@ func GetProviderFields(provider cline.ApiProvider) (ProviderFields, error) {
 			ActModeProviderSpecificModelIDField:  "actModeHicapModelId",
 		}, nil
 
+	case cline.ApiProvider_NOUSRESEARCH:
+		return ProviderFields{
+			APIKeyField:                          "nousResearchApiKey",
+			PlanModeModelIDField:                 "planModeApiModelId",
+			ActModeModelIDField:                  "actModeApiModelId",
+			PlanModeProviderSpecificModelIDField: "planModeNousResearchModelId",
+			ActModeProviderSpecificModelIDField:  "actModeNousResearchModelId",
+		}, nil
+
 	default:
 		return ProviderFields{}, fmt.Errorf("unsupported provider: %v", provider)
 	}
@@ -278,6 +287,8 @@ func setAPIKeyField(apiConfig *cline.ModelsApiConfiguration, fieldName string, v
 		apiConfig.OcaApiKey = value
 	case "hicapApiKey":
 		apiConfig.HicapApiKey = value
+	case "nousResearchApiKey":
+		apiConfig.NousResearchApiKey = value
 	}
 }
 
@@ -302,6 +313,9 @@ func setProviderSpecificModelID(apiConfig *cline.ModelsApiConfiguration, fieldNa
 	case "planModeHicapModelId":
 		apiConfig.PlanModeHicapModelId = value
 		apiConfig.ActModeHicapModelId = value
+	case "planModeNousResearchModelId":
+		apiConfig.PlanModeNousResearchModelId = value
+		apiConfig.ActModeNousResearchModelId = value
 	}
 }
 

+ 71 - 0
cli/pkg/generated/providers.go

@@ -145,6 +145,7 @@ const (
 	XAI = "xai"
 	CEREBRAS = "cerebras"
 	OCA = "oca"
+	NOUSRESEARCH = "nousResearch"
 )
 
 // AllProviders returns a slice of enabled provider IDs for the CLI build.
@@ -161,6 +162,7 @@ var AllProviders = []string{
 	"xai",
 	"cerebras",
 	"oca",
+	"nousResearch",
 }
 
 // ConfigField represents a configuration field requirement
@@ -318,6 +320,15 @@ var rawConfigFields = `	[
 	    "fieldType": "password",
 	    "placeholder": "Enter your API key"
 	  },
+	  {
+	    "name": "nousResearchApiKey",
+	    "type": "string",
+	    "comment": "",
+	    "category": "nousResearch",
+	    "required": true,
+	    "fieldType": "password",
+	    "placeholder": "Enter your API key"
+	  },
 	  {
 	    "name": "ulid",
 	    "type": "string",
@@ -435,6 +446,15 @@ var rawConfigFields = `	[
 	    "fieldType": "url",
 	    "placeholder": "https://api.example.com"
 	  },
+	  {
+	    "name": "minimaxApiLine",
+	    "type": "string",
+	    "comment": "",
+	    "category": "general",
+	    "required": false,
+	    "fieldType": "string",
+	    "placeholder": ""
+	  },
 	  {
 	    "name": "ocaMode",
 	    "type": "string",
@@ -775,6 +795,24 @@ var rawModelDefinitions = `	{
 	      "supportsImages": false,
 	      "supportsPromptCache": false,
 	      "description": "A compact 20B open-weight Mixture-of-Experts language model designed for strong reasoning and tool use, ideal for edge devices and local inference."
+	    },
+	    "qwen.qwen3-coder-30b-a3b-v1:0": {
+	      "maxTokens": 8192,
+	      "contextWindow": 262144,
+	      "inputPrice": 0,
+	      "outputPrice": 0,
+	      "supportsImages": false,
+	      "supportsPromptCache": false,
+	      "description": "Qwen3 Coder 30B MoE model with 3.3B activated parameters, optimized for code generation and analysis with 256K context window."
+	    },
+	    "qwen.qwen3-coder-480b-a35b-v1:0": {
+	      "maxTokens": 8192,
+	      "contextWindow": 262144,
+	      "inputPrice": 0,
+	      "outputPrice": 1,
+	      "supportsImages": false,
+	      "supportsPromptCache": false,
+	      "description": "Qwen3 Coder 480B flagship MoE model with 35B activated parameters, designed for complex coding tasks with advanced reasoning capabilities and 256K context window."
 	    }
 	  },
 	  "gemini": {
@@ -1263,6 +1301,26 @@ var rawModelDefinitions = `	{
 	      "supportsPromptCache": false,
 	      "description": "SOTA performance with ~1500 tokens/s"
 	    }
+	  },
+	  "nousResearch": {
+	    "Hermes-4-405B": {
+	      "maxTokens": 8192,
+	      "contextWindow": 128000,
+	      "inputPrice": 0,
+	      "outputPrice": 0,
+	      "supportsImages": false,
+	      "supportsPromptCache": false,
+	      "description": "This is the largest model in the Hermes 4 family, and it is the fullest expression of our design, focused on advanced reasoning and creative depth rather than optimizing inference speed or cost."
+	    },
+	    "Hermes-4-70B": {
+	      "maxTokens": 8192,
+	      "contextWindow": 128000,
+	      "inputPrice": 0,
+	      "outputPrice": 0,
+	      "supportsImages": false,
+	      "supportsPromptCache": false,
+	      "description": "This incarnation of Hermes 4 balances scale and size. It handles complex reasoning tasks, while staying fast and cost effective. A versatile choice for many use cases."
+	    }
 	  }
 	}`
 
@@ -1432,6 +1490,18 @@ func GetProviderDefinitions() (map[string]ProviderDefinition, error) {
 		HasDynamicModels: false,
 		SetupInstructions: `Configure Oca API credentials`,
 	}
+
+	// NousResearch
+	definitions["nousResearch"] = ProviderDefinition{
+		ID:              "nousResearch",
+		Name:            "NousResearch",
+		RequiredFields:  getFieldsByProvider("nousResearch", configFields, true),
+		OptionalFields:  getFieldsByProvider("nousResearch", configFields, false),
+		Models:          modelDefinitions["nousResearch"],
+		DefaultModelID:  "Hermes-4-405B",
+		HasDynamicModels: false,
+		SetupInstructions: `Configure NousResearch API credentials`,
+	}
 	
 	return definitions, nil
 }
@@ -1459,6 +1529,7 @@ func GetProviderDisplayName(providerID string) string {
 		"xai": "X AI (Grok)",
 		"cerebras": "Cerebras",
 		"oca": "Oca",
+		"nousResearch": "NousResearch",
 	}
 	
 	if name, exists := displayNames[providerID]; exists {

+ 4 - 0
proto/cline/models.proto

@@ -422,6 +422,7 @@ enum ApiProvider {
   MINIMAX = 36;
   HICAP = 37;
   AIHUBMIX = 38;
+  NOUSRESEARCH = 39;
 }
 
 // Model info for OpenAI-compatible models
@@ -546,6 +547,7 @@ message ModelsApiConfiguration {
   optional string aihubmix_api_key = 82;
   optional string aihubmix_base_url = 83;
   optional string aihubmix_app_code = 84;
+  optional string nous_research_api_key = 85;
 
   // Plan mode configurations
   optional ApiProvider plan_mode_api_provider = 100;
@@ -585,6 +587,7 @@ message ModelsApiConfiguration {
   optional OpenRouterModelInfo plan_mode_hicap_model_info = 134;
   optional string plan_mode_aihubmix_model_id = 135;
   optional OpenAiCompatibleModelInfo plan_mode_aihubmix_model_info = 136;
+  optional string plan_mode_nous_research_model_id = 137;
 
   // Act mode configurations
   optional ApiProvider act_mode_api_provider = 200;
@@ -624,4 +627,5 @@ message ModelsApiConfiguration {
   optional OpenRouterModelInfo act_mode_hicap_model_info = 234;
   optional string act_mode_aihubmix_model_id = 235;
   optional OpenAiCompatibleModelInfo act_mode_aihubmix_model_info = 236;
+  optional string act_mode_nous_research_model_id = 237;
 }

+ 1 - 0
scripts/cli-providers.mjs

@@ -95,6 +95,7 @@ const ENABLED_PROVIDERS = [
 	"ollama", // Ollama local models
 	"cerebras", // Cerebras models
 	"oca", // Oracle Code Assist
+	"nousResearch", // NousResearch provider
 ]
 
 /**

+ 7 - 0
src/core/api/index.ts

@@ -25,6 +25,7 @@ import { MinimaxHandler } from "./providers/minimax"
 import { MistralHandler } from "./providers/mistral"
 import { MoonshotHandler } from "./providers/moonshot"
 import { NebiusHandler } from "./providers/nebius"
+import { NousResearchHandler } from "./providers/nousresearch"
 import { OcaHandler } from "./providers/oca"
 import { OllamaHandler } from "./providers/ollama"
 import { OpenAiHandler } from "./providers/openai"
@@ -416,6 +417,12 @@ function createHandlerForProvider(
 				hicapApiKey: options.hicapApiKey,
 				hicapModelId: mode === "plan" ? options.planModeHicapModelId : options.actModeHicapModelId,
 			})
+		case "nousResearch":
+			return new NousResearchHandler({
+				onRetryAttempt: options.onRetryAttempt,
+				nousResearchApiKey: options.nousResearchApiKey,
+				apiModelId: mode === "plan" ? options.planModeNousResearchModelId : options.actModeNousResearchModelId,
+			})
 		default:
 			return new AnthropicHandler({
 				onRetryAttempt: options.onRetryAttempt,

+ 92 - 0
src/core/api/providers/nousresearch.ts

@@ -0,0 +1,92 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import { ModelInfo, NousResearchModelId, nousResearchDefaultModelId, nousResearchModels } from "@shared/api"
+import OpenAI from "openai"
+import { ApiHandler, CommonApiHandlerOptions } from "../index"
+import { withRetry } from "../retry"
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { ApiStream } from "../transform/stream"
+
+interface NousResearchHandlerOptions extends CommonApiHandlerOptions {
+	nousResearchApiKey?: string
+	apiModelId?: string
+}
+
+export class NousResearchHandler implements ApiHandler {
+	private options: NousResearchHandlerOptions
+	private client: OpenAI | undefined
+
+	constructor(options: NousResearchHandlerOptions) {
+		this.options = options
+	}
+
+	private ensureClient(): OpenAI {
+		if (!this.client) {
+			if (!this.options.nousResearchApiKey) {
+				throw new Error("NousResearch API key is required")
+			}
+			try {
+				this.client = new OpenAI({
+					baseURL: "https://inference-api.nousresearch.com/v1",
+					apiKey: this.options.nousResearchApiKey,
+				})
+			} catch (error: any) {
+				throw new Error(`Error creating NousResearch client: ${error.message}`)
+			}
+		}
+		return this.client
+	}
+
+	@withRetry()
+	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+		const client = this.ensureClient()
+		const model = this.getModel()
+
+		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "system", content: systemPrompt },
+			...convertToOpenAiMessages(messages),
+		]
+
+		const stream = await client.chat.completions.create({
+			model: model.id,
+			messages: openAiMessages,
+			temperature: 0,
+			stream: true,
+			stream_options: { include_usage: true },
+		})
+
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (delta && "reasoning_content" in delta && delta.reasoning_content) {
+				yield {
+					type: "reasoning",
+					reasoning: (delta.reasoning_content as string | undefined) || "",
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+				}
+			}
+		}
+	}
+
+	getModel(): { id: NousResearchModelId; info: ModelInfo } {
+		const modelId = this.options.apiModelId
+
+		if (modelId && modelId in nousResearchModels) {
+			const id = modelId as NousResearchModelId
+			return { id, info: nousResearchModels[id] }
+		}
+		return { id: nousResearchDefaultModelId, info: nousResearchModels[nousResearchDefaultModelId] }
+	}
+}

+ 11 - 0
src/core/storage/StateManager.ts

@@ -492,6 +492,7 @@ export class StateManager {
 			zaiApiKey,
 			minimaxApiKey,
 			minimaxApiLine,
+			nousResearchApiKey,
 			requestTimeoutMs,
 			ocaBaseUrl,
 			ocaMode,
@@ -536,6 +537,7 @@ export class StateManager {
 			planModeHicapModelInfo,
 			planModeAihubmixModelId,
 			planModeAihubmixModelInfo,
+			planModeNousResearchModelId,
 			// Act mode configurations
 			actModeApiProvider,
 			actModeApiModelId,
@@ -572,6 +574,7 @@ export class StateManager {
 			actModeHicapModelInfo,
 			actModeAihubmixModelId,
 			actModeAihubmixModelInfo,
+			actModeNousResearchModelId,
 		} = apiConfiguration
 
 		// Batch update global state keys
@@ -612,6 +615,7 @@ export class StateManager {
 			planModeHicapModelInfo,
 			planModeAihubmixModelId,
 			planModeAihubmixModelInfo,
+			planModeNousResearchModelId,
 
 			// Act mode configuration updates
 			actModeApiProvider,
@@ -649,6 +653,7 @@ export class StateManager {
 			actModeHicapModelInfo,
 			actModeAihubmixModelId,
 			actModeAihubmixModelInfo,
+			actModeNousResearchModelId,
 
 			// Global state updates
 			awsRegion,
@@ -735,6 +740,7 @@ export class StateManager {
 			minimaxApiKey,
 			hicapApiKey,
 			aihubmixApiKey,
+			nousResearchApiKey,
 		})
 	}
 
@@ -1155,6 +1161,8 @@ export class StateManager {
 				this.taskStateCache["planModeAihubmixModelId"] || this.globalStateCache["planModeAihubmixModelId"],
 			planModeAihubmixModelInfo:
 				this.taskStateCache["planModeAihubmixModelInfo"] || this.globalStateCache["planModeAihubmixModelInfo"],
+			planModeNousResearchModelId:
+				this.taskStateCache["planModeNousResearchModelId"] || this.globalStateCache["planModeNousResearchModelId"],
 
 			// Act mode configurations
 			actModeApiProvider:
@@ -1221,6 +1229,9 @@ export class StateManager {
 				this.taskStateCache["actModeAihubmixModelId"] || this.globalStateCache["actModeAihubmixModelId"],
 			actModeAihubmixModelInfo:
 				this.taskStateCache["actModeAihubmixModelInfo"] || this.globalStateCache["actModeAihubmixModelInfo"],
+			actModeNousResearchModelId:
+				this.taskStateCache["actModeNousResearchModelId"] || this.globalStateCache["actModeNousResearchModelId"],
+			nousResearchApiKey: this.secretsCache["nousResearchApiKey"],
 		}
 	}
 }

+ 10 - 0
src/core/storage/utils/state-helpers.ts

@@ -54,6 +54,7 @@ export async function readSecretsFromDisk(context: ExtensionContext): Promise<Se
 		minimaxApiKey,
 		hicapApiKey,
 		aihubmixApiKey,
+		nousResearchApiKey,
 	] = await Promise.all([
 		context.secrets.get("apiKey") as Promise<Secrets["apiKey"]>,
 		context.secrets.get("openRouterApiKey") as Promise<Secrets["openRouterApiKey"]>,
@@ -96,6 +97,7 @@ export async function readSecretsFromDisk(context: ExtensionContext): Promise<Se
 		context.secrets.get("minimaxApiKey") as Promise<Secrets["minimaxApiKey"]>,
 		context.secrets.get("hicapApiKey") as Promise<Secrets["hicapApiKey"]>,
 		context.secrets.get("aihubmixApiKey") as Promise<Secrets["aihubmixApiKey"]>,
+		context.secrets.get("nousResearchApiKey") as Promise<Secrets["nousResearchApiKey"]>,
 	])
 
 	return {
@@ -140,6 +142,7 @@ export async function readSecretsFromDisk(context: ExtensionContext): Promise<Se
 		minimaxApiKey,
 		hicapApiKey,
 		aihubmixApiKey,
+		nousResearchApiKey,
 	}
 }
 
@@ -379,6 +382,8 @@ export async function readGlobalStateFromDisk(context: ExtensionContext): Promis
 			context.globalState.get<GlobalStateAndSettings["planModeAihubmixModelId"]>("planModeAihubmixModelId")
 		const planModeAihubmixModelInfo =
 			context.globalState.get<GlobalStateAndSettings["planModeAihubmixModelInfo"]>("planModeAihubmixModelInfo")
+		const planModeNousResearchModelId =
+			context.globalState.get<GlobalStateAndSettings["planModeNousResearchModelId"]>("planModeNousResearchModelId")
 		// Act mode configurations
 		const actModeApiProvider = context.globalState.get<GlobalStateAndSettings["actModeApiProvider"]>("actModeApiProvider")
 		const actModeApiModelId = context.globalState.get<GlobalStateAndSettings["actModeApiModelId"]>("actModeApiModelId")
@@ -440,6 +445,8 @@ export async function readGlobalStateFromDisk(context: ExtensionContext): Promis
 			context.globalState.get<GlobalStateAndSettings["actModeBasetenModelInfo"]>("actModeBasetenModelInfo")
 		const actModeOcaModelId = context.globalState.get("actModeOcaModelId") as string | undefined
 		const actModeOcaModelInfo = context.globalState.get("actModeOcaModelInfo") as OcaModelInfo | undefined
+		const actModeNousResearchModelId =
+			context.globalState.get<GlobalStateAndSettings["actModeNousResearchModelId"]>("actModeNousResearchModelId")
 		const sapAiCoreUseOrchestrationMode =
 			context.globalState.get<GlobalStateAndSettings["sapAiCoreUseOrchestrationMode"]>("sapAiCoreUseOrchestrationMode")
 		const actModeHicapModelId = context.globalState.get<GlobalStateAndSettings["actModeHicapModelId"]>("actModeHicapModelId")
@@ -569,6 +576,7 @@ export async function readGlobalStateFromDisk(context: ExtensionContext): Promis
 			planModeHicapModelInfo,
 			planModeAihubmixModelId,
 			planModeAihubmixModelInfo,
+			planModeNousResearchModelId,
 			// Act mode configurations
 			actModeApiProvider: actModeApiProvider || apiProvider,
 			actModeApiModelId,
@@ -605,6 +613,7 @@ export async function readGlobalStateFromDisk(context: ExtensionContext): Promis
 			actModeHicapModelInfo,
 			actModeAihubmixModelId,
 			actModeAihubmixModelInfo,
+			actModeNousResearchModelId,
 
 			// Other global fields
 			focusChainSettings: focusChainSettings || DEFAULT_FOCUS_CHAIN_SETTINGS,
@@ -726,6 +735,7 @@ export async function resetGlobalState(controller: Controller) {
 		"minimaxApiKey",
 		"hicapApiKey",
 		"aihubmixApiKey",
+		"nousResearchApiKey",
 	]
 	await Promise.all(secretKeys.map((key) => context.secrets.delete(key)))
 	await controller.stateManager.reInitialize()

+ 31 - 0
src/shared/api.ts

@@ -40,6 +40,7 @@ export type ApiProvider =
 	| "aihubmix"
 	| "minimax"
 	| "hicap"
+	| "nousResearch"
 
 export interface ApiHandlerSecrets {
 	apiKey?: string // anthropic
@@ -83,6 +84,7 @@ export interface ApiHandlerSecrets {
 	difyApiKey?: string
 	minimaxApiKey?: string
 	hicapApiKey?: string
+	nousResearchApiKey?: string
 }
 
 export interface ApiHandlerOptions {
@@ -170,6 +172,7 @@ export interface ApiHandlerOptions {
 	planModeAihubmixModelInfo?: OpenAiCompatibleModelInfo
 	planModeHicapModelId?: string
 	planModeHicapModelInfo?: ModelInfo
+	planModeNousResearchModelId?: string
 	// Act mode configurations
 
 	// Act mode configurations
@@ -207,6 +210,7 @@ export interface ApiHandlerOptions {
 	actModeAihubmixModelInfo?: OpenAiCompatibleModelInfo
 	actModeHicapModelId?: string
 	actModeHicapModelInfo?: ModelInfo
+	actModeNousResearchModelId?: string
 }
 
 export type ApiConfiguration = ApiHandlerOptions &
@@ -3853,3 +3857,30 @@ export const minimaxModels = {
 		cacheReadsPrice: 0,
 	},
 } as const satisfies Record<string, ModelInfo>
+
+// NousResearch
+// https://inference-api.nousresearch.com
+export type NousResearchModelId = keyof typeof nousResearchModels
+export const nousResearchDefaultModelId: NousResearchModelId = "Hermes-4-405B"
+export const nousResearchModels = {
+	"Hermes-4-405B": {
+		maxTokens: 8192,
+		contextWindow: 128_000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.09,
+		outputPrice: 0.37,
+		description:
+			"This is the largest model in the Hermes 4 family, and it is the fullest expression of our design, focused on advanced reasoning and creative depth rather than optimizing inference speed or cost.",
+	},
+	"Hermes-4-70B": {
+		maxTokens: 8192,
+		contextWindow: 128_000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.05,
+		outputPrice: 0.2,
+		description:
+			"This incarnation of Hermes 4 balances scale and size. It handles complex reasoning tasks, while staying fast and cost effective. A versatile choice for many use cases.",
+	},
+} as const satisfies Record<string, ModelInfo>

+ 10 - 0
src/shared/proto-conversions/models/api-configuration-conversion.ts

@@ -313,6 +313,8 @@ function convertApiProviderToProto(provider: string | undefined): ProtoApiProvid
 			return ProtoApiProvider.MINIMAX
 		case "hicap":
 			return ProtoApiProvider.HICAP
+		case "nousResearch":
+			return ProtoApiProvider.NOUSRESEARCH
 		default:
 			return ProtoApiProvider.ANTHROPIC
 	}
@@ -399,6 +401,8 @@ export function convertProtoToApiProvider(provider: ProtoApiProvider): ApiProvid
 			return "aihubmix"
 		case ProtoApiProvider.MINIMAX:
 			return "minimax"
+		case ProtoApiProvider.NOUSRESEARCH:
+			return "nousResearch"
 		default:
 			return "anthropic"
 	}
@@ -483,6 +487,7 @@ export function convertApiConfigurationToProto(config: ApiConfiguration): ProtoA
 		ocaBaseUrl: config.ocaBaseUrl,
 		minimaxApiKey: config.minimaxApiKey,
 		minimaxApiLine: config.minimaxApiLine,
+		nousResearchApiKey: config.nousResearchApiKey,
 		ocaMode: config.ocaMode,
 		aihubmixApiKey: config.aihubmixApiKey,
 		aihubmixBaseUrl: config.aihubmixBaseUrl,
@@ -526,6 +531,7 @@ export function convertApiConfigurationToProto(config: ApiConfiguration): ProtoA
 		planModeAihubmixModelInfo: convertOpenAiCompatibleModelInfoToProto(config.planModeAihubmixModelInfo),
 		planModeHicapModelId: config.planModeHicapModelId,
 		planModeHicapModelInfo: convertModelInfoToProtoOpenRouter(config.planModeHicapModelInfo),
+		planModeNousResearchModelId: config.planModeNousResearchModelId,
 
 		// Act mode configurations
 		actModeApiProvider: config.actModeApiProvider ? convertApiProviderToProto(config.actModeApiProvider) : undefined,
@@ -563,6 +569,7 @@ export function convertApiConfigurationToProto(config: ApiConfiguration): ProtoA
 		actModeAihubmixModelInfo: convertOpenAiCompatibleModelInfoToProto(config.actModeAihubmixModelInfo),
 		actModeHicapModelId: config.actModeHicapModelId,
 		actModeHicapModelInfo: convertModelInfoToProtoOpenRouter(config.actModeHicapModelInfo),
+		actModeNousResearchModelId: config.actModeNousResearchModelId,
 	}
 }
 
@@ -651,6 +658,7 @@ export function convertProtoToApiConfiguration(protoConfig: ProtoApiConfiguratio
 		minimaxApiLine: protoConfig.minimaxApiLine,
 		hicapApiKey: protoConfig.hicapApiKey,
 		hicapModelId: protoConfig.hicapModelId,
+		nousResearchApiKey: protoConfig.nousResearchApiKey,
 
 		// Plan mode configurations
 		planModeApiProvider:
@@ -691,6 +699,7 @@ export function convertProtoToApiConfiguration(protoConfig: ProtoApiConfiguratio
 		planModeAihubmixModelInfo: convertProtoToOpenAiCompatibleModelInfo(protoConfig.planModeAihubmixModelInfo),
 		planModeHicapModelId: protoConfig.planModeHicapModelId,
 		planModeHicapModelInfo: convertProtoToModelInfo(protoConfig.planModeHicapModelInfo),
+		planModeNousResearchModelId: protoConfig.planModeNousResearchModelId,
 
 		// Act mode configurations
 		actModeApiProvider:
@@ -729,5 +738,6 @@ export function convertProtoToApiConfiguration(protoConfig: ProtoApiConfiguratio
 		actModeAihubmixModelInfo: convertProtoToOpenAiCompatibleModelInfo(protoConfig.actModeAihubmixModelInfo),
 		actModeHicapModelId: protoConfig.actModeHicapModelId,
 		actModeHicapModelInfo: convertProtoToModelInfo(protoConfig.actModeHicapModelInfo),
+		actModeNousResearchModelId: protoConfig.actModeNousResearchModelId,
 	}
 }

+ 3 - 0
src/shared/storage/state-keys.ts

@@ -158,6 +158,7 @@ export interface Settings {
 	planModeHicapModelInfo: ModelInfo | undefined
 	planModeAihubmixModelId: string | undefined
 	planModeAihubmixModelInfo: ModelInfo | undefined
+	planModeNousResearchModelId: string | undefined
 	// Act mode configurations
 	actModeApiProvider: ApiProvider
 	actModeApiModelId: string | undefined
@@ -194,6 +195,7 @@ export interface Settings {
 	actModeHicapModelInfo: ModelInfo | undefined
 	actModeAihubmixModelId: string | undefined
 	actModeAihubmixModelInfo: ModelInfo | undefined
+	actModeNousResearchModelId: string | undefined
 
 	// OpenTelemetry configuration
 	openTelemetryEnabled: boolean
@@ -254,6 +256,7 @@ export interface Secrets {
 	minimaxApiKey: string | undefined
 	hicapApiKey: string | undefined
 	aihubmixApiKey: string | undefined
+	nousResearchApiKey: string | undefined
 }
 
 export interface LocalState {

+ 6 - 0
webview-ui/src/components/settings/ApiOptions.tsx

@@ -34,6 +34,7 @@ import { MinimaxProvider } from "./providers/MiniMaxProvider"
 import { MistralProvider } from "./providers/MistralProvider"
 import { MoonshotProvider } from "./providers/MoonshotProvider"
 import { NebiusProvider } from "./providers/NebiusProvider"
+import { NousResearchProvider } from "./providers/NousresearchProvider"
 import { OcaProvider } from "./providers/OcaProvider"
 import { OllamaProvider } from "./providers/OllamaProvider"
 import { OpenAICompatibleProvider } from "./providers/OpenAICompatible"
@@ -168,6 +169,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage, is
 			{ value: "minimax", label: "MiniMax" },
 			{ value: "hicap", label: "Hicap" },
 			{ value: "aihubmix", label: "AIhubmix" },
+			{ value: "nousResearch", label: "NousResearch" },
 		]
 
 		// Filter by platform
@@ -525,6 +527,10 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage, is
 				<MinimaxProvider currentMode={currentMode} isPopup={isPopup} showModelOptions={showModelOptions} />
 			)}
 
+			{apiConfiguration && selectedProvider === "nousResearch" && (
+				<NousResearchProvider currentMode={currentMode} isPopup={isPopup} showModelOptions={showModelOptions} />
+			)}
+
 			{apiConfiguration && selectedProvider === "oca" && <OcaProvider currentMode={currentMode} isPopup={isPopup} />}
 
 			{apiConfiguration && selectedProvider === "aihubmix" && (

+ 69 - 0
webview-ui/src/components/settings/providers/NousresearchProvider.tsx

@@ -0,0 +1,69 @@
+import { nousResearchModels } from "@shared/api"
+import { Mode } from "@shared/storage/types"
+import { useExtensionState } from "@/context/ExtensionStateContext"
+import { ApiKeyField } from "../common/ApiKeyField"
+import { ModelInfoView } from "../common/ModelInfoView"
+import { ModelSelector } from "../common/ModelSelector"
+import { normalizeApiConfiguration } from "../utils/providerUtils"
+import { useApiConfigurationHandlers } from "../utils/useApiConfigurationHandlers"
+
+/**
+ * Props for the NousResearchProvider component
+ */
+interface NousResearchProviderProps {
+	showModelOptions: boolean
+	isPopup?: boolean
+	currentMode: Mode
+}
+
+/**
+ * The NousResearch provider configuration component
+ */
+export const NousResearchProvider = ({ showModelOptions, isPopup, currentMode }: NousResearchProviderProps) => {
+	const { apiConfiguration } = useExtensionState()
+	const { handleFieldChange, handleModeFieldChange } = useApiConfigurationHandlers()
+
+	// Get the normalized configuration
+	const { selectedModelId, selectedModelInfo } = normalizeApiConfiguration(apiConfiguration, currentMode)
+
+	return (
+		<div>
+			<ApiKeyField
+				initialValue={apiConfiguration?.nousResearchApiKey || ""}
+				onChange={(value) => handleFieldChange("nousResearchApiKey", value)}
+				providerName="NousResearch"
+			/>
+
+			{showModelOptions && (
+				<>
+					<ModelSelector
+						label="Model"
+						models={nousResearchModels}
+						onChange={(e: any) =>
+							handleModeFieldChange(
+								{ plan: "planModeNousResearchModelId", act: "actModeNousResearchModelId" },
+								e.target.value,
+								currentMode,
+							)
+						}
+						selectedModelId={selectedModelId}
+					/>
+
+					<ModelInfoView isPopup={isPopup} modelInfo={selectedModelInfo} selectedModelId={selectedModelId} />
+
+					<p
+						style={{
+							fontSize: "12px",
+							marginTop: 3,
+							color: "var(--vscode-descriptionForeground)",
+						}}>
+						<span style={{ color: "var(--vscode-errorForeground)" }}>
+							(<span style={{ fontWeight: 500 }}>Note:</span> Cline uses complex prompts and works best with Claude
+							models. Less capable models may not work as expected.)
+						</span>
+					</p>
+				</>
+			)}
+		</div>
+	)
+}

+ 22 - 0
webview-ui/src/components/settings/utils/providerUtils.ts

@@ -46,6 +46,8 @@ import {
 	moonshotModels,
 	nebiusDefaultModelId,
 	nebiusModels,
+	nousResearchDefaultModelId,
+	nousResearchModels,
 	openAiModelInfoSaneDefaults,
 	openAiNativeDefaultModelId,
 	openAiNativeModels,
@@ -378,6 +380,19 @@ export function normalizeApiConfiguration(
 			}
 		case "minimax":
 			return getProviderData(minimaxModels, minimaxDefaultModelId)
+		case "nousResearch": {
+			const nousResearchModelId =
+				currentMode === "plan"
+					? apiConfiguration?.planModeNousResearchModelId
+					: apiConfiguration?.actModeNousResearchModelId
+			return {
+				selectedProvider: provider,
+				selectedModelId: nousResearchModelId || nousResearchDefaultModelId,
+				selectedModelInfo:
+					nousResearchModelId && nousResearchModelId in nousResearchModels
+						? nousResearchModels[nousResearchModelId as keyof typeof nousResearchModels]
+						: nousResearchModels[nousResearchDefaultModelId],
+			}
+		}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
 	}
@@ -411,6 +426,7 @@ export function getModeSpecificFields(apiConfiguration: ApiConfiguration | undef
 			huaweiCloudMaasModelId: undefined,
 			hicapModelId: undefined,
 			aihubmixModelId: undefined,
+			nousResearchModelId: undefined,
 
 			// Model info objects
 			openAiModelInfo: undefined,
@@ -460,6 +476,8 @@ export function getModeSpecificFields(apiConfiguration: ApiConfiguration | undef
 		ocaModelId: mode === "plan" ? apiConfiguration.planModeOcaModelId : apiConfiguration.actModeOcaModelId,
 		hicapModelId: mode === "plan" ? apiConfiguration.planModeHicapModelId : apiConfiguration.actModeHicapModelId,
 		aihubmixModelId: mode === "plan" ? apiConfiguration.planModeAihubmixModelId : apiConfiguration.actModeAihubmixModelId,
+		nousResearchModelId:
+			mode === "plan" ? apiConfiguration.planModeNousResearchModelId : apiConfiguration.actModeNousResearchModelId,
 
 		// Model info objects
 		openAiModelInfo: mode === "plan" ? apiConfiguration.planModeOpenAiModelInfo : apiConfiguration.actModeOpenAiModelInfo,
@@ -651,6 +669,10 @@ export async function syncModeConfigurations(
 			updates.planModeOcaModelInfo = sourceFields.ocaModelInfo
 			updates.actModeOcaModelInfo = sourceFields.ocaModelInfo
 			break
+		case "nousResearch":
+			updates.planModeNousResearchModelId = sourceFields.nousResearchModelId
+			updates.actModeNousResearchModelId = sourceFields.nousResearchModelId
+			break
 
 		case "aihubmix":
 			updates.planModeAihubmixModelId = sourceFields.aihubmixModelId