Hannes Rudolph 2 weeks ago
Parent
Commit
f05dd59a2b
53 changed files with 4 additions and 786 deletions
  1. +0 -1
      .github/ISSUE_TEMPLATE/bug_report.yml
  2. +0 -1
      packages/types/src/global-settings.ts
  3. +0 -11
      packages/types/src/provider-settings.ts
  4. +0 -19
      packages/types/src/providers/glama.ts
  5. +0 -4
      packages/types/src/providers/index.ts
  6. +0 -7
      src/activate/handleUri.ts
  7. +0 -3
      src/api/index.ts
  8. +0 -232
      src/api/providers/__tests__/glama.spec.ts
  9. +1 -21
      src/api/providers/fetchers/__tests__/modelCache.spec.ts
  10. +0 -42
      src/api/providers/fetchers/glama.ts
  11. +0 -5
      src/api/providers/fetchers/modelCache.ts
  12. +0 -146
      src/api/providers/glama.ts
  13. +0 -1
      src/api/providers/index.ts
  14. +0 -34
      src/core/webview/ClineProvider.ts
  15. +0 -1
      src/core/webview/__tests__/ClineProvider.apiHandlerRebuild.spec.ts
  16. +0 -9
      src/core/webview/__tests__/ClineProvider.spec.ts
  17. +0 -2
      src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts
  18. +0 -16
      src/core/webview/__tests__/webviewMessageHandler.spec.ts
  19. +0 -2
      src/core/webview/webviewMessageHandler.ts
  20. +0 -2
      src/shared/ProfileValidator.ts
  21. +0 -15
      src/shared/__tests__/ProfileValidator.spec.ts
  22. +0 -2
      src/shared/__tests__/checkExistApiConfig.spec.ts
  23. +0 -1
      src/shared/api.ts
  24. +1 -16
      webview-ui/src/components/settings/ApiOptions.tsx
  25. +0 -1
      webview-ui/src/components/settings/ModelPicker.tsx
  26. +0 -1
      webview-ui/src/components/settings/__tests__/ApiOptions.provider-filtering.spec.tsx
  27. +1 -1
      webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx
  28. +0 -1
      webview-ui/src/components/settings/constants.ts
  29. +0 -79
      webview-ui/src/components/settings/providers/Glama.tsx
  30. +0 -1
      webview-ui/src/components/settings/providers/index.ts
  31. +1 -9
      webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
  32. +0 -5
      webview-ui/src/components/ui/hooks/useSelectedModel.ts
  33. +0 -2
      webview-ui/src/i18n/locales/ca/settings.json
  34. +0 -2
      webview-ui/src/i18n/locales/de/settings.json
  35. +0 -2
      webview-ui/src/i18n/locales/en/settings.json
  36. +0 -2
      webview-ui/src/i18n/locales/es/settings.json
  37. +0 -2
      webview-ui/src/i18n/locales/fr/settings.json
  38. +0 -2
      webview-ui/src/i18n/locales/hi/settings.json
  39. +0 -2
      webview-ui/src/i18n/locales/id/settings.json
  40. +0 -2
      webview-ui/src/i18n/locales/it/settings.json
  41. +0 -2
      webview-ui/src/i18n/locales/ja/settings.json
  42. +0 -2
      webview-ui/src/i18n/locales/ko/settings.json
  43. +0 -2
      webview-ui/src/i18n/locales/nl/settings.json
  44. +0 -2
      webview-ui/src/i18n/locales/pl/settings.json
  45. +0 -2
      webview-ui/src/i18n/locales/pt-BR/settings.json
  46. +0 -2
      webview-ui/src/i18n/locales/ru/settings.json
  47. +0 -2
      webview-ui/src/i18n/locales/tr/settings.json
  48. +0 -2
      webview-ui/src/i18n/locales/vi/settings.json
  49. +0 -2
      webview-ui/src/i18n/locales/zh-CN/settings.json
  50. +0 -2
      webview-ui/src/i18n/locales/zh-TW/settings.json
  51. +0 -4
      webview-ui/src/oauth/urls.ts
  52. +0 -50
      webview-ui/src/utils/__tests__/validate.test.ts
  53. +0 -5
      webview-ui/src/utils/validate.ts

+ 0 - 1
.github/ISSUE_TEMPLATE/bug_report.yml

@@ -81,7 +81,6 @@ body:
         - DeepSeek
         - Featherless AI
         - Fireworks AI
-        - Glama
         - Google Gemini
         - Google Vertex AI
         - Groq

+ 0 - 1
packages/types/src/global-settings.ts

@@ -210,7 +210,6 @@ export type RooCodeSettings = GlobalSettings & ProviderSettings
  */
 export const SECRET_STATE_KEYS = [
 	"apiKey",
-	"glamaApiKey",
 	"openRouterApiKey",
 	"awsAccessKey",
 	"awsApiKey",

+ 0 - 11
packages/types/src/provider-settings.ts

@@ -48,7 +48,6 @@ export const dynamicProviders = [
 	"io-intelligence",
 	"requesty",
 	"unbound",
-	"glama",
 	"roo",
 	"chutes",
 ] as const
@@ -206,11 +205,6 @@ const claudeCodeSchema = apiModelIdProviderModelSchema.extend({
 	claudeCodeMaxOutputTokens: z.number().int().min(1).max(200000).optional(),
 })
 
-const glamaSchema = baseProviderSettingsSchema.extend({
-	glamaModelId: z.string().optional(),
-	glamaApiKey: z.string().optional(),
-})
-
 const openRouterSchema = baseProviderSettingsSchema.extend({
 	openRouterApiKey: z.string().optional(),
 	openRouterModelId: z.string().optional(),
@@ -437,7 +431,6 @@ const defaultSchema = z.object({
 export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
 	claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })),
-	glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })),
 	openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
 	bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
 	vertexSchema.merge(z.object({ apiProvider: z.literal("vertex") })),
@@ -480,7 +473,6 @@ export const providerSettingsSchema = z.object({
 	apiProvider: providerNamesSchema.optional(),
 	...anthropicSchema.shape,
 	...claudeCodeSchema.shape,
-	...glamaSchema.shape,
 	...openRouterSchema.shape,
 	...bedrockSchema.shape,
 	...vertexSchema.shape,
@@ -537,7 +529,6 @@ export const PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options
 
 export const modelIdKeys = [
 	"apiModelId",
-	"glamaModelId",
 	"openRouterModelId",
 	"openAiModelId",
 	"ollamaModelId",
@@ -571,7 +562,6 @@ export const isTypicalProvider = (key: unknown): key is TypicalProvider =>
 export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
 	anthropic: "apiModelId",
 	"claude-code": "apiModelId",
-	glama: "glamaModelId",
 	openrouter: "openRouterModelId",
 	bedrock: "apiModelId",
 	vertex: "apiModelId",
@@ -727,7 +717,6 @@ export const MODELS_BY_PROVIDER: Record<
 	baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) },
 
 	// Dynamic providers; models pulled from remote APIs.
-	glama: { id: "glama", label: "Glama", models: [] },
 	huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
 	litellm: { id: "litellm", label: "LiteLLM", models: [] },
 	openrouter: { id: "openrouter", label: "OpenRouter", models: [] },

+ 0 - 19
packages/types/src/providers/glama.ts

@@ -1,19 +0,0 @@
-import type { ModelInfo } from "../model.js"
-
-// https://glama.ai/models
-export const glamaDefaultModelId = "anthropic/claude-3-7-sonnet"
-
-export const glamaDefaultModelInfo: ModelInfo = {
-	maxTokens: 8192,
-	contextWindow: 200_000,
-	supportsImages: true,
-	supportsPromptCache: true,
-	inputPrice: 3.0,
-	outputPrice: 15.0,
-	cacheWritesPrice: 3.75,
-	cacheReadsPrice: 0.3,
-	description:
-		"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)",
-}
-
-export const GLAMA_DEFAULT_TEMPERATURE = 0

+ 0 - 4
packages/types/src/providers/index.ts

@@ -9,7 +9,6 @@ export * from "./doubao.js"
 export * from "./featherless.js"
 export * from "./fireworks.js"
 export * from "./gemini.js"
-export * from "./glama.js"
 export * from "./groq.js"
 export * from "./huggingface.js"
 export * from "./io-intelligence.js"
@@ -44,7 +43,6 @@ import { doubaoDefaultModelId } from "./doubao.js"
 import { featherlessDefaultModelId } from "./featherless.js"
 import { fireworksDefaultModelId } from "./fireworks.js"
 import { geminiDefaultModelId } from "./gemini.js"
-import { glamaDefaultModelId } from "./glama.js"
 import { groqDefaultModelId } from "./groq.js"
 import { ioIntelligenceDefaultModelId } from "./io-intelligence.js"
 import { litellmDefaultModelId } from "./lite-llm.js"
@@ -81,8 +79,6 @@ export function getProviderDefaultModelId(
 			return openRouterDefaultModelId
 		case "requesty":
 			return requestyDefaultModelId
-		case "glama":
-			return glamaDefaultModelId
 		case "unbound":
 			return unboundDefaultModelId
 		case "litellm":

+ 0 - 7
src/activate/handleUri.ts

@@ -14,13 +14,6 @@ export const handleUri = async (uri: vscode.Uri) => {
 	}
 
 	switch (path) {
-		case "/glama": {
-			const code = query.get("code")
-			if (code) {
-				await visibleProvider.handleGlamaCallback(code)
-			}
-			break
-		}
 		case "/openrouter": {
 			const code = query.get("code")
 			if (code) {

+ 0 - 3
src/api/index.ts

@@ -6,7 +6,6 @@ import type { ProviderSettings, ModelInfo, ToolProtocol } from "@roo-code/types"
 import { ApiStream } from "./transform/stream"
 
 import {
-	GlamaHandler,
 	AnthropicHandler,
 	AwsBedrockHandler,
 	CerebrasHandler,
@@ -126,8 +125,6 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new AnthropicHandler(options)
 		case "claude-code":
 			return new ClaudeCodeHandler(options)
-		case "glama":
-			return new GlamaHandler(options)
 		case "openrouter":
 			return new OpenRouterHandler(options)
 		case "bedrock":

+ 0 - 232
src/api/providers/__tests__/glama.spec.ts

@@ -1,232 +0,0 @@
-// npx vitest run src/api/providers/__tests__/glama.spec.ts
-
-import { Anthropic } from "@anthropic-ai/sdk"
-
-import { GlamaHandler } from "../glama"
-import { ApiHandlerOptions } from "../../../shared/api"
-
-// Mock dependencies
-vitest.mock("../fetchers/modelCache", () => ({
-	getModels: vitest.fn().mockImplementation(() => {
-		return Promise.resolve({
-			"anthropic/claude-3-7-sonnet": {
-				maxTokens: 8192,
-				contextWindow: 200000,
-				supportsImages: true,
-				supportsPromptCache: true,
-				inputPrice: 3,
-				outputPrice: 15,
-				cacheWritesPrice: 3.75,
-				cacheReadsPrice: 0.3,
-				description: "Claude 3.7 Sonnet",
-				thinking: false,
-			},
-			"openai/gpt-4o": {
-				maxTokens: 4096,
-				contextWindow: 128000,
-				supportsImages: true,
-				supportsPromptCache: false,
-				inputPrice: 5,
-				outputPrice: 15,
-				description: "GPT-4o",
-			},
-		})
-	}),
-}))
-
-// Mock OpenAI client
-const mockCreate = vitest.fn()
-const mockWithResponse = vitest.fn()
-
-vitest.mock("openai", () => {
-	return {
-		__esModule: true,
-		default: vitest.fn().mockImplementation(() => ({
-			chat: {
-				completions: {
-					create: (...args: any[]) => {
-						const stream = {
-							[Symbol.asyncIterator]: async function* () {
-								yield {
-									choices: [{ delta: { content: "Test response" }, index: 0 }],
-									usage: null,
-								}
-								yield {
-									choices: [{ delta: {}, index: 0 }],
-									usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
-								}
-							},
-						}
-
-						const result = mockCreate(...args)
-
-						if (args[0].stream) {
-							mockWithResponse.mockReturnValue(
-								Promise.resolve({
-									data: stream,
-									response: {
-										headers: {
-											get: (name: string) =>
-												name === "x-completion-request-id" ? "test-request-id" : null,
-										},
-									},
-								}),
-							)
-							result.withResponse = mockWithResponse
-						}
-
-						return result
-					},
-				},
-			},
-		})),
-	}
-})
-
-describe("GlamaHandler", () => {
-	let handler: GlamaHandler
-	let mockOptions: ApiHandlerOptions
-
-	beforeEach(() => {
-		mockOptions = {
-			glamaApiKey: "test-api-key",
-			glamaModelId: "anthropic/claude-3-7-sonnet",
-		}
-
-		handler = new GlamaHandler(mockOptions)
-		mockCreate.mockClear()
-		mockWithResponse.mockClear()
-
-		// Default mock implementation for non-streaming responses
-		mockCreate.mockResolvedValue({
-			id: "test-completion",
-			choices: [
-				{
-					message: { role: "assistant", content: "Test response" },
-					finish_reason: "stop",
-					index: 0,
-				},
-			],
-			usage: {
-				prompt_tokens: 10,
-				completion_tokens: 5,
-				total_tokens: 15,
-			},
-		})
-	})
-
-	describe("constructor", () => {
-		it("should initialize with provided options", () => {
-			expect(handler).toBeInstanceOf(GlamaHandler)
-			expect(handler.getModel().id).toBe(mockOptions.glamaModelId)
-		})
-	})
-
-	describe("createMessage", () => {
-		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
-			{
-				role: "user",
-				content: "Hello!",
-			},
-		]
-
-		it("should handle streaming responses", async () => {
-			const stream = handler.createMessage(systemPrompt, messages)
-			const chunks: any[] = []
-
-			for await (const chunk of stream) {
-				chunks.push(chunk)
-			}
-
-			expect(chunks.length).toBe(1)
-			expect(chunks[0]).toEqual({ type: "text", text: "Test response" })
-		})
-
-		it("should handle API errors", async () => {
-			mockCreate.mockImplementationOnce(() => {
-				throw new Error("API Error")
-			})
-
-			const stream = handler.createMessage(systemPrompt, messages)
-			const chunks = []
-
-			try {
-				for await (const chunk of stream) {
-					chunks.push(chunk)
-				}
-				expect.fail("Expected error to be thrown")
-			} catch (error) {
-				expect(error).toBeInstanceOf(Error)
-				expect(error.message).toBe("API Error")
-			}
-		})
-	})
-
-	describe("completePrompt", () => {
-		it("should complete prompt successfully", async () => {
-			const result = await handler.completePrompt("Test prompt")
-			expect(result).toBe("Test response")
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.objectContaining({
-					model: mockOptions.glamaModelId,
-					messages: [{ role: "user", content: "Test prompt" }],
-					temperature: 0,
-					max_tokens: 8192,
-				}),
-			)
-		})
-
-		it("should handle API errors", async () => {
-			mockCreate.mockRejectedValueOnce(new Error("API Error"))
-			await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Glama completion error: API Error")
-		})
-
-		it("should handle empty response", async () => {
-			mockCreate.mockResolvedValueOnce({
-				choices: [{ message: { content: "" } }],
-			})
-			const result = await handler.completePrompt("Test prompt")
-			expect(result).toBe("")
-		})
-
-		it("should not set max_tokens for non-Anthropic models", async () => {
-			// Reset mock to clear any previous calls
-			mockCreate.mockClear()
-
-			const nonAnthropicOptions = {
-				glamaApiKey: "test-key",
-				glamaModelId: "openai/gpt-4o",
-			}
-
-			const nonAnthropicHandler = new GlamaHandler(nonAnthropicOptions)
-
-			await nonAnthropicHandler.completePrompt("Test prompt")
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.objectContaining({
-					model: "openai/gpt-4o",
-					messages: [{ role: "user", content: "Test prompt" }],
-					temperature: 0,
-				}),
-			)
-			expect(mockCreate.mock.calls[0][0]).not.toHaveProperty("max_tokens")
-		})
-	})
-
-	describe("fetchModel", () => {
-		it("should return model info", async () => {
-			const modelInfo = await handler.fetchModel()
-			expect(modelInfo.id).toBe(mockOptions.glamaModelId)
-			expect(modelInfo.info).toBeDefined()
-			expect(modelInfo.info.maxTokens).toBe(8192)
-			expect(modelInfo.info.contextWindow).toBe(200_000)
-		})
-
-		it("should return default model when invalid model provided", async () => {
-			const handlerWithInvalidModel = new GlamaHandler({ ...mockOptions, glamaModelId: "invalid/model" })
-			const modelInfo = await handlerWithInvalidModel.fetchModel()
-			expect(modelInfo.id).toBe("anthropic/claude-3-7-sonnet")
-			expect(modelInfo.info).toBeDefined()
-		})
-	})
-})

+ 1 - 21
src/api/providers/fetchers/__tests__/modelCache.spec.ts

@@ -41,7 +41,6 @@ vi.mock("fs", () => ({
 vi.mock("../litellm")
 vi.mock("../openrouter")
 vi.mock("../requesty")
-vi.mock("../glama")
 vi.mock("../unbound")
 vi.mock("../io-intelligence")
 
@@ -64,14 +63,12 @@ import { getModels, getModelsFromCache } from "../modelCache"
 import { getLiteLLMModels } from "../litellm"
 import { getOpenRouterModels } from "../openrouter"
 import { getRequestyModels } from "../requesty"
-import { getGlamaModels } from "../glama"
 import { getUnboundModels } from "../unbound"
 import { getIOIntelligenceModels } from "../io-intelligence"
 
 const mockGetLiteLLMModels = getLiteLLMModels as Mock<typeof getLiteLLMModels>
 const mockGetOpenRouterModels = getOpenRouterModels as Mock<typeof getOpenRouterModels>
 const mockGetRequestyModels = getRequestyModels as Mock<typeof getRequestyModels>
-const mockGetGlamaModels = getGlamaModels as Mock<typeof getGlamaModels>
 const mockGetUnboundModels = getUnboundModels as Mock<typeof getUnboundModels>
 const mockGetIOIntelligenceModels = getIOIntelligenceModels as Mock<typeof getIOIntelligenceModels>
 
@@ -139,23 +136,6 @@ describe("getModels with new GetModelsOptions", () => {
 		expect(result).toEqual(mockModels)
 	})
 
-	it("calls getGlamaModels for glama provider", async () => {
-		const mockModels = {
-			"glama/model": {
-				maxTokens: 4096,
-				contextWindow: 8192,
-				supportsPromptCache: false,
-				description: "Glama model",
-			},
-		}
-		mockGetGlamaModels.mockResolvedValue(mockModels)
-
-		const result = await getModels({ provider: "glama" })
-
-		expect(mockGetGlamaModels).toHaveBeenCalled()
-		expect(result).toEqual(mockModels)
-	})
-
 	it("calls getUnboundModels with optional API key", async () => {
 		const mockModels = {
 			"unbound/model": {
@@ -302,7 +282,7 @@ describe("getModelsFromCache disk fallback", () => {
 
 		const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
 
-		const result = getModelsFromCache("glama")
+		const result = getModelsFromCache("openrouter")
 
 		expect(result).toBeUndefined()
 		expect(consoleErrorSpy).toHaveBeenCalled()

+ 0 - 42
src/api/providers/fetchers/glama.ts

@@ -1,42 +0,0 @@
-import axios from "axios"
-
-import type { ModelInfo } from "@roo-code/types"
-
-import { parseApiPrice } from "../../../shared/cost"
-
-export async function getGlamaModels(): Promise<Record<string, ModelInfo>> {
-	const models: Record<string, ModelInfo> = {}
-
-	try {
-		const response = await axios.get("https://glama.ai/api/gateway/v1/models")
-		const rawModels = response.data
-
-		for (const rawModel of rawModels) {
-			const modelInfo: ModelInfo = {
-				maxTokens: rawModel.maxTokensOutput,
-				contextWindow: rawModel.maxTokensInput,
-				supportsImages: rawModel.capabilities?.includes("input:image"),
-				supportsPromptCache: rawModel.capabilities?.includes("caching"),
-				inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
-				outputPrice: parseApiPrice(rawModel.pricePerToken?.output),
-				description: undefined,
-				cacheWritesPrice: parseApiPrice(rawModel.pricePerToken?.cacheWrite),
-				cacheReadsPrice: parseApiPrice(rawModel.pricePerToken?.cacheRead),
-			}
-
-			switch (rawModel.id) {
-				case rawModel.id.startsWith("anthropic/"):
-					modelInfo.maxTokens = 8192
-					break
-				default:
-					break
-			}
-
-			models[rawModel.id] = modelInfo
-		}
-	} catch (error) {
-		console.error(`Error fetching Glama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
-	}
-
-	return models
-}

+ 0 - 5
src/api/providers/fetchers/modelCache.ts

@@ -19,7 +19,6 @@ import { fileExistsAtPath } from "../../../utils/fs"
 import { getOpenRouterModels } from "./openrouter"
 import { getVercelAiGatewayModels } from "./vercel-ai-gateway"
 import { getRequestyModels } from "./requesty"
-import { getGlamaModels } from "./glama"
 import { getUnboundModels } from "./unbound"
 import { getLiteLLMModels } from "./litellm"
 import { GetModelsOptions } from "../../../shared/api"
@@ -74,9 +73,6 @@ async function fetchModelsFromProvider(options: GetModelsOptions): Promise<Model
 			// Requesty models endpoint requires an API key for per-user custom policies.
 			models = await getRequestyModels(options.baseUrl, options.apiKey)
 			break
-		case "glama":
-			models = await getGlamaModels()
-			break
 		case "unbound":
 			// Unbound models endpoint requires an API key to fetch application specific models.
 			models = await getUnboundModels(options.apiKey)
@@ -252,7 +248,6 @@ export async function initializeModelCacheRefresh(): Promise<void> {
 		// Providers that work without API keys
 		const publicProviders: Array<{ provider: RouterName; options: GetModelsOptions }> = [
 			{ provider: "openrouter", options: { provider: "openrouter" } },
-			{ provider: "glama", options: { provider: "glama" } },
 			{ provider: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } },
 			{ provider: "chutes", options: { provider: "chutes" } },
 		]

+ 0 - 146
src/api/providers/glama.ts

@@ -1,146 +0,0 @@
-import { Anthropic } from "@anthropic-ai/sdk"
-import axios from "axios"
-import OpenAI from "openai"
-
-import { glamaDefaultModelId, glamaDefaultModelInfo, GLAMA_DEFAULT_TEMPERATURE } from "@roo-code/types"
-
-import { Package } from "../../shared/package"
-import { ApiHandlerOptions } from "../../shared/api"
-
-import { ApiStream } from "../transform/stream"
-import { convertToOpenAiMessages } from "../transform/openai-format"
-import { addCacheBreakpoints } from "../transform/caching/anthropic"
-
-import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
-import { RouterProvider } from "./router-provider"
-
-const DEFAULT_HEADERS = {
-	"X-Glama-Metadata": JSON.stringify({
-		labels: [{ key: "app", value: `vscode.${Package.publisher}.${Package.name}` }],
-	}),
-}
-
-export class GlamaHandler extends RouterProvider implements SingleCompletionHandler {
-	constructor(options: ApiHandlerOptions) {
-		super({
-			options,
-			name: "glama",
-			baseURL: "https://glama.ai/api/gateway/openai/v1",
-			apiKey: options.glamaApiKey,
-			modelId: options.glamaModelId,
-			defaultModelId: glamaDefaultModelId,
-			defaultModelInfo: glamaDefaultModelInfo,
-		})
-	}
-
-	override async *createMessage(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream {
-		const { id: modelId, info } = await this.fetchModel()
-
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
-		]
-
-		if (modelId.startsWith("anthropic/claude-3")) {
-			addCacheBreakpoints(systemPrompt, openAiMessages)
-		}
-
-		// Required by Anthropic; other providers default to max tokens allowed.
-		let maxTokens: number | undefined
-
-		if (modelId.startsWith("anthropic/")) {
-			maxTokens = info.maxTokens ?? undefined
-		}
-
-		const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = {
-			model: modelId,
-			max_tokens: maxTokens,
-			messages: openAiMessages,
-			stream: true,
-		}
-
-		if (this.supportsTemperature(modelId)) {
-			requestOptions.temperature = this.options.modelTemperature ?? GLAMA_DEFAULT_TEMPERATURE
-		}
-
-		const { data: completion, response } = await this.client.chat.completions
-			.create(requestOptions, { headers: DEFAULT_HEADERS })
-			.withResponse()
-
-		const completionRequestId = response.headers.get("x-completion-request-id")
-
-		for await (const chunk of completion) {
-			const delta = chunk.choices[0]?.delta
-
-			if (delta?.content) {
-				yield { type: "text", text: delta.content }
-			}
-		}
-
-		try {
-			let attempt = 0
-
-			const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
-
-			while (attempt++ < 10) {
-				// In case of an interrupted request, we need to wait for the upstream API to finish processing the request
-				// before we can fetch information about the token usage and cost.
-				const response = await axios.get(
-					`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestId}`,
-					{ headers: { Authorization: `Bearer ${this.options.glamaApiKey}` } },
-				)
-
-				const completionRequest = response.data
-
-				if (completionRequest.tokenUsage && completionRequest.totalCostUsd) {
-					yield {
-						type: "usage",
-						cacheWriteTokens: completionRequest.tokenUsage.cacheCreationInputTokens,
-						cacheReadTokens: completionRequest.tokenUsage.cacheReadInputTokens,
-						inputTokens: completionRequest.tokenUsage.promptTokens,
-						outputTokens: completionRequest.tokenUsage.completionTokens,
-						totalCost: parseFloat(completionRequest.totalCostUsd),
-					}
-
-					break
-				}
-
-				await delay(200)
-			}
-		} catch (error) {
-			console.error("Error fetching Glama completion details", error)
-		}
-	}
-
-	async completePrompt(prompt: string): Promise<string> {
-		const { id: modelId, info } = await this.fetchModel()
-
-		try {
-			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
-				model: modelId,
-				messages: [{ role: "user", content: prompt }],
-			}
-
-			if (this.supportsTemperature(modelId)) {
-				requestOptions.temperature = this.options.modelTemperature ?? GLAMA_DEFAULT_TEMPERATURE
-			}
-
-			if (modelId.startsWith("anthropic/")) {
-				requestOptions.max_tokens = info.maxTokens
-			}
-
-			const response = await this.client.chat.completions.create(requestOptions)
-			return response.choices[0]?.message.content || ""
-		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Glama completion error: ${error.message}`)
-			}
-
-			throw error
-		}
-	}
-}

+ 0 - 1
src/api/providers/index.ts

@@ -9,7 +9,6 @@ export { DoubaoHandler } from "./doubao"
 export { MoonshotHandler } from "./moonshot"
 export { FakeAIHandler } from "./fake-ai"
 export { GeminiHandler } from "./gemini"
-export { GlamaHandler } from "./glama"
 export { GroqHandler } from "./groq"
 export { HuggingFaceHandler } from "./huggingface"
 export { HumanRelayHandler } from "./human-relay"

+ 0 - 34
src/core/webview/ClineProvider.ts

@@ -37,7 +37,6 @@ import {
 	RooCodeEventName,
 	requestyDefaultModelId,
 	openRouterDefaultModelId,
-	glamaDefaultModelId,
 	DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT,
 	DEFAULT_WRITE_DELAY_MS,
 	ORGANIZATION_ALLOW_ALL,
@@ -1526,39 +1525,6 @@ export class ClineProvider
 		await this.upsertProviderProfile(currentApiConfigName, newConfiguration)
 	}
 
-	// Glama
-
-	async handleGlamaCallback(code: string) {
-		let apiKey: string
-
-		try {
-			const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })
-
-			if (response.data && response.data.apiKey) {
-				apiKey = response.data.apiKey
-			} else {
-				throw new Error("Invalid response from Glama API")
-			}
-		} catch (error) {
-			this.log(
-				`Error exchanging code for API key: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
-			)
-
-			throw error
-		}
-
-		const { apiConfiguration, currentApiConfigName = "default" } = await this.getState()
-
-		const newConfiguration: ProviderSettings = {
-			...apiConfiguration,
-			apiProvider: "glama",
-			glamaApiKey: apiKey,
-			glamaModelId: apiConfiguration?.glamaModelId || glamaDefaultModelId,
-		}
-
-		await this.upsertProviderProfile(currentApiConfigName, newConfiguration)
-	}
-
 	// Requesty
 
 	async handleRequestyCallback(code: string, baseUrl: string | null) {

+ 0 - 1
src/core/webview/__tests__/ClineProvider.apiHandlerRebuild.spec.ts

@@ -567,7 +567,6 @@ describe("ClineProvider - API Handler Rebuild Guard", () => {
 				"claude-3-5-sonnet-20241022",
 			)
 			expect(getModelId({ apiProvider: "openai", openAiModelId: "gpt-4-turbo" })).toBe("gpt-4-turbo")
-			expect(getModelId({ apiProvider: "glama", glamaModelId: "some-model" })).toBe("some-model")
 			expect(getModelId({ apiProvider: "bedrock", apiModelId: "anthropic.claude-v2" })).toBe(
 				"anthropic.claude-v2",
 			)

+ 0 - 9
src/core/webview/__tests__/ClineProvider.spec.ts

@@ -2668,7 +2668,6 @@ describe("ClineProvider - Router Models", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				litellmApiKey: "litellm-key",
 				litellmBaseUrl: "http://localhost:4000",
@@ -2698,7 +2697,6 @@ describe("ClineProvider - Router Models", () => {
 		// Verify getModels was called for each provider with correct options
 		expect(getModels).toHaveBeenCalledWith({ provider: "openrouter" })
 		expect(getModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" })
-		expect(getModels).toHaveBeenCalledWith({ provider: "glama" })
 		expect(getModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" })
 		expect(getModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" })
 		expect(getModels).toHaveBeenCalledWith({ provider: "deepinfra" })
@@ -2722,7 +2720,6 @@ describe("ClineProvider - Router Models", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				requesty: mockModels,
-				glama: mockModels,
 				unbound: mockModels,
 				roo: mockModels,
 				chutes: mockModels,
@@ -2745,7 +2742,6 @@ describe("ClineProvider - Router Models", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				litellmApiKey: "litellm-key",
 				litellmBaseUrl: "http://localhost:4000",
@@ -2761,7 +2757,6 @@ describe("ClineProvider - Router Models", () => {
 		vi.mocked(getModels)
 			.mockResolvedValueOnce(mockModels) // openrouter success
 			.mockRejectedValueOnce(new Error("Requesty API error")) // requesty fail
-			.mockResolvedValueOnce(mockModels) // glama success
 			.mockRejectedValueOnce(new Error("Unbound API error")) // unbound fail
 			.mockResolvedValueOnce(mockModels) // vercel-ai-gateway success
 			.mockResolvedValueOnce(mockModels) // deepinfra success
@@ -2778,7 +2773,6 @@ describe("ClineProvider - Router Models", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				requesty: {},
-				glama: mockModels,
 				unbound: {},
 				roo: mockModels,
 				chutes: {},
@@ -2838,7 +2832,6 @@ describe("ClineProvider - Router Models", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				// No litellm config
 			},
@@ -2874,7 +2867,6 @@ describe("ClineProvider - Router Models", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				// No litellm config
 			},
@@ -2902,7 +2894,6 @@ describe("ClineProvider - Router Models", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				requesty: mockModels,
-				glama: mockModels,
 				unbound: mockModels,
 				roo: mockModels,
 				chutes: mockModels,

+ 0 - 2
src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts

@@ -76,8 +76,6 @@ describe("webviewMessageHandler - requestRouterModels provider filter", () => {
 					return { "requesty/model": { contextWindow: 8192, supportsPromptCache: false } }
 				case "deepinfra":
 					return { "deepinfra/model": { contextWindow: 8192, supportsPromptCache: false } }
-				case "glama":
-					return { "glama/model": { contextWindow: 8192, supportsPromptCache: false } }
 				case "unbound":
 					return { "unbound/model": { contextWindow: 8192, supportsPromptCache: false } }
 				case "vercel-ai-gateway":

+ 0 - 16
src/core/webview/__tests__/webviewMessageHandler.spec.ts

@@ -187,7 +187,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				litellmApiKey: "litellm-key",
 				litellmBaseUrl: "http://localhost:4000",
@@ -220,7 +219,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		// Verify getModels was called for each provider
 		expect(mockGetModels).toHaveBeenCalledWith({ provider: "openrouter" })
 		expect(mockGetModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" })
-		expect(mockGetModels).toHaveBeenCalledWith({ provider: "glama" })
 		expect(mockGetModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" })
 		expect(mockGetModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" })
 		expect(mockGetModels).toHaveBeenCalledWith({ provider: "deepinfra" })
@@ -245,7 +243,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				requesty: mockModels,
-				glama: mockModels,
 				unbound: mockModels,
 				litellm: mockModels,
 				roo: mockModels,
@@ -265,7 +262,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				// Missing litellm config
 			},
@@ -303,7 +299,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 			apiConfiguration: {
 				openRouterApiKey: "openrouter-key",
 				requestyApiKey: "requesty-key",
-				glamaApiKey: "glama-key",
 				unboundApiKey: "unbound-key",
 				// Missing litellm config
 			},
@@ -339,7 +334,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				requesty: mockModels,
-				glama: mockModels,
 				unbound: mockModels,
 				roo: mockModels,
 				chutes: mockModels,
@@ -368,7 +362,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		mockGetModels
 			.mockResolvedValueOnce(mockModels) // openrouter
 			.mockRejectedValueOnce(new Error("Requesty API error")) // requesty
-			.mockResolvedValueOnce(mockModels) // glama
 			.mockRejectedValueOnce(new Error("Unbound API error")) // unbound
 			.mockResolvedValueOnce(mockModels) // vercel-ai-gateway
 			.mockResolvedValueOnce(mockModels) // deepinfra
@@ -416,7 +409,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				requesty: {},
-				glama: mockModels,
 				unbound: {},
 				roo: mockModels,
 				chutes: {},
@@ -436,7 +428,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		mockGetModels
 			.mockRejectedValueOnce(new Error("Structured error message")) // openrouter
 			.mockRejectedValueOnce(new Error("Requesty API error")) // requesty
-			.mockRejectedValueOnce(new Error("Glama API error")) // glama
 			.mockRejectedValueOnce(new Error("Unbound API error")) // unbound
 			.mockRejectedValueOnce(new Error("Vercel AI Gateway error")) // vercel-ai-gateway
 			.mockRejectedValueOnce(new Error("DeepInfra API error")) // deepinfra
@@ -463,13 +454,6 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 			values: { provider: "requesty" },
 		})
 
-		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
-			type: "singleRouterModelFetchResponse",
-			success: false,
-			error: "Glama API error",
-			values: { provider: "glama" },
-		})
-
 		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
 			type: "singleRouterModelFetchResponse",
 			success: false,

+ 0 - 2
src/core/webview/webviewMessageHandler.ts

@@ -797,7 +797,6 @@ export const webviewMessageHandler = async (
 						"io-intelligence": {},
 						requesty: {},
 						unbound: {},
-						glama: {},
 						ollama: {},
 						lmstudio: {},
 						roo: {},
@@ -828,7 +827,6 @@ export const webviewMessageHandler = async (
 						baseUrl: apiConfiguration.requestyBaseUrl,
 					},
 				},
-				{ key: "glama", options: { provider: "glama" } },
 				{ key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } },
 				{ key: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } },
 				{

+ 0 - 2
src/shared/ProfileValidator.ts

@@ -82,8 +82,6 @@ export class ProfileValidator {
 				return profile.vsCodeLmModelSelector?.id
 			case "openrouter":
 				return profile.openRouterModelId
-			case "glama":
-				return profile.glamaModelId
 			case "ollama":
 				return profile.ollamaModelId
 			case "requesty":

+ 0 - 15
src/shared/__tests__/ProfileValidator.spec.ts

@@ -306,21 +306,6 @@ describe("ProfileValidator", () => {
 			expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true)
 		})
 
-		it("should extract glamaModelId for glama provider", () => {
-			const allowList: OrganizationAllowList = {
-				allowAll: false,
-				providers: {
-					glama: { allowAll: false, models: ["glama-model"] },
-				},
-			}
-			const profile: ProviderSettings = {
-				apiProvider: "glama",
-				glamaModelId: "glama-model",
-			}
-
-			expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true)
-		})
-
 		it("should extract requestyModelId for requesty provider", () => {
 			const allowList: OrganizationAllowList = {
 				allowAll: false,

+ 0 - 2
src/shared/__tests__/checkExistApiConfig.spec.ts

@@ -24,7 +24,6 @@ describe("checkExistKey", () => {
 	it("should return true when multiple keys are defined", () => {
 		const config: ProviderSettings = {
 			apiKey: "test-key",
-			glamaApiKey: "glama-key",
 			openRouterApiKey: "openrouter-key",
 		}
 		expect(checkExistKey(config)).toBe(true)
@@ -43,7 +42,6 @@ describe("checkExistKey", () => {
 	it("should return false when all key fields are undefined", () => {
 		const config: ProviderSettings = {
 			apiKey: undefined,
-			glamaApiKey: undefined,
 			openRouterApiKey: undefined,
 			awsRegion: undefined,
 			vertexProjectId: undefined,

+ 0 - 1
src/shared/api.ts

@@ -189,7 +189,6 @@ const dynamicProviderExtras = {
 	"io-intelligence": {} as { apiKey: string },
 	requesty: {} as { apiKey?: string; baseUrl?: string },
 	unbound: {} as { apiKey?: string },
-	glama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	roo: {} as { apiKey?: string; baseUrl?: string },

+ 1 - 16
webview-ui/src/components/settings/ApiOptions.tsx

@@ -10,7 +10,6 @@ import {
 	DEFAULT_CONSECUTIVE_MISTAKE_LIMIT,
 	openRouterDefaultModelId,
 	requestyDefaultModelId,
-	glamaDefaultModelId,
 	unboundDefaultModelId,
 	litellmDefaultModelId,
 	openAiNativeDefaultModelId,
@@ -76,7 +75,6 @@ import {
 	DeepSeek,
 	Doubao,
 	Gemini,
-	Glama,
 	Groq,
 	HuggingFace,
 	IOIntelligence,
@@ -306,7 +304,7 @@ const ApiOptions = ({
 
 			// It would be much easier to have a single attribute that stores
 			// the modelId, but we have a separate attribute for each of
-			// OpenRouter, Glama, Unbound, and Requesty.
+			// OpenRouter, Unbound, and Requesty.
 			// If you switch to one of these providers and the corresponding
 			// modelId is not set then you immediately end up in an error state.
 			// To address that we set the modelId to the default value for th
@@ -340,7 +338,6 @@ const ApiOptions = ({
 			> = {
 				deepinfra: { field: "deepInfraModelId", default: deepInfraDefaultModelId },
 				openrouter: { field: "openRouterModelId", default: openRouterDefaultModelId },
-				glama: { field: "glamaModelId", default: glamaDefaultModelId },
 				unbound: { field: "unboundModelId", default: unboundDefaultModelId },
 				requesty: { field: "requestyModelId", default: requestyDefaultModelId },
 				litellm: { field: "litellmModelId", default: litellmDefaultModelId },
@@ -534,18 +531,6 @@ const ApiOptions = ({
 				/>
 			)}
 
-			{selectedProvider === "glama" && (
-				<Glama
-					apiConfiguration={apiConfiguration}
-					setApiConfigurationField={setApiConfigurationField}
-					routerModels={routerModels}
-					uriScheme={uriScheme}
-					organizationAllowList={organizationAllowList}
-					modelValidationError={modelValidationError}
-					simplifySettings={fromWelcomeView}
-				/>
-			)}
-
 			{selectedProvider === "unbound" && (
 				<Unbound
 					apiConfiguration={apiConfiguration}

+ 0 - 1
webview-ui/src/components/settings/ModelPicker.tsx

@@ -28,7 +28,6 @@ import { ApiErrorMessage } from "./ApiErrorMessage"
 
 type ModelIdKey = keyof Pick<
 	ProviderSettings,
-	| "glamaModelId"
 	| "openRouterModelId"
 	| "unboundModelId"
 	| "requestyModelId"

+ 0 - 1
webview-ui/src/components/settings/__tests__/ApiOptions.provider-filtering.spec.tsx

@@ -156,7 +156,6 @@ describe("ApiOptions Provider Filtering", () => {
 		expect(providerValues).toContain("ollama")
 		expect(providerValues).toContain("lmstudio")
 		expect(providerValues).toContain("litellm")
-		expect(providerValues).toContain("glama")
 		expect(providerValues).toContain("unbound")
 		expect(providerValues).toContain("requesty")
 		expect(providerValues).toContain("io-intelligence")

+ 1 - 1
webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx

@@ -36,7 +36,7 @@ describe("ModelPicker", () => {
 	const defaultProps = {
 		apiConfiguration: {},
 		defaultModelId: "model1",
-		modelIdKey: "glamaModelId" as const,
+		modelIdKey: "openRouterModelId" as const,
 		serviceName: "Test Service",
 		serviceUrl: "https://test.service",
 		recommendedModel: "recommended-model",

+ 0 - 1
webview-ui/src/components/settings/constants.ts

@@ -61,7 +61,6 @@ export const PROVIDERS = [
 	{ value: "qwen-code", label: "Qwen Code" },
 	{ value: "vertex", label: "GCP Vertex AI" },
 	{ value: "bedrock", label: "Amazon Bedrock" },
-	{ value: "glama", label: "Glama" },
 	{ value: "vscode-lm", label: "VS Code LM API" },
 	{ value: "mistral", label: "Mistral" },
 	{ value: "lmstudio", label: "LM Studio" },

+ 0 - 79
webview-ui/src/components/settings/providers/Glama.tsx

@@ -1,79 +0,0 @@
-import { useCallback } from "react"
-import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
-
-import { type ProviderSettings, type OrganizationAllowList, glamaDefaultModelId } from "@roo-code/types"
-
-import type { RouterModels } from "@roo/api"
-
-import { useAppTranslation } from "@src/i18n/TranslationContext"
-import { getGlamaAuthUrl } from "@src/oauth/urls"
-import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink"
-
-import { inputEventTransform } from "../transforms"
-import { ModelPicker } from "../ModelPicker"
-
-type GlamaProps = {
-	apiConfiguration: ProviderSettings
-	setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void
-	routerModels?: RouterModels
-	uriScheme?: string
-	organizationAllowList: OrganizationAllowList
-	modelValidationError?: string
-	simplifySettings?: boolean
-}
-
-export const Glama = ({
-	apiConfiguration,
-	setApiConfigurationField,
-	routerModels,
-	uriScheme,
-	organizationAllowList,
-	modelValidationError,
-	simplifySettings,
-}: GlamaProps) => {
-	const { t } = useAppTranslation()
-
-	const handleInputChange = useCallback(
-		<K extends keyof ProviderSettings, E>(
-			field: K,
-			transform: (event: E) => ProviderSettings[K] = inputEventTransform,
-		) =>
-			(event: E | Event) => {
-				setApiConfigurationField(field, transform(event as E))
-			},
-		[setApiConfigurationField],
-	)
-
-	return (
-		<>
-			<VSCodeTextField
-				value={apiConfiguration?.glamaApiKey || ""}
-				type="password"
-				onInput={handleInputChange("glamaApiKey")}
-				placeholder={t("settings:placeholders.apiKey")}
-				className="w-full">
-				<label className="block font-medium mb-1">{t("settings:providers.glamaApiKey")}</label>
-			</VSCodeTextField>
-			<div className="text-sm text-vscode-descriptionForeground -mt-2">
-				{t("settings:providers.apiKeyStorageNotice")}
-			</div>
-			{!apiConfiguration?.glamaApiKey && (
-				<VSCodeButtonLink href={getGlamaAuthUrl(uriScheme)} style={{ width: "100%" }} appearance="primary">
-					{t("settings:providers.getGlamaApiKey")}
-				</VSCodeButtonLink>
-			)}
-			<ModelPicker
-				apiConfiguration={apiConfiguration}
-				setApiConfigurationField={setApiConfigurationField}
-				defaultModelId={glamaDefaultModelId}
-				models={routerModels?.glama ?? {}}
-				modelIdKey="glamaModelId"
-				serviceName="Glama"
-				serviceUrl="https://glama.ai/models"
-				organizationAllowList={organizationAllowList}
-				errorMessage={modelValidationError}
-				simplifySettings={simplifySettings}
-			/>
-		</>
-	)
-}

+ 0 - 1
webview-ui/src/components/settings/providers/index.ts

@@ -6,7 +6,6 @@ export { ClaudeCode } from "./ClaudeCode"
 export { DeepSeek } from "./DeepSeek"
 export { Doubao } from "./Doubao"
 export { Gemini } from "./Gemini"
-export { Glama } from "./Glama"
 export { Groq } from "./Groq"
 export { HuggingFace } from "./HuggingFace"
 export { IOIntelligence } from "./IOIntelligence"

+ 1 - 9
webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts

@@ -55,7 +55,6 @@ describe("useSelectedModel", () => {
 						"test-model": baseModelInfo,
 					},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -119,7 +118,6 @@ describe("useSelectedModel", () => {
 						},
 					},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -187,7 +185,6 @@ describe("useSelectedModel", () => {
 						"test-model": baseModelInfo,
 					},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -242,7 +239,6 @@ describe("useSelectedModel", () => {
 				data: {
 					openrouter: { "test-model": baseModelInfo },
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -286,7 +282,6 @@ describe("useSelectedModel", () => {
 						},
 					},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -349,7 +344,7 @@ describe("useSelectedModel", () => {
 
 		it("should NOT set loading when openrouter provider metadata is loading but provider is static (anthropic)", () => {
 			mockUseRouterModels.mockReturnValue({
-				data: { openrouter: {}, requesty: {}, glama: {}, unbound: {}, litellm: {}, "io-intelligence": {} },
+				data: { openrouter: {}, requesty: {}, unbound: {}, litellm: {}, "io-intelligence": {} },
 				isLoading: false,
 				isError: false,
 			} as any)
@@ -417,7 +412,6 @@ describe("useSelectedModel", () => {
 				data: {
 					openrouter: {},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -455,7 +449,6 @@ describe("useSelectedModel", () => {
 				data: {
 					openrouter: {},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},
@@ -490,7 +483,6 @@ describe("useSelectedModel", () => {
 				data: {
 					openrouter: {},
 					requesty: {},
-					glama: {},
 					unbound: {},
 					litellm: {},
 					"io-intelligence": {},

+ 0 - 5
webview-ui/src/components/ui/hooks/useSelectedModel.ts

@@ -157,11 +157,6 @@ function getSelectedModel({
 			const info = routerModels.requesty?.[id]
 			return { id, info }
 		}
-		case "glama": {
-			const id = getValidatedModelId(apiConfiguration.glamaModelId, routerModels.glama, defaultModelId)
-			const info = routerModels.glama?.[id]
-			return { id, info }
-		}
 		case "unbound": {
 			const id = getValidatedModelId(apiConfiguration.unboundModelId, routerModels.unbound, defaultModelId)
 			const info = routerModels.unbound?.[id]

+ 0 - 2
webview-ui/src/i18n/locales/ca/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Clau API de Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Obtenir clau API de Vercel AI Gateway",
 		"apiKeyStorageNotice": "Les claus API s'emmagatzemen de forma segura a l'Emmagatzematge Secret de VSCode",
-		"glamaApiKey": "Clau API de Glama",
-		"getGlamaApiKey": "Obtenir clau API de Glama",
 		"useCustomBaseUrl": "Utilitzar URL base personalitzada",
 		"useReasoning": "Activar raonament",
 		"useHostHeader": "Utilitzar capçalera Host personalitzada",

+ 0 - 2
webview-ui/src/i18n/locales/de/settings.json

@@ -285,8 +285,6 @@
 		"doubaoApiKey": "Doubao API-Schlüssel",
 		"getDoubaoApiKey": "Doubao API-Schlüssel erhalten",
 		"apiKeyStorageNotice": "API-Schlüssel werden sicher im VSCode Secret Storage gespeichert",
-		"glamaApiKey": "Glama API-Schlüssel",
-		"getGlamaApiKey": "Glama API-Schlüssel erhalten",
 		"useCustomBaseUrl": "Benutzerdefinierte Basis-URL verwenden",
 		"useReasoning": "Reasoning aktivieren",
 		"useHostHeader": "Benutzerdefinierten Host-Header verwenden",

+ 0 - 2
webview-ui/src/i18n/locales/en/settings.json

@@ -288,8 +288,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API Key",
 		"getVercelAiGatewayApiKey": "Get Vercel AI Gateway API Key",
 		"apiKeyStorageNotice": "API keys are stored securely in VSCode's Secret Storage",
-		"glamaApiKey": "Glama API Key",
-		"getGlamaApiKey": "Get Glama API Key",
 		"useCustomBaseUrl": "Use custom base URL",
 		"useReasoning": "Enable reasoning",
 		"useHostHeader": "Use custom Host header",

+ 0 - 2
webview-ui/src/i18n/locales/es/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Clave API de Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Obtener clave API de Vercel AI Gateway",
 		"apiKeyStorageNotice": "Las claves API se almacenan de forma segura en el Almacenamiento Secreto de VSCode",
-		"glamaApiKey": "Clave API de Glama",
-		"getGlamaApiKey": "Obtener clave API de Glama",
 		"useCustomBaseUrl": "Usar URL base personalizada",
 		"useReasoning": "Habilitar razonamiento",
 		"useHostHeader": "Usar encabezado Host personalizado",

+ 0 - 2
webview-ui/src/i18n/locales/fr/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Clé API Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Obtenir la clé API Vercel AI Gateway",
 		"apiKeyStorageNotice": "Les clés API sont stockées en toute sécurité dans le stockage sécurisé de VSCode",
-		"glamaApiKey": "Clé API Glama",
-		"getGlamaApiKey": "Obtenir la clé API Glama",
 		"useCustomBaseUrl": "Utiliser une URL de base personnalisée",
 		"useReasoning": "Activer le raisonnement",
 		"useHostHeader": "Utiliser un en-tête Host personnalisé",

+ 0 - 2
webview-ui/src/i18n/locales/hi/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API कुंजी",
 		"getVercelAiGatewayApiKey": "Vercel AI Gateway API कुंजी प्राप्त करें",
 		"apiKeyStorageNotice": "API कुंजियाँ VSCode के सुरक्षित स्टोरेज में सुरक्षित रूप से संग्रहीत हैं",
-		"glamaApiKey": "Glama API कुंजी",
-		"getGlamaApiKey": "Glama API कुंजी प्राप्त करें",
 		"useCustomBaseUrl": "कस्टम बेस URL का उपयोग करें",
 		"useReasoning": "तर्क सक्षम करें",
 		"useHostHeader": "कस्टम होस्ट हेडर का उपयोग करें",

+ 0 - 2
webview-ui/src/i18n/locales/id/settings.json

@@ -287,8 +287,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API Key",
 		"getVercelAiGatewayApiKey": "Dapatkan Vercel AI Gateway API Key",
 		"apiKeyStorageNotice": "API key disimpan dengan aman di Secret Storage VSCode",
-		"glamaApiKey": "Glama API Key",
-		"getGlamaApiKey": "Dapatkan Glama API Key",
 		"useCustomBaseUrl": "Gunakan base URL kustom",
 		"useReasoning": "Aktifkan reasoning",
 		"useHostHeader": "Gunakan Host header kustom",

+ 0 - 2
webview-ui/src/i18n/locales/it/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Chiave API Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Ottieni chiave API Vercel AI Gateway",
 		"apiKeyStorageNotice": "Le chiavi API sono memorizzate in modo sicuro nell'Archivio Segreto di VSCode",
-		"glamaApiKey": "Chiave API Glama",
-		"getGlamaApiKey": "Ottieni chiave API Glama",
 		"useCustomBaseUrl": "Usa URL base personalizzato",
 		"useReasoning": "Abilita ragionamento",
 		"useHostHeader": "Usa intestazione Host personalizzata",

+ 0 - 2
webview-ui/src/i18n/locales/ja/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway APIキー",
 		"getVercelAiGatewayApiKey": "Vercel AI Gateway APIキーを取得",
 		"apiKeyStorageNotice": "APIキーはVSCodeのシークレットストレージに安全に保存されます",
-		"glamaApiKey": "Glama APIキー",
-		"getGlamaApiKey": "Glama APIキーを取得",
 		"useCustomBaseUrl": "カスタムベースURLを使用",
 		"useReasoning": "推論を有効化",
 		"useHostHeader": "カスタムHostヘッダーを使用",

+ 0 - 2
webview-ui/src/i18n/locales/ko/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API 키",
 		"getVercelAiGatewayApiKey": "Vercel AI Gateway API 키 받기",
 		"apiKeyStorageNotice": "API 키는 VSCode의 보안 저장소에 안전하게 저장됩니다",
-		"glamaApiKey": "Glama API 키",
-		"getGlamaApiKey": "Glama API 키 받기",
 		"useCustomBaseUrl": "사용자 정의 기본 URL 사용",
 		"useReasoning": "추론 활성화",
 		"useHostHeader": "사용자 정의 Host 헤더 사용",

+ 0 - 2
webview-ui/src/i18n/locales/nl/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API-sleutel",
 		"getVercelAiGatewayApiKey": "Vercel AI Gateway API-sleutel ophalen",
 		"apiKeyStorageNotice": "API-sleutels worden veilig opgeslagen in de geheime opslag van VSCode",
-		"glamaApiKey": "Glama API-sleutel",
-		"getGlamaApiKey": "Glama API-sleutel ophalen",
 		"useCustomBaseUrl": "Aangepaste basis-URL gebruiken",
 		"useReasoning": "Redenering inschakelen",
 		"useHostHeader": "Aangepaste Host-header gebruiken",

+ 0 - 2
webview-ui/src/i18n/locales/pl/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Klucz API Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Uzyskaj klucz API Vercel AI Gateway",
 		"apiKeyStorageNotice": "Klucze API są bezpiecznie przechowywane w Tajnym Magazynie VSCode",
-		"glamaApiKey": "Klucz API Glama",
-		"getGlamaApiKey": "Uzyskaj klucz API Glama",
 		"useCustomBaseUrl": "Użyj niestandardowego URL bazowego",
 		"useReasoning": "Włącz rozumowanie",
 		"useHostHeader": "Użyj niestandardowego nagłówka Host",

+ 0 - 2
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Chave API do Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Obter chave API do Vercel AI Gateway",
 		"apiKeyStorageNotice": "As chaves de API são armazenadas com segurança no Armazenamento Secreto do VSCode",
-		"glamaApiKey": "Chave de API Glama",
-		"getGlamaApiKey": "Obter chave de API Glama",
 		"useCustomBaseUrl": "Usar URL base personalizado",
 		"useReasoning": "Habilitar raciocínio",
 		"useHostHeader": "Usar cabeçalho Host personalizado",

+ 0 - 2
webview-ui/src/i18n/locales/ru/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Ключ API Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Получить ключ API Vercel AI Gateway",
 		"apiKeyStorageNotice": "API-ключи хранятся безопасно в Secret Storage VSCode",
-		"glamaApiKey": "Glama API-ключ",
-		"getGlamaApiKey": "Получить Glama API-ключ",
 		"useCustomBaseUrl": "Использовать пользовательский базовый URL",
 		"useReasoning": "Включить рассуждения",
 		"useHostHeader": "Использовать пользовательский Host-заголовок",

+ 0 - 2
webview-ui/src/i18n/locales/tr/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API Anahtarı",
 		"getVercelAiGatewayApiKey": "Vercel AI Gateway API Anahtarı Al",
 		"apiKeyStorageNotice": "API anahtarları VSCode'un Gizli Depolamasında güvenli bir şekilde saklanır",
-		"glamaApiKey": "Glama API Anahtarı",
-		"getGlamaApiKey": "Glama API Anahtarı Al",
 		"useCustomBaseUrl": "Özel temel URL kullan",
 		"useReasoning": "Akıl yürütmeyi etkinleştir",
 		"useHostHeader": "Özel Host başlığı kullan",

+ 0 - 2
webview-ui/src/i18n/locales/vi/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Khóa API Vercel AI Gateway",
 		"getVercelAiGatewayApiKey": "Lấy khóa API Vercel AI Gateway",
 		"apiKeyStorageNotice": "Khóa API được lưu trữ an toàn trong Bộ lưu trữ bí mật của VSCode",
-		"glamaApiKey": "Khóa API Glama",
-		"getGlamaApiKey": "Lấy khóa API Glama",
 		"useCustomBaseUrl": "Sử dụng URL cơ sở tùy chỉnh",
 		"useReasoning": "Bật lý luận",
 		"useHostHeader": "Sử dụng tiêu đề Host tùy chỉnh",

+ 0 - 2
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API 密钥",
 		"getVercelAiGatewayApiKey": "获取 Vercel AI Gateway API 密钥",
 		"apiKeyStorageNotice": "API 密钥安全存储在 VSCode 的密钥存储中",
-		"glamaApiKey": "Glama API 密钥",
-		"getGlamaApiKey": "获取 Glama API 密钥",
 		"useCustomBaseUrl": "使用自定义基础 URL",
 		"useReasoning": "启用推理",
 		"useHostHeader": "使用自定义 Host 标头",

+ 0 - 2
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -283,8 +283,6 @@
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API 金鑰",
 		"getVercelAiGatewayApiKey": "取得 Vercel AI Gateway API 金鑰",
 		"apiKeyStorageNotice": "API 金鑰安全儲存於 VSCode 金鑰儲存中",
-		"glamaApiKey": "Glama API 金鑰",
-		"getGlamaApiKey": "取得 Glama API 金鑰",
 		"useCustomBaseUrl": "使用自訂基礎 URL",
 		"useReasoning": "啟用推理",
 		"useHostHeader": "使用自訂 Host 標頭",

+ 0 - 4
webview-ui/src/oauth/urls.ts

@@ -4,10 +4,6 @@ export function getCallbackUrl(provider: string, uriScheme?: string) {
 	return encodeURIComponent(`${uriScheme || "vscode"}://${Package.publisher}.${Package.name}/${provider}`)
 }
 
-export function getGlamaAuthUrl(uriScheme?: string) {
-	return `https://glama.ai/oauth/authorize?callback_url=${getCallbackUrl("glama", uriScheme)}`
-}
-
 export function getOpenRouterAuthUrl(uriScheme?: string) {
 	return `https://openrouter.ai/auth?callback_url=${getCallbackUrl("openrouter", uriScheme)}`
 }

+ 0 - 50
webview-ui/src/utils/__tests__/validate.test.ts

@@ -24,16 +24,6 @@ describe("Model Validation Functions", () => {
 				outputPrice: 5.0,
 			},
 		},
-		glama: {
-			"valid-model": {
-				maxTokens: 8192,
-				contextWindow: 200000,
-				supportsImages: true,
-				supportsPromptCache: false,
-				inputPrice: 3.0,
-				outputPrice: 15.0,
-			},
-		},
 		requesty: {},
 		unbound: {},
 		litellm: {},
@@ -93,26 +83,6 @@ describe("Model Validation Functions", () => {
 			expect(result).toContain("model")
 		})
 
-		it("returns undefined for valid Glama model", () => {
-			const config: ProviderSettings = {
-				apiProvider: "glama",
-				glamaModelId: "valid-model",
-			}
-
-			const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-			expect(result).toBeUndefined()
-		})
-
-		it("returns error for invalid Glama model", () => {
-			const config: ProviderSettings = {
-				apiProvider: "glama",
-				glamaModelId: "invalid-model",
-			}
-
-			const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-			expect(result).toBeUndefined()
-		})
-
 		it("returns undefined for OpenAI models when no router models provided", () => {
 			const config: ProviderSettings = {
 				apiProvider: "openai",
@@ -192,25 +162,5 @@ describe("Model Validation Functions", () => {
 			)
 			expect(result).toBeUndefined() // Should exclude model-specific org errors
 		})
-
-		it("returns undefined for valid IO Intelligence model", () => {
-			const config: ProviderSettings = {
-				apiProvider: "io-intelligence",
-				glamaModelId: "valid-model",
-			}
-
-			const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-			expect(result).toBeUndefined()
-		})
-
-		it("returns error for invalid IO Intelligence model", () => {
-			const config: ProviderSettings = {
-				apiProvider: "io-intelligence",
-				glamaModelId: "invalid-model",
-			}
-
-			const result = getModelValidationError(config, mockRouterModels, allowAllOrganization)
-			expect(result).toBeUndefined()
-		})
 	})
 })

+ 0 - 5
webview-ui/src/utils/validate.ts

@@ -43,11 +43,6 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri
 				return i18next.t("settings:validation.apiKey")
 			}
 			break
-		case "glama":
-			if (!apiConfiguration.glamaApiKey) {
-				return i18next.t("settings:validation.apiKey")
-			}
-			break
 		case "unbound":
 			if (!apiConfiguration.unboundApiKey) {
 				return i18next.t("settings:validation.apiKey")