
Set temperature to 0.2

Saoud Rizwan committed 1 year ago · commit 4d1db22fe3
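This commit pins the sampling temperature to 0.2 in every provider handler. A lower temperature sharpens the token distribution toward the most likely continuations, trading variety for reproducibility, which generally suits an agent that edits code. The value is hardcoded at each of the eleven call sites below rather than shared through a constant, so the occurrences must be kept in sync by hand.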

src/api/anthropic.ts (+2 −0)

@@ -37,6 +37,7 @@ export class AnthropicHandler implements ApiHandler {
 					{
 						model: modelId,
 						max_tokens: this.getModel().info.maxTokens,
+						temperature: 0.2,
 						system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }], // setting cache breakpoint for system prompt so new tasks can reuse it
 						messages: messages.map((message, index) => {
 							if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
@@ -89,6 +90,7 @@ export class AnthropicHandler implements ApiHandler {
 				const message = await this.client.messages.create({
 					model: modelId,
 					max_tokens: this.getModel().info.maxTokens,
+					temperature: 0.2,
 					system: [{ text: systemPrompt, type: "text" }],
 					messages,
 					tools,
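For reference, a minimal standalone sketch of the same setting against the Anthropic SDK; the model ID and prompt are placeholders, not taken from the commit. Anthropic's temperature ranges from 0.0 to 1.0 (default 1.0), so 0.2 sits near the deterministic end.

```ts
import Anthropic from "@anthropic-ai/sdk"

const client = new Anthropic() // reads ANTHROPIC_API_KEY from the environment

const message = await client.messages.create({
	model: "claude-3-5-sonnet-20240620", // placeholder model ID
	max_tokens: 1024,
	temperature: 0.2, // 0.0 = most deterministic, 1.0 = most varied
	messages: [{ role: "user", content: "Say hello" }],
})
console.log(message.content)
```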

src/api/bedrock.ts (+1 −0)

@@ -31,6 +31,7 @@ export class AwsBedrockHandler implements ApiHandler {
 		const message = await this.client.messages.create({
 			model: this.getModel().id,
 			max_tokens: this.getModel().info.maxTokens,
+			temperature: 0.2,
 			system: systemPrompt,
 			messages,
 			tools,
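The Bedrock handler appears to go through @anthropic-ai/bedrock-sdk, whose AnthropicBedrock client mirrors the core SDK's messages.create signature, so the same top-level temperature field applies unchanged.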

src/api/gemini.ts (+1 −0)

@@ -39,6 +39,7 @@ export class GeminiHandler implements ApiHandler {
 			contents: messages.map(convertAnthropicMessageToGemini),
 			generationConfig: {
 				maxOutputTokens: this.getModel().info.maxTokens,
+				temperature: 0.2,
 			},
 		})
 		const message = convertGeminiResponseToAnthropic(result.response)
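Unlike the Anthropic-style handlers, Gemini nests sampling settings inside generationConfig rather than passing them at the top level. A minimal sketch with the @google/generative-ai SDK; the model ID and token limit are placeholders, not taken from the commit:

```ts
import { GoogleGenerativeAI } from "@google/generative-ai"

const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? "")
const model = genAI.getGenerativeModel({ model: "gemini-1.5-pro" }) // placeholder

const result = await model.generateContent({
	contents: [{ role: "user", parts: [{ text: "Say hello" }] }],
	generationConfig: {
		maxOutputTokens: 8192, // placeholder limit
		temperature: 0.2, // same value, but nested under generationConfig
	},
})
console.log(result.response.text())
```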

src/api/ollama.ts (+1 −0)

@@ -36,6 +36,7 @@ export class OllamaHandler implements ApiHandler {
 		const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 			model: this.options.ollamaModelId ?? "",
 			messages: openAiMessages,
+			temperature: 0.2,
 			tools: openAiTools,
 			tool_choice: "auto",
 		}
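Ollama is driven through the OpenAI SDK pointed at its OpenAI-compatible endpoint, so this is the standard OpenAI temperature field (range 0–2, where 0.2 is strongly deterministic). A minimal sketch; the base URL and model name are assumptions for a default local install:

```ts
import OpenAI from "openai"

// Ollama serves an OpenAI-compatible API at /v1 on its default port.
const client = new OpenAI({
	baseURL: "http://localhost:11434/v1",
	apiKey: "ollama", // required by the SDK but ignored by Ollama
})

const completion = await client.chat.completions.create({
	model: "llama3.1", // placeholder local model
	temperature: 0.2,
	messages: [{ role: "user", content: "Say hello" }],
})
console.log(completion.choices[0].message.content)
```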

src/api/openai-native.ts (+2 −0)

@@ -48,6 +48,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 				createParams = {
 					model: this.getModel().id,
 					max_completion_tokens: this.getModel().info.maxTokens,
+					temperature: 0.2,
 					messages: convertToO1Messages(convertToOpenAiMessages(messages), systemPrompt),
 				}
 				break
@@ -55,6 +56,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 				createParams = {
 					model: this.getModel().id,
 					max_completion_tokens: this.getModel().info.maxTokens,
+					temperature: 0.2,
 					messages: openAiMessages,
 					tools: openAiTools,
 					tool_choice: "auto",
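One caveat worth flagging: the first branch above builds its messages with convertToO1Messages, which suggests it targets OpenAI's o1 family, and at the time of writing those models reject non-default temperature values. If so, the unconditional temperature: 0.2 in that branch would fail at the API. A hypothetical guard, not part of the commit (modelId and maxTokens stand in for the this.getModel() lookups):

```ts
const modelId = "o1-preview" // placeholder; would come from this.getModel().id
const maxTokens = 32768 // placeholder; would come from this.getModel().info.maxTokens

// o1-family models only accept the default temperature, so omit the field for them.
const supportsTemperature = !modelId.startsWith("o1")

const createParams = {
	model: modelId,
	max_completion_tokens: maxTokens,
	...(supportsTemperature ? { temperature: 0.2 } : {}),
	messages: [{ role: "user" as const, content: "Say hello" }],
}
```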

src/api/openai.ts (+1 −0)

@@ -47,6 +47,7 @@ export class OpenAiHandler implements ApiHandler {
 		const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 			model: this.options.openAiModelId ?? "",
 			messages: openAiMessages,
+			temperature: 0.2,
 			tools: openAiTools,
 			tool_choice: "auto",
 		}
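This is the generic OpenAI-compatible handler; the parameter shape is identical to the Ollama handler above (temperature range 0–2), so the same sketch applies here.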

src/api/openrouter.ts (+2 −0)

@@ -95,6 +95,7 @@ export class OpenRouterHandler implements ApiHandler {
 				createParams = {
 					model: this.getModel().id,
 					max_tokens: this.getModel().info.maxTokens,
+					temperature: 0.2,
 					messages: convertToO1Messages(convertToOpenAiMessages(messages), systemPrompt),
 				}
 				break
@@ -102,6 +103,7 @@ export class OpenRouterHandler implements ApiHandler {
 				createParams = {
 					model: this.getModel().id,
 					max_tokens: this.getModel().info.maxTokens,
+					temperature: 0.2,
 					messages: openAiMessages,
 					tools: openAiTools,
 					tool_choice: "auto",
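OpenRouter exposes an OpenAI-compatible schema and forwards sampling parameters to the underlying provider, so temperature lands in the same place. Note that the first branch here shares the convertToO1Messages path, so the o1 caveat flagged under openai-native.ts may apply to it as well.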

src/api/vertex.ts (+1 −0)

@@ -25,6 +25,7 @@ export class VertexHandler implements ApiHandler {
 		const message = await this.client.messages.create({
 			model: this.getModel().id,
 			max_tokens: this.getModel().info.maxTokens,
+			temperature: 0.2,
 			system: systemPrompt,
 			messages,
 			tools,
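The Vertex handler appears to use @anthropic-ai/vertex-sdk, which mirrors the core Anthropic messages.create surface the same way the Bedrock SDK does. With this file, the 0.2 setting covers all eight provider integrations.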