Ver código fonte

优化chatgpt工具,新增gpt-3.5模型的支持

[email protected] 3 anos atrás
pai
commit
8f0b537d73

+ 2 - 2
apps/background/inject-tools.js

@@ -37,7 +37,7 @@ export default (() => {
                             }, function () {
                                 chrome.scripting.executeScript({
                                     target: {tabId, allFrames: codeConfig.allFrames},
-                                    func: function(code){evalCore.getEvalInstance(window)(code)},
+                                    func: function(code){try{evalCore.getEvalInstance(window)(code)}catch(x){}},
                                     args: [codeConfig.js]
                                 }, function () {
                                     callback && callback.apply(this, arguments);
@@ -56,7 +56,7 @@ export default (() => {
                         // 注入js脚本
                         chrome.scripting.executeScript({
                             target: {tabId, allFrames: codeConfig.allFrames},
-                            func: function(code){evalCore.getEvalInstance(window)(code)},
+                            func: function(code){try{evalCore.getEvalInstance(window)(code)}catch(x){}},
                             args: [codeConfig.js]
                         }, function () {
                             callback && callback.apply(this, arguments);

+ 1 - 1
apps/background/monkey.js

@@ -40,7 +40,7 @@ export default (() => {
                         let scripts = '(' + ((monkey) => {
                             let injectFunc = () => {
                                 // 执行脚本
-                                evalCore.getEvalInstance(window)(monkey.mScript);
+                                try{evalCore.getEvalInstance(window)(monkey.mScript)}catch(x){}
 
                                 parseInt(monkey.mRefresh) && setTimeout(() => {
                                     location.reload(true);

+ 2 - 0
apps/chatgpt/index.html

@@ -26,6 +26,8 @@
                     <fieldset>
                         <legend>智能模型</legend>
                         <div>
+                            <input type="radio" name="rdoChatModel" value="gpt-3.5-turbo" v-model="chatModel" id="cm_t4"><label for="cm_t4">gpt-3.5-turbo</label>
+                            <input type="radio" name="rdoChatModel" value="gpt-3.5-turbo-0301" v-model="chatModel" id="cm_t5"><label for="cm_t5">gpt-3.5-turbo-0301</label>
                             <input type="radio" name="rdoChatModel" value="text-davinci-002" v-model="chatModel" id="cm_t2"><label for="cm_t2">text-davinci-002</label>
                             <input type="radio" name="rdoChatModel" value="text-davinci-003" v-model="chatModel" id="cm_t3"><label for="cm_t3">text-davinci-003</label>
                         </div>

+ 20 - 4
apps/chatgpt/index.js

@@ -11,8 +11,9 @@ new Vue({
     data: {
         prompt: '',
         imgSize: '512x512',
-        chatModel: 'text-davinci-003',
+        chatModel: 'gpt-3.5-turbo',
         showSettingPanel:false,
+        isGPT35:true,
         demos: [
             'FeHelper是什么?怎么安装?',
             '用Js写一个冒泡排序的Demo',
@@ -61,6 +62,7 @@ new Vue({
         });
         Awesome.StorageMgr.get('CHATGPT_CHAT_MODEL').then(model => {
             this.chatModel = model || 'text-davinci-003';
+            this.isGPT35 = /^gpt\-3\.5\-/.test(this.chatModel);
         });
         Awesome.StorageMgr.get('CHATGPT_IMAGE_SIZE').then(size => {
             this.imgSize = size || '512x512';
@@ -142,12 +144,26 @@ new Vue({
             if(/画一幅/.test(message)) {
                 return this.drawImage(message);
             }
+            let url = `https://api.openai.com/v1/completions`;
+            let data = {model:this.chatModel,temperature:0,max_tokens:2048};
+            if(this.isGPT35){
+                url = `https://api.openai.com/v1/chat/completions`;
+                data.messages = [{role:'user',content: message}];
+            }else{
+                data.prompt = message;
+            }
             this.chatWithOpenAI({
-                url:'https://api.openai.com/v1/completions',
-                data: {model:'text-davinci-003',temperature:0,max_tokens:2048,prompt:message},
+                url:url,
+                data: data,
                 buildResponse: json => {
                     return new Promise(resolve => {
-                        return resolve(marked(json.choices[0].text.replace(/^\?\n\n/,'')));
                        let respText;
                        if(this.isGPT35) {
                            respText = json.choices[0].message.content.replace(/^\?\n\n/,'');
+                        }else{
+                            respText = json.choices[0].text.replace(/^\?\n\n/,'');
+                        }
+                        return resolve(marked(respText));
                     });
                 }
             });