Mirror of https://github.com/The-Low-Code-Foundation/OpenNoodl.git, synced 2026-01-12 15:22:55 +01:00

Changed gpt-4 to gpt-4o-mini; API key verification no longer working
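The commit message says API key verification stopped working. The function involved, verifyOpenAiApiKey, is not itself changed in this diff, but the hunks below index its result by model id (models['gpt-4o-mini']). Below is a hypothetical sketch of the shape those call sites assume, written against OpenAI's GET /v1/models endpoint; the function name matches the repo, but the body is illustrative, not the actual implementation:

// Hypothetical sketch, NOT the repo's implementation: assumes the verifier
// lists the account's models via GET /v1/models and returns a map keyed by
// model id, or null when the key is rejected.
type ModelMap = Record<string, boolean>;

async function verifyOpenAiApiKey(apiKey: string): Promise<ModelMap | null> {
  const res = await fetch('https://api.openai.com/v1/models', {
    headers: { Authorization: `Bearer ${apiKey}` }
  });
  if (!res.ok) return null; // e.g. 401: invalid or revoked key

  const json = await res.json();
  const models: ModelMap = {};
  for (const m of json.data) {
    models[m.id] = true; // e.g. models['gpt-4o-mini'] = true
  }
  return models;
}

Under that assumption, checks such as !!models['gpt-4o-mini'] pass only when the listing contains that exact id, so the rename has to agree with whatever ids the verifier actually reports.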
@@ -4,7 +4,7 @@
   "description": "Full stack low code React app builder",
   "author": "The Low Code Foundation",
   "homepage": "https://thelowcodefoundation.com",
-  "version": "1.0.1",
+  "version": "1.1.0",
   "main": "src/main/main.js",
   "scripts": {
     "build": "npx ts-node -P ./tsconfig.build.json ./scripts/build.ts",

@@ -39,7 +39,7 @@ export abstract class ReActAgent<TParams = unknown> {
         ...history
       ],
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0
       },
       onStream: (_, text) => {

@@ -38,7 +38,7 @@ export namespace AiAssistantApi {
     version: '0.0.0',
     models: [
       {
-        name: 'gpt-4',
+        name: 'gpt-4o-mini',
         displayName: 'gpt-4 (8k context)',
         promptTokenCost: 0.03,
         completionTokenCost: 0.06

@@ -13,14 +13,14 @@ export type AiCopilotTextProviders = {
   max_tokens?: number;
 }
 
-export type ModelName = 'gpt-3.5-turbo' | 'gpt-4';
+export type ModelName = 'gpt-3.5-turbo' | 'gpt-4o-mini';
 
 export type AiCopilotChatProviders = {
   model: 'gpt-3.5-turbo',
   temperature?: number;
   max_tokens?: number;
 } | {
-  model: 'gpt-4',
+  model: 'gpt-4o-mini',
   temperature?: number;
   max_tokens?: number;
 }

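Since AiCopilotChatProviders is a union discriminated on model, every provider literal in the codebase must match one arm exactly, which is why the remaining hunks all touch call sites. A self-contained sketch of what the compiler now accepts and rejects (the provider value is illustrative):

// The union from the hunk above, restated locally so this compiles standalone.
type AiCopilotChatProviders =
  | { model: 'gpt-3.5-turbo'; temperature?: number; max_tokens?: number }
  | { model: 'gpt-4o-mini'; temperature?: number; max_tokens?: number };

const ok: AiCopilotChatProviders = {
  model: 'gpt-4o-mini',
  temperature: 0.0,
  max_tokens: 2048
};

// const stale: AiCopilotChatProviders = { model: 'gpt-4' };
// ^ fails to compile after this commit: 'gpt-4' is no longer in the union.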
@@ -49,7 +49,7 @@ export const template: AiNodeTemplate = {
 
     const fullCodeText = await chatStream({
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },

@@ -71,7 +71,7 @@ export const template: AiNodeTemplate = {
     const fullText = await chatStreamXml({
       messages,
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         // model: 'gpt-3.5-turbo',
         // The next context doesnt work with GPT-3.5
         temperature: 0.5,

@@ -41,7 +41,7 @@ export async function execute(
 
   const fullCodeText = await chatStream({
     provider: {
-      model: 'gpt-4',
+      model: 'gpt-4o-mini',
       temperature: 0.0,
       max_tokens: 2048
     },

@@ -112,7 +112,7 @@ export async function execute(
       { role: 'user', content: codeText }
     ],
     provider: {
-      model: 'gpt-4',
+      model: 'gpt-4o-mini',
       temperature: 0.0,
       max_tokens: 2048
     },

@@ -23,7 +23,7 @@ export const template: AiNodeTemplate = {
     console.log('using version: ', version);
 
     try {
-      if ((version === 'enterprise' && OpenAiStore.getModel() === 'gpt-4') || version === 'full-beta') {
+      if ((version === 'enterprise' && OpenAiStore.getModel() === 'gpt-4o-mini') || version === 'full-beta') {
         await GPT4.execute(context);
       } else {
         await GPT3.execute(context);

@@ -56,7 +56,7 @@ A["FUNCTION"]`;
       provider: {
         // NOTE: Tried with GPT 3.5 here before.
         // Then this question doesnt work: "Can you make a function that starts recording from the microphone when it gets a start signal and stops recording when it gets a stop signal"
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0
       }
     });

@@ -100,7 +100,7 @@ A["FUNCTION"]`;
 
     const fullText = await chatStream({
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },

@@ -156,7 +156,7 @@ A["FUNCTION"]`;
 
     const fullCodeText = await chatStream({
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },

@@ -251,7 +251,7 @@ A["FUNCTION"]`;
         }
       ],
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },

@@ -20,7 +20,7 @@ const AI_ASSISTANT_MODEL_KEY = 'aiAssistant.model';
 
 export type AiVersion = 'disabled' | 'full-beta' | 'enterprise';
 
-export type AiModel = 'gpt-3' | 'gpt-4';
+export type AiModel = 'gpt-3' | 'gpt-4o-mini';
 
 export const OpenAiStore = {
   isEnabled(): boolean {

@@ -79,7 +79,7 @@ export default function Clippy() {
     const version = OpenAiStore.getVersion();
     if (version === 'enterprise') {
       setHasApiKey(true);
-      setHasGPT4(OpenAiStore.getModel() === 'gpt-4');
+      setHasGPT4(OpenAiStore.getModel() === 'gpt-4o-mini');
     } else if (version === 'full-beta') {
       setHasApiKey(OpenAiStore.getIsAiApiKeyVerified());
     } else {

@@ -94,10 +94,10 @@ export default function Clippy() {
     async function doIt() {
       const version = OpenAiStore.getVersion();
       if (version === 'enterprise') {
-        setHasGPT4(OpenAiStore.getModel() === 'gpt-4');
+        setHasGPT4(OpenAiStore.getModel() === 'gpt-4o-mini');
       } else {
         const models = await verifyOpenAiApiKey(OpenAiStore.getApiKey());
-        setHasGPT4(!!models['gpt-4']);
+        setHasGPT4(!!models['gpt-4o-mini']);
       }
     }
 

@@ -44,7 +44,7 @@ export async function handleSuggestionCommand(prompt: string, statusCallback: (s
     { role: 'user', content: p }
   ];
 
-  const response = await makeChatRequest('gpt-4', messages);
+  const response = await makeChatRequest('gpt-4o-mini', messages);
   console.log(response);
 
   return JSON.parse(response.content);

@@ -137,7 +137,7 @@ export async function handleUICommand(
   await ctx.chatStreamXml({
     messages: messages,
     provider: {
-      model: 'gpt-4',
+      model: 'gpt-4o-mini',
       // The next context doesnt work with GPT-3.5
       temperature: 0.1
     },

@@ -70,8 +70,8 @@ export async function makeChatRequest(model: string, messages: unknown[]) {
       console.error(json.error);
       return null;
     } else {
-      const promptTokenCost = model === 'gpt-4' ? 0.03 : 0.002;
-      const completionTokenCost = model === 'gpt-4' ? 0.06 : 0.002;
+      const promptTokenCost = model === 'gpt-4o-mini' ? 0.03 : 0.002;
+      const completionTokenCost = model === 'gpt-4o-mini' ? 0.06 : 0.002;
       let cost =
         (json.usage.completion_tokens * completionTokenCost) / 1000 + (json.usage.prompt_tokens * promptTokenCost) / 1000;
 

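The cost arithmetic above bills per 1,000 tokens. A worked example with illustrative token counts; note that 0.03 and 0.06 USD per 1K are gpt-4's published rates, which this commit leaves in place while renaming the model:

// Same formula as the hunk above, with sample usage numbers.
const promptTokenCost = 0.03; // USD per 1K prompt tokens (gpt-4 rate)
const completionTokenCost = 0.06; // USD per 1K completion tokens (gpt-4 rate)

const usage = { prompt_tokens: 1200, completion_tokens: 400 }; // illustrative

const cost =
  (usage.completion_tokens * completionTokenCost) / 1000 +
  (usage.prompt_tokens * promptTokenCost) / 1000;

console.log(cost.toFixed(3)); // 0.024 + 0.036 = 0.060 USD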
@@ -29,7 +29,7 @@ export function OpenAiSection() {
   async function onVerifyApiKey() {
     const models = await verifyOpenAiApiKey(apiKey);
     if (models) {
-      const haveGpt4 = !!models['gpt-4'];
+      const haveGpt4 = !!models['gpt-4o-mini'];
       if (haveGpt4) {
         OpenAiStore.setIsAiApiKeyVerified(true);
         ToastLayer.showSuccess('OpenAI API Key is valid with GPT-4!');

@@ -78,7 +78,7 @@ export function OpenAiSection() {
         properties={{
           options: [
             { label: 'gpt-3', value: 'gpt-3' },
-            { label: 'gpt-4', value: 'gpt-4' }
+            { label: 'gpt-4', value: 'gpt-4o-mini' }
           ]
         }}
         onChange={(value: AiModel) => {

@@ -121,7 +121,7 @@ export function OpenAiSection() {
         properties={{
           options: [
             { label: 'gpt-3', value: 'gpt-3' },
-            { label: 'gpt-4', value: 'gpt-4' }
+            { label: 'gpt-4', value: 'gpt-4o-mini' }
           ]
         }}
         onChange={(value: AiModel) => {