diff --git a/packages/noodl-editor/package.json b/packages/noodl-editor/package.json
index 0321822..1fcad3d 100644
--- a/packages/noodl-editor/package.json
+++ b/packages/noodl-editor/package.json
@@ -4,7 +4,7 @@
   "description": "Full stack low code React app builder",
   "author": "The Low Code Foundation",
   "homepage": "https://thelowcodefoundation.com",
-  "version": "1.0.1",
+  "version": "1.1.0",
   "main": "src/main/main.js",
   "scripts": {
     "build": "npx ts-node -P ./tsconfig.build.json ./scripts/build.ts",
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/_backend/ReAct.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/_backend/ReAct.ts
index de7def1..230deb7 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/_backend/ReAct.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/_backend/ReAct.ts
@@ -39,7 +39,7 @@ export abstract class ReActAgent {
         ...history
       ],
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0
       },
       onStream: (_, text) => {
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/api.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/api.ts
index a714a27..03ccb57 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/api.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/api.ts
@@ -38,7 +38,7 @@ export namespace AiAssistantApi {
       version: '0.0.0',
       models: [
         {
-          name: 'gpt-4',
+          name: 'gpt-4o-mini',
           displayName: 'gpt-4 (8k context)',
           promptTokenCost: 0.03,
           completionTokenCost: 0.06
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/interfaces.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/interfaces.ts
index 1892a22..6e68fc5 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/interfaces.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/interfaces.ts
@@ -13,14 +13,14 @@ export type AiCopilotTextProviders = {
   max_tokens?: number;
 }
 
-export type ModelName = 'gpt-3.5-turbo' | 'gpt-4';
+export type ModelName = 'gpt-3.5-turbo' | 'gpt-4o-mini';
 
 export type AiCopilotChatProviders = {
   model: 'gpt-3.5-turbo',
   temperature?: number;
   max_tokens?: number;
 } | {
-  model: 'gpt-4',
+  model: 'gpt-4o-mini',
   temperature?: number;
   max_tokens?: number;
 }
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/chart.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/chart.ts
index 97db00e..82c7979 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/chart.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/chart.ts
@@ -49,7 +49,7 @@ export const template: AiNodeTemplate = {
 
     const fullCodeText = await chatStream({
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-crud.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-crud.ts
index 78ea0c3..5536001 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-crud.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-crud.ts
@@ -71,7 +71,7 @@ export const template: AiNodeTemplate = {
     const fullText = await chatStreamXml({
       messages,
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         // model: 'gpt-3.5-turbo',
         // The next context doesnt work with GPT-3.5
         temperature: 0.5,
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-query-database/gpt-4-version.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-query-database/gpt-4-version.ts
index b17ee55..0a159e5 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-query-database/gpt-4-version.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function-query-database/gpt-4-version.ts
@@ -41,7 +41,7 @@ export async function execute(
 
   const fullCodeText = await chatStream({
     provider: {
-      model: 'gpt-4',
+      model: 'gpt-4o-mini',
      temperature: 0.0,
      max_tokens: 2048
    },
@@ -112,7 +112,7 @@ export async function execute(
      { role: 'user', content: codeText }
    ],
    provider: {
-      model: 'gpt-4',
+      model: 'gpt-4o-mini',
      temperature: 0.0,
      max_tokens: 2048
    },
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function.ts
index 61ea2f2..77fae5b 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function.ts
@@ -23,7 +23,7 @@ export const template: AiNodeTemplate = {
     console.log('using version: ', version);
 
     try {
-      if ((version === 'enterprise' && OpenAiStore.getModel() === 'gpt-4') || version === 'full-beta') {
+      if ((version === 'enterprise' && OpenAiStore.getModel() === 'gpt-4o-mini') || version === 'full-beta') {
         await GPT4.execute(context);
       } else {
         await GPT3.execute(context);
diff --git a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function/gpt-4-version.ts b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function/gpt-4-version.ts
index b820717..bcdd73e 100644
--- a/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function/gpt-4-version.ts
+++ b/packages/noodl-editor/src/editor/src/models/AiAssistant/templates/function/gpt-4-version.ts
@@ -56,7 +56,7 @@ A["FUNCTION"]`;
       provider: {
         // NOTE: Tried with GPT 3.5 here before.
         // Then this question doesnt work: "Can you make a function that starts recording from the microphone when it gets a start signal and stops recording when it gets a stop signal"
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0
       }
     });
@@ -100,7 +100,7 @@ A["FUNCTION"]`;
 
     const fullText = await chatStream({
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },
@@ -156,7 +156,7 @@ A["FUNCTION"]`;
 
     const fullCodeText = await chatStream({
       provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
         temperature: 0.0,
         max_tokens: 2048
       },
@@ -251,7 +251,7 @@ A["FUNCTION"]`;
       }
     ],
     provider: {
-      model: 'gpt-4',
+      model: 'gpt-4o-mini',
      temperature: 0.0,
      max_tokens: 2048
    },
diff --git a/packages/noodl-editor/src/editor/src/store/AiAssistantStore.ts b/packages/noodl-editor/src/editor/src/store/AiAssistantStore.ts
index 34b03e3..9e625a2 100644
--- a/packages/noodl-editor/src/editor/src/store/AiAssistantStore.ts
+++ b/packages/noodl-editor/src/editor/src/store/AiAssistantStore.ts
@@ -20,7 +20,7 @@ const AI_ASSISTANT_MODEL_KEY = 'aiAssistant.model';
 
 export type AiVersion = 'disabled' | 'full-beta' | 'enterprise';
 
-export type AiModel = 'gpt-3' | 'gpt-4';
+export type AiModel = 'gpt-3' | 'gpt-4o-mini';
 
 export const OpenAiStore = {
   isEnabled(): boolean {
diff --git a/packages/noodl-editor/src/editor/src/views/Clippy/Clippy.tsx b/packages/noodl-editor/src/editor/src/views/Clippy/Clippy.tsx
index c15c7e5..43721b5 100644
--- a/packages/noodl-editor/src/editor/src/views/Clippy/Clippy.tsx
+++ b/packages/noodl-editor/src/editor/src/views/Clippy/Clippy.tsx
@@ -79,7 +79,7 @@ export default function Clippy() {
     const version = OpenAiStore.getVersion();
     if (version === 'enterprise') {
       setHasApiKey(true);
-      setHasGPT4(OpenAiStore.getModel() === 'gpt-4');
+      setHasGPT4(OpenAiStore.getModel() === 'gpt-4o-mini');
     } else if (version === 'full-beta') {
       setHasApiKey(OpenAiStore.getIsAiApiKeyVerified());
     } else {
@@ -94,10 +94,10 @@ export default function Clippy() {
     async function doIt() {
       const version = OpenAiStore.getVersion();
       if (version === 'enterprise') {
-        setHasGPT4(OpenAiStore.getModel() === 'gpt-4');
+        setHasGPT4(OpenAiStore.getModel() === 'gpt-4o-mini');
       } else {
         const models = await verifyOpenAiApiKey(OpenAiStore.getApiKey());
-        setHasGPT4(!!models['gpt-4']);
+        setHasGPT4(!!models['gpt-4o-mini']);
       }
     }
 
diff --git a/packages/noodl-editor/src/editor/src/views/Clippy/Commands/SuggestCommand.tsx b/packages/noodl-editor/src/editor/src/views/Clippy/Commands/SuggestCommand.tsx
index 6c94bd1..2b77fde 100644
--- a/packages/noodl-editor/src/editor/src/views/Clippy/Commands/SuggestCommand.tsx
+++ b/packages/noodl-editor/src/editor/src/views/Clippy/Commands/SuggestCommand.tsx
@@ -44,7 +44,7 @@ export async function handleSuggestionCommand(prompt: string, statusCallback: (s
     { role: 'user', content: p }
   ];
 
-  const response = await makeChatRequest('gpt-4', messages);
+  const response = await makeChatRequest('gpt-4o-mini', messages);
   console.log(response);
 
   return JSON.parse(response.content);
diff --git a/packages/noodl-editor/src/editor/src/views/Clippy/Commands/UICommand.tsx b/packages/noodl-editor/src/editor/src/views/Clippy/Commands/UICommand.tsx
index b11c67e..0e6b389 100644
--- a/packages/noodl-editor/src/editor/src/views/Clippy/Commands/UICommand.tsx
+++ b/packages/noodl-editor/src/editor/src/views/Clippy/Commands/UICommand.tsx
@@ -137,7 +137,7 @@ export async function handleUICommand(
     await ctx.chatStreamXml({
       messages: messages,
      provider: {
-        model: 'gpt-4',
+        model: 'gpt-4o-mini',
        // The next context doesnt work with GPT-3.5
        temperature: 0.1
      },
diff --git a/packages/noodl-editor/src/editor/src/views/Clippy/Commands/utils.tsx b/packages/noodl-editor/src/editor/src/views/Clippy/Commands/utils.tsx
index 0b8ed33..b0bedfe 100644
--- a/packages/noodl-editor/src/editor/src/views/Clippy/Commands/utils.tsx
+++ b/packages/noodl-editor/src/editor/src/views/Clippy/Commands/utils.tsx
@@ -70,8 +70,8 @@ export async function makeChatRequest(model: string, messages: unknown[]) {
       console.error(json.error);
       return null;
     } else {
-      const promptTokenCost = model === 'gpt-4' ? 0.03 : 0.002;
-      const completionTokenCost = model === 'gpt-4' ? 0.06 : 0.002;
+      const promptTokenCost = model === 'gpt-4o-mini' ? 0.03 : 0.002;
+      const completionTokenCost = model === 'gpt-4o-mini' ? 0.06 : 0.002;
 
       let cost =
         (json.usage.completion_tokens * completionTokenCost) / 1000 + (json.usage.prompt_tokens * promptTokenCost) / 1000;
diff --git a/packages/noodl-editor/src/editor/src/views/panels/EditorSettingsPanel/sections/OpenAiSection.tsx b/packages/noodl-editor/src/editor/src/views/panels/EditorSettingsPanel/sections/OpenAiSection.tsx
index 66fadbe..74b90bf 100644
--- a/packages/noodl-editor/src/editor/src/views/panels/EditorSettingsPanel/sections/OpenAiSection.tsx
+++ b/packages/noodl-editor/src/editor/src/views/panels/EditorSettingsPanel/sections/OpenAiSection.tsx
@@ -29,7 +29,7 @@ export function OpenAiSection() {
   async function onVerifyApiKey() {
     const models = await verifyOpenAiApiKey(apiKey);
     if (models) {
-      const haveGpt4 = !!models['gpt-4'];
+      const haveGpt4 = !!models['gpt-4o-mini'];
       if (haveGpt4) {
         OpenAiStore.setIsAiApiKeyVerified(true);
         ToastLayer.showSuccess('OpenAI API Key is valid with GPT-4!');
@@ -78,7 +78,7 @@ export function OpenAiSection() {
           properties={{
             options: [
               { label: 'gpt-3', value: 'gpt-3' },
-              { label: 'gpt-4', value: 'gpt-4' }
+              { label: 'gpt-4', value: 'gpt-4o-mini' }
             ]
           }}
           onChange={(value: AiModel) => {
@@ -121,7 +121,7 @@ export function OpenAiSection() {
           properties={{
             options: [
               { label: 'gpt-3', value: 'gpt-3' },
-              { label: 'gpt-4', value: 'gpt-4' }
+              { label: 'gpt-4', value: 'gpt-4o-mini' }
             ]
           }}
           onChange={(value: AiModel) => {
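
Reviewer note on the pricing logic touched in utils.tsx above: makeChatRequest converts the token counts reported by the OpenAI response in json.usage into a dollar cost using hard-coded per-1,000-token rates, and this diff only renames the model the rates are keyed on (0.03 and 0.06 are carried over unchanged from the gpt-4 branch). Below is a minimal standalone TypeScript sketch of that arithmetic so the change is easy to sanity-check; the Usage type and the estimateCostUsd name are hypothetical helpers invented for illustration and are not part of the codebase.

// Standalone sketch of the cost arithmetic in makeChatRequest (utils.tsx).
// Usage and estimateCostUsd are hypothetical names; the rates mirror the
// constants in the diff, not an official price list.
type Usage = { prompt_tokens: number; completion_tokens: number };

function estimateCostUsd(model: string, usage: Usage): number {
  // USD per 1,000 tokens, as hard-coded in utils.tsx
  const promptTokenCost = model === 'gpt-4o-mini' ? 0.03 : 0.002;
  const completionTokenCost = model === 'gpt-4o-mini' ? 0.06 : 0.002;
  return (
    (usage.completion_tokens * completionTokenCost) / 1000 +
    (usage.prompt_tokens * promptTokenCost) / 1000
  );
}

// Worked example: 1200 prompt tokens + 300 completion tokens on 'gpt-4o-mini'
// => 1200 * 0.03 / 1000 + 300 * 0.06 / 1000 = 0.036 + 0.018 = 0.054 USD
console.log(estimateCostUsd('gpt-4o-mini', { prompt_tokens: 1200, completion_tokens: 300 }));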