Changed gpt-4 to gpt-4o-mini; verify the API is no longer working

This commit is contained in:
Richard Osborne
2024-09-10 10:27:23 +02:00
parent 46918e5e4b
commit 321b6a367c
15 changed files with 25 additions and 25 deletions

View File

@@ -4,7 +4,7 @@
"description": "Full stack low code React app builder",
"author": "The Low Code Foundation",
"homepage": "https://thelowcodefoundation.com",
"version": "1.0.1",
"version": "1.1.0",
"main": "src/main/main.js",
"scripts": {
"build": "npx ts-node -P ./tsconfig.build.json ./scripts/build.ts",

View File

@@ -39,7 +39,7 @@ export abstract class ReActAgent<TParams = unknown> {
...history
],
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0
},
onStream: (_, text) => {

View File

@@ -38,7 +38,7 @@ export namespace AiAssistantApi {
version: '0.0.0',
models: [
{
name: 'gpt-4',
name: 'gpt-4o-mini',
displayName: 'gpt-4 (8k context)',
promptTokenCost: 0.03,
completionTokenCost: 0.06

View File

@@ -13,14 +13,14 @@ export type AiCopilotTextProviders = {
max_tokens?: number;
}
export type ModelName = 'gpt-3.5-turbo' | 'gpt-4';
export type ModelName = 'gpt-3.5-turbo' | 'gpt-4o-mini';
export type AiCopilotChatProviders = {
model: 'gpt-3.5-turbo',
temperature?: number;
max_tokens?: number;
} | {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature?: number;
max_tokens?: number;
}

View File

@@ -49,7 +49,7 @@ export const template: AiNodeTemplate = {
const fullCodeText = await chatStream({
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0,
max_tokens: 2048
},

View File

@@ -71,7 +71,7 @@ export const template: AiNodeTemplate = {
const fullText = await chatStreamXml({
messages,
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
// model: 'gpt-3.5-turbo',
// The next context doesnt work with GPT-3.5
temperature: 0.5,

View File

@@ -41,7 +41,7 @@ export async function execute(
const fullCodeText = await chatStream({
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0,
max_tokens: 2048
},
@@ -112,7 +112,7 @@ export async function execute(
{ role: 'user', content: codeText }
],
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0,
max_tokens: 2048
},

View File

@@ -23,7 +23,7 @@ export const template: AiNodeTemplate = {
console.log('using version: ', version);
try {
if ((version === 'enterprise' && OpenAiStore.getModel() === 'gpt-4') || version === 'full-beta') {
if ((version === 'enterprise' && OpenAiStore.getModel() === 'gpt-4o-mini') || version === 'full-beta') {
await GPT4.execute(context);
} else {
await GPT3.execute(context);

View File

@@ -56,7 +56,7 @@ A["FUNCTION"]`;
provider: {
// NOTE: Tried with GPT 3.5 here before.
// Then this question doesnt work: "Can you make a function that starts recording from the microphone when it gets a start signal and stops recording when it gets a stop signal"
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0
}
});
@@ -100,7 +100,7 @@ A["FUNCTION"]`;
const fullText = await chatStream({
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0,
max_tokens: 2048
},
@@ -156,7 +156,7 @@ A["FUNCTION"]`;
const fullCodeText = await chatStream({
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0,
max_tokens: 2048
},
@@ -251,7 +251,7 @@ A["FUNCTION"]`;
}
],
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
temperature: 0.0,
max_tokens: 2048
},

View File

@@ -20,7 +20,7 @@ const AI_ASSISTANT_MODEL_KEY = 'aiAssistant.model';
export type AiVersion = 'disabled' | 'full-beta' | 'enterprise';
export type AiModel = 'gpt-3' | 'gpt-4';
export type AiModel = 'gpt-3' | 'gpt-4o-mini';
export const OpenAiStore = {
isEnabled(): boolean {

View File

@@ -79,7 +79,7 @@ export default function Clippy() {
const version = OpenAiStore.getVersion();
if (version === 'enterprise') {
setHasApiKey(true);
setHasGPT4(OpenAiStore.getModel() === 'gpt-4');
setHasGPT4(OpenAiStore.getModel() === 'gpt-4o-mini');
} else if (version === 'full-beta') {
setHasApiKey(OpenAiStore.getIsAiApiKeyVerified());
} else {
@@ -94,10 +94,10 @@ export default function Clippy() {
async function doIt() {
const version = OpenAiStore.getVersion();
if (version === 'enterprise') {
setHasGPT4(OpenAiStore.getModel() === 'gpt-4');
setHasGPT4(OpenAiStore.getModel() === 'gpt-4o-mini');
} else {
const models = await verifyOpenAiApiKey(OpenAiStore.getApiKey());
setHasGPT4(!!models['gpt-4']);
setHasGPT4(!!models['gpt-4o-mini']);
}
}

View File

@@ -44,7 +44,7 @@ export async function handleSuggestionCommand(prompt: string, statusCallback: (s
{ role: 'user', content: p }
];
const response = await makeChatRequest('gpt-4', messages);
const response = await makeChatRequest('gpt-4o-mini', messages);
console.log(response);
return JSON.parse(response.content);

View File

@@ -137,7 +137,7 @@ export async function handleUICommand(
await ctx.chatStreamXml({
messages: messages,
provider: {
model: 'gpt-4',
model: 'gpt-4o-mini',
// The next context doesnt work with GPT-3.5
temperature: 0.1
},

View File

@@ -70,8 +70,8 @@ export async function makeChatRequest(model: string, messages: unknown[]) {
console.error(json.error);
return null;
} else {
const promptTokenCost = model === 'gpt-4' ? 0.03 : 0.002;
const completionTokenCost = model === 'gpt-4' ? 0.06 : 0.002;
const promptTokenCost = model === 'gpt-4o-mini' ? 0.03 : 0.002;
const completionTokenCost = model === 'gpt-4o-mini' ? 0.06 : 0.002;
let cost =
(json.usage.completion_tokens * completionTokenCost) / 1000 + (json.usage.prompt_tokens * promptTokenCost) / 1000;

View File

@@ -29,7 +29,7 @@ export function OpenAiSection() {
async function onVerifyApiKey() {
const models = await verifyOpenAiApiKey(apiKey);
if (models) {
const haveGpt4 = !!models['gpt-4'];
const haveGpt4 = !!models['gpt-4o-mini'];
if (haveGpt4) {
OpenAiStore.setIsAiApiKeyVerified(true);
ToastLayer.showSuccess('OpenAI API Key is valid with GPT-4!');
@@ -78,7 +78,7 @@ export function OpenAiSection() {
properties={{
options: [
{ label: 'gpt-3', value: 'gpt-3' },
{ label: 'gpt-4', value: 'gpt-4' }
{ label: 'gpt-4', value: 'gpt-4o-mini' }
]
}}
onChange={(value: AiModel) => {
@@ -121,7 +121,7 @@ export function OpenAiSection() {
properties={{
options: [
{ label: 'gpt-3', value: 'gpt-3' },
{ label: 'gpt-4', value: 'gpt-4' }
{ label: 'gpt-4', value: 'gpt-4o-mini' }
]
}}
onChange={(value: AiModel) => {