support GPT-4o

This commit is contained in:
Abi Raja 2024-05-13 15:24:47 -04:00
parent a5fe0960d8
commit 8e6a9c48f8
5 changed files with 14 additions and 5 deletions

View File

@@ -13,6 +13,7 @@ from utils import pprint_prompt
 class Llm(Enum):
     GPT_4_VISION = "gpt-4-vision-preview"
     GPT_4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09"
+    GPT_4O_2024_05_13 = "gpt-4o-2024-05-13"
     CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
     CLAUDE_3_OPUS = "claude-3-opus-20240229"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
@@ -47,7 +48,11 @@ async def stream_openai_response(
     }
     # Add 'max_tokens' only if the model is a GPT4 vision or Turbo model
-    if model == Llm.GPT_4_VISION or model == Llm.GPT_4_TURBO_2024_04_09:
+    if (
+        model == Llm.GPT_4_VISION
+        or model == Llm.GPT_4_TURBO_2024_04_09
+        or model == Llm.GPT_4O_2024_05_13
+    ):
         params["max_tokens"] = 4096
     stream = await client.chat.completions.create(**params)  # type: ignore

View File

@@ -85,7 +85,7 @@ async def stream_code(websocket: WebSocket):
     # Read the model from the request. Fall back to default if not provided.
     code_generation_model_str = params.get(
-        "codeGenerationModel", Llm.GPT_4_VISION.value
+        "codeGenerationModel", Llm.GPT_4O_2024_05_13.value
     )
     try:
         code_generation_model = convert_frontend_str_to_llm(code_generation_model_str)
@@ -112,6 +112,7 @@ async def stream_code(websocket: WebSocket):
     if not openai_api_key and (
         code_generation_model == Llm.GPT_4_VISION
         or code_generation_model == Llm.GPT_4_TURBO_2024_04_09
+        or code_generation_model == Llm.GPT_4O_2024_05_13
     ):
         print("OpenAI API key not found")
         await throw_error(

View File

@@ -13,8 +13,8 @@ from evals.config import EVALS_DIR
 from evals.core import generate_code_core
 from evals.utils import image_to_data_url

-STACK = "html_tailwind"
-MODEL = Llm.GPT_4_TURBO_2024_04_09
+STACK = "ionic_tailwind"
+MODEL = Llm.GPT_4O_2024_05_13
 N = 1  # Number of outputs to generate

View File

@@ -63,7 +63,7 @@ function App() {
       isImageGenerationEnabled: true,
       editorTheme: EditorTheme.COBALT,
       generatedCodeConfig: Stack.HTML_TAILWIND,
-      codeGenerationModel: CodeGenerationModel.GPT_4_TURBO_2024_04_09,
+      codeGenerationModel: CodeGenerationModel.GPT_4O_2024_05_13,
       // Only relevant for hosted version
       isTermOfServiceAccepted: false,
     },

View File

@@ -1,5 +1,7 @@
 // Keep in sync with backend (llm.py)
+// Order here matches dropdown order
 export enum CodeGenerationModel {
+  GPT_4O_2024_05_13 = "gpt-4o-2024-05-13",
   GPT_4_TURBO_2024_04_09 = "gpt-4-turbo-2024-04-09",
   GPT_4_VISION = "gpt_4_vision",
   CLAUDE_3_SONNET = "claude_3_sonnet",
@@ -9,6 +11,7 @@ export enum CodeGenerationModel {
 export const CODE_GENERATION_MODEL_DESCRIPTIONS: {
   [key in CodeGenerationModel]: { name: string; inBeta: boolean };
 } = {
+  "gpt-4o-2024-05-13": { name: "GPT-4O 🌟", inBeta: false },
   "gpt-4-turbo-2024-04-09": { name: "GPT-4 Turbo (Apr 2024)", inBeta: false },
   gpt_4_vision: { name: "GPT-4 Vision (Nov 2023)", inBeta: false },
   claude_3_sonnet: { name: "Claude 3 Sonnet", inBeta: false },