organize evals code into the evals dir
This commit is contained in:
parent
aff0ad0b91
commit
b8bce72d23
@ -1 +0,0 @@
|
||||
EVALS_DIR = "./evals"
|
||||
0
backend/evals/__init__.py
Normal file
0
backend/evals/__init__.py
Normal file
1
backend/evals/config.py
Normal file
1
backend/evals/config.py
Normal file
@ -0,0 +1 @@
|
||||
EVALS_DIR = "./evals_data"
|
||||
29
backend/evals/core.py
Normal file
29
backend/evals/core.py
Normal file
@ -0,0 +1,29 @@
|
||||
import os
|
||||
|
||||
from llm import stream_openai_response
|
||||
from prompts import assemble_prompt
|
||||
from prompts.types import Stack
|
||||
from utils import pprint_prompt
|
||||
|
||||
|
||||
async def generate_code_core(image_url: str, stack: Stack) -> str:
    """Generate code for a screenshot URL targeting the given stack.

    Assembles the prompt for *stack*, streams a completion from OpenAI,
    and returns the full generated code.

    Args:
        image_url: URL (or data URL) of the screenshot to generate code from.
        stack: Target technology stack for the generated code.

    Returns:
        The complete generated code as a string.

    Raises:
        Exception: If the OPENAI_API_KEY environment variable is not set.
    """
    # Fail fast: validate credentials before doing any prompt work,
    # so a missing key doesn't waste time assembling/printing prompts.
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    if not openai_api_key:
        raise Exception("OpenAI API key not found")

    # No custom endpoint for eval runs; None selects the default OpenAI base URL.
    openai_base_url = None

    prompt_messages = assemble_prompt(image_url, stack)
    pprint_prompt(prompt_messages)

    async def process_chunk(content: str):
        # Streaming chunks are intentionally ignored for evals;
        # only the final completion matters.
        pass

    completion = await stream_openai_response(
        prompt_messages,
        api_key=openai_api_key,
        base_url=openai_base_url,
        # Pass the coroutine function directly; the lambda wrapper was redundant.
        callback=process_chunk,
    )

    return completion
||||
@ -1,8 +1,8 @@
|
||||
import os
|
||||
from fastapi import APIRouter
|
||||
from pydantic import BaseModel
|
||||
from eval_utils import image_to_data_url
|
||||
from eval_config import EVALS_DIR
|
||||
from evals.utils import image_to_data_url
|
||||
from evals.config import EVALS_DIR
|
||||
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@ -1,42 +1,15 @@
|
||||
# Load environment variables first
|
||||
from typing import Any, Coroutine
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from eval_config import EVALS_DIR
|
||||
from eval_utils import image_to_data_url
|
||||
from prompts.types import Stack
|
||||
|
||||
load_dotenv()
|
||||
|
||||
import os
|
||||
from llm import stream_openai_response
|
||||
from prompts import assemble_prompt
|
||||
from typing import Any, Coroutine
|
||||
import asyncio
|
||||
|
||||
from utils import pprint_prompt
|
||||
|
||||
|
||||
async def generate_code_core(image_url: str, stack: Stack) -> str:
    """Produce code from a screenshot URL for the requested stack.

    Builds the prompt messages, streams the completion from OpenAI,
    and returns the finished output as a string.
    """
    messages = assemble_prompt(image_url, stack)

    api_key = os.environ.get("OPENAI_API_KEY")
    base_url = None  # default OpenAI endpoint

    pprint_prompt(messages)

    async def on_chunk(chunk: str):
        # Intentionally a no-op: streamed partial output is not surfaced here.
        pass

    if api_key is None or api_key == "":
        raise Exception("OpenAI API key not found")

    result = await stream_openai_response(
        messages,
        api_key=api_key,
        base_url=base_url,
        callback=on_chunk,
    )
    return result
|
||||
from evals.config import EVALS_DIR
|
||||
from evals.core import generate_code_core
|
||||
from evals.utils import image_to_data_url
|
||||
|
||||
|
||||
async def main():
|
||||
Loading…
Reference in New Issue
Block a user