Feat: OpenAI Base URL supported

This commit is contained in:
Nothing1024 2023-11-21 16:05:23 +08:00
parent b283d39d81
commit e69269d844
6 changed files with 43 additions and 9 deletions

View File

@ -5,8 +5,8 @@ from openai import AsyncOpenAI
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
async def process_tasks(prompts, api_key): async def process_tasks(prompts, api_key, base_url):
tasks = [generate_image(prompt, api_key) for prompt in prompts] tasks = [generate_image(prompt, api_key, base_url) for prompt in prompts]
results = await asyncio.gather(*tasks, return_exceptions=True) results = await asyncio.gather(*tasks, return_exceptions=True)
processed_results = [] processed_results = []
@ -20,8 +20,8 @@ async def process_tasks(prompts, api_key):
return processed_results return processed_results
async def generate_image(prompt, api_key): async def generate_image(prompt, api_key, base_url):
client = AsyncOpenAI(api_key=api_key) client = AsyncOpenAI(api_key=api_key, base_url=base_url)
image_params = { image_params = {
"model": "dall-e-3", "model": "dall-e-3",
"quality": "standard", "quality": "standard",
@ -60,7 +60,7 @@ def create_alt_url_mapping(code):
return mapping return mapping
async def generate_images(code, api_key, image_cache): async def generate_images(code, api_key, base_url, image_cache):
# Find all images # Find all images
soup = BeautifulSoup(code, "html.parser") soup = BeautifulSoup(code, "html.parser")
images = soup.find_all("img") images = soup.find_all("img")
@ -87,7 +87,7 @@ async def generate_images(code, api_key, image_cache):
return code return code
# Generate images # Generate images
results = await process_tasks(prompts, api_key) results = await process_tasks(prompts, api_key, base_url)
# Create a dict mapping alt text to image URL # Create a dict mapping alt text to image URL
mapped_image_urls = dict(zip(prompts, results)) mapped_image_urls = dict(zip(prompts, results))

View File

@ -6,9 +6,9 @@ MODEL_GPT_4_VISION = "gpt-4-vision-preview"
async def stream_openai_response( async def stream_openai_response(
messages, api_key: str, callback: Callable[[str], Awaitable[None]] messages, api_key: str, base_url:str, callback: Callable[[str], Awaitable[None]]
): ):
client = AsyncOpenAI(api_key=api_key) client = AsyncOpenAI(api_key=api_key, base_url=base_url)
model = MODEL_GPT_4_VISION model = MODEL_GPT_4_VISION

View File

@ -73,6 +73,13 @@ async def stream_code_test(websocket: WebSocket):
openai_api_key = os.environ.get("OPENAI_API_KEY") openai_api_key = os.environ.get("OPENAI_API_KEY")
if openai_api_key: if openai_api_key:
print("Using OpenAI API key from environment variable") print("Using OpenAI API key from environment variable")
if params["openAiBaseURL"]:
openai_base_url = params["openAiBaseURL"]
print("Using OpenAI Base URL from client-side settings dialog")
else:
openai_base_url = os.environ.get("OPENAI_BASE_URL")
if openai_base_url:
print("Using OpenAI Base URL from environment variable")
if not openai_api_key: if not openai_api_key:
print("OpenAI API key not found") print("OpenAI API key not found")
@ -83,6 +90,11 @@ async def stream_code_test(websocket: WebSocket):
} }
) )
return return
# openai_base_url="https://flag.smarttrot.com/v1"
if not openai_base_url:
openai_base_url = None
    print("Using Official OpenAI Base URL")
should_generate_images = ( should_generate_images = (
params["isImageGenerationEnabled"] params["isImageGenerationEnabled"]
@ -117,6 +129,7 @@ async def stream_code_test(websocket: WebSocket):
completion = await stream_openai_response( completion = await stream_openai_response(
prompt_messages, prompt_messages,
api_key=openai_api_key, api_key=openai_api_key,
base_url = openai_base_url,
callback=lambda x: process_chunk(x), callback=lambda x: process_chunk(x),
) )
@ -129,7 +142,7 @@ async def stream_code_test(websocket: WebSocket):
{"type": "status", "value": "Generating images..."} {"type": "status", "value": "Generating images..."}
) )
updated_html = await generate_images( updated_html = await generate_images(
completion, api_key=openai_api_key, image_cache=image_cache completion, api_key=openai_api_key, base_url=openai_base_url, image_cache=image_cache
) )
else: else:
updated_html = completion updated_html = completion

View File

@ -37,6 +37,7 @@ function App() {
const [settings, setSettings] = usePersistedState<Settings>( const [settings, setSettings] = usePersistedState<Settings>(
{ {
openAiApiKey: null, openAiApiKey: null,
openAiBaseURL: null,
screenshotOneApiKey: null, screenshotOneApiKey: null,
isImageGenerationEnabled: true, isImageGenerationEnabled: true,
editorTheme: "cobalt", editorTheme: "cobalt",

View File

@ -76,6 +76,25 @@ function SettingsDialog({ settings, setSettings }: Props) {
} }
/> />
<Label htmlFor="openai-base-url">
<div>OpenAI Base URL</div>
<div className="font-light mt-2 leading-relaxed">
Replace this to route requests through a third-party API server.
</div>
</Label>
<Input
id="openai-base-url"
placeholder="OpenAI Base URL"
value={settings.openAiBaseURL || ""}
onChange={(e) =>
setSettings((s) => ({
...s,
openAiBaseURL: e.target.value,
}))
}
/>
<Label htmlFor="screenshot-one-api-key"> <Label htmlFor="screenshot-one-api-key">
<div>ScreenshotOne API key</div> <div>ScreenshotOne API key</div>
<div className="font-light mt-2 leading-relaxed"> <div className="font-light mt-2 leading-relaxed">

View File

@ -1,5 +1,6 @@
export interface Settings { export interface Settings {
openAiApiKey: string | null; openAiApiKey: string | null;
openAiBaseURL: string | null;
screenshotOneApiKey: string | null; screenshotOneApiKey: string | null;
isImageGenerationEnabled: boolean; isImageGenerationEnabled: boolean;
editorTheme: string; editorTheme: string;