diff --git a/Troubleshooting.md b/Troubleshooting.md
index 0704859..20fa815 100644
--- a/Troubleshooting.md
+++ b/Troubleshooting.md
@@ -1,4 +1,4 @@
-### Getting an OpenAI API key
+### Getting an OpenAI API key with GPT4-Vision model access
You don't need a ChatGPT Pro account. Screenshot to code uses API keys from your OpenAI developer account. In order to get access to the GPT4 Vision model, log into your OpenAI account and then follow these instructions:
@@ -10,6 +10,7 @@ You don't need a ChatGPT Pro account. Screenshot to code uses API keys from your
5. Go to Settings > Limits and check at the bottom of the page, your current tier has to be "Tier 1" to have GPT4 access
+6. Go to Screenshot to code and paste your API key in the Settings dialog under OpenAI key (gear icon). Your key is only stored in your browser. Never stored on our servers.
Some users have also reported that it can take up to 30 minutes after your credit purchase for the GPT4 vision model to be activated.
diff --git a/backend/access_token.py b/backend/access_token.py
new file mode 100644
index 0000000..e61ef12
--- /dev/null
+++ b/backend/access_token.py
@@ -0,0 +1,27 @@
+import json
+import os
+import httpx
+
+
+async def validate_access_token(access_code: str):
+ async with httpx.AsyncClient() as client:
+ url = (
+ "https://backend.buildpicoapps.com/screenshot_to_code/validate_access_token"
+ )
+ data = json.dumps(
+ {
+ "access_code": access_code,
+ "secret": os.environ.get("PICO_BACKEND_SECRET"),
+ }
+ )
+ headers = {"Content-Type": "application/json"}
+
+ response = await client.post(url, content=data, headers=headers)
+ response_data = response.json()
+
+ if response_data["success"]:
+ print("Access token is valid.")
+ return True
+ else:
+ print(f"Access token validation failed: {response_data['failure_reason']}")
+ return False
diff --git a/backend/image_generation.py b/backend/image_generation.py
index 080334f..ad21772 100644
--- a/backend/image_generation.py
+++ b/backend/image_generation.py
@@ -5,8 +5,8 @@ from openai import AsyncOpenAI
from bs4 import BeautifulSoup
-async def process_tasks(prompts, api_key):
- tasks = [generate_image(prompt, api_key) for prompt in prompts]
+async def process_tasks(prompts, api_key, base_url):
+ tasks = [generate_image(prompt, api_key, base_url) for prompt in prompts]
results = await asyncio.gather(*tasks, return_exceptions=True)
processed_results = []
@@ -20,8 +20,8 @@ async def process_tasks(prompts, api_key):
return processed_results
-async def generate_image(prompt, api_key):
- client = AsyncOpenAI(api_key=api_key)
+async def generate_image(prompt, api_key, base_url):
+ client = AsyncOpenAI(api_key=api_key, base_url=base_url)
image_params = {
"model": "dall-e-3",
"quality": "standard",
@@ -60,7 +60,7 @@ def create_alt_url_mapping(code):
return mapping
-async def generate_images(code, api_key, image_cache):
+async def generate_images(code, api_key, base_url, image_cache):
# Find all images
soup = BeautifulSoup(code, "html.parser")
images = soup.find_all("img")
@@ -87,7 +87,7 @@ async def generate_images(code, api_key, image_cache):
return code
# Generate images
- results = await process_tasks(prompts, api_key)
+ results = await process_tasks(prompts, api_key, base_url)
# Create a dict mapping alt text to image URL
mapped_image_urls = dict(zip(prompts, results))
diff --git a/backend/llm.py b/backend/llm.py
index b52c3c9..fdb1ba0 100644
--- a/backend/llm.py
+++ b/backend/llm.py
@@ -6,9 +6,12 @@ MODEL_GPT_4_VISION = "gpt-4-vision-preview"
async def stream_openai_response(
- messages, api_key: str, callback: Callable[[str], Awaitable[None]]
+ messages,
+ api_key: str,
+ base_url: str | None,
+ callback: Callable[[str], Awaitable[None]],
):
- client = AsyncOpenAI(api_key=api_key)
+ client = AsyncOpenAI(api_key=api_key, base_url=base_url)
model = MODEL_GPT_4_VISION
diff --git a/backend/main.py b/backend/main.py
index b8e3f4f..2808a6d 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -1,6 +1,5 @@
# Load environment variables first
from dotenv import load_dotenv
-from pydantic import BaseModel
load_dotenv()
@@ -16,6 +15,7 @@ from mock import mock_completion
from image_generation import create_alt_url_mapping, generate_images
from prompts import assemble_prompt
from routes import screenshot
+from access_token import validate_access_token
app = FastAPI(openapi_url=None, docs_url=None, redoc_url=None)
@@ -81,13 +81,27 @@ async def stream_code(websocket: WebSocket):
# Get the OpenAI API key from the request. Fall back to environment variable if not provided.
# If neither is provided, we throw an error.
- if params["openAiApiKey"]:
- openai_api_key = params["openAiApiKey"]
- print("Using OpenAI API key from client-side settings dialog")
+ openai_api_key = None
+ if "accessCode" in params and params["accessCode"]:
+ print("Access code - using platform API key")
+ if await validate_access_token(params["accessCode"]):
+ openai_api_key = os.environ.get("PLATFORM_OPENAI_API_KEY")
+ else:
+ await websocket.send_json(
+ {
+ "type": "error",
+ "value": "Invalid access code or you're out of credits. Please try again.",
+ }
+ )
+ return
else:
- openai_api_key = os.environ.get("OPENAI_API_KEY")
- if openai_api_key:
- print("Using OpenAI API key from environment variable")
+ if params["openAiApiKey"]:
+ openai_api_key = params["openAiApiKey"]
+ print("Using OpenAI API key from client-side settings dialog")
+ else:
+ openai_api_key = os.environ.get("OPENAI_API_KEY")
+ if openai_api_key:
+ print("Using OpenAI API key from environment variable")
if not openai_api_key:
print("OpenAI API key not found")
@@ -99,6 +113,22 @@ async def stream_code(websocket: WebSocket):
)
return
+ # Get the OpenAI Base URL from the request. Fall back to environment variable if not provided.
+ openai_base_url = None
+ # Disable user-specified OpenAI Base URL in prod
+ if not os.environ.get("IS_PROD"):
+ if "openAiBaseURL" in params and params["openAiBaseURL"]:
+ openai_base_url = params["openAiBaseURL"]
+ print("Using OpenAI Base URL from client-side settings dialog")
+ else:
+ openai_base_url = os.environ.get("OPENAI_BASE_URL")
+ if openai_base_url:
+ print("Using OpenAI Base URL from environment variable")
+
+ if not openai_base_url:
+ print("Using official OpenAI URL")
+
+ # Get the image generation flag from the request. Fall back to True if not provided.
should_generate_images = (
params["isImageGenerationEnabled"]
if "isImageGenerationEnabled" in params
@@ -137,6 +167,7 @@ async def stream_code(websocket: WebSocket):
completion = await stream_openai_response(
prompt_messages,
api_key=openai_api_key,
+ base_url=openai_base_url,
callback=lambda x: process_chunk(x),
)
@@ -149,7 +180,10 @@ async def stream_code(websocket: WebSocket):
{"type": "status", "value": "Generating images..."}
)
updated_html = await generate_images(
- completion, api_key=openai_api_key, image_cache=image_cache
+ completion,
+ api_key=openai_api_key,
+ base_url=openai_base_url,
+ image_cache=image_cache,
)
else:
updated_html = completion
diff --git a/frontend/index.html b/frontend/index.html
index f747064..2a7fa0e 100644
--- a/frontend/index.html
+++ b/frontend/index.html
@@ -21,6 +21,34 @@
<%- injectHead %>
- To use Screenshot to Code, you need an OpenAI API key with GPT4 vision
- access.{" "}
+ To use Screenshot to Code,{" "}
+
+ buy some credits (100 generations for $36)
+ {" "}
+ or use your own OpenAI API key with GPT4 vision access.{" "}
Follow these instructions to get yourself a key.
{" "}
- Then, paste it in the Settings dialog (gear icon above).
-
-
- Your key is only stored in your browser. Never stored on our servers. If
- you prefer, you can also run this app completely locally.{" "}
-
- See the Github project for instructions.
-
+ and paste it in the Settings dialog (gear icon above). Your key is only
+ stored in your browser. Never stored on our servers.
);
diff --git a/frontend/src/components/OutputSettingsSection.tsx b/frontend/src/components/OutputSettingsSection.tsx
index 8f4200a..9b73405 100644
--- a/frontend/src/components/OutputSettingsSection.tsx
+++ b/frontend/src/components/OutputSettingsSection.tsx
@@ -5,16 +5,18 @@ import {
SelectItem,
SelectTrigger,
} from "./ui/select";
-import { CSSOption, UIComponentOption, JSFrameworkOption, OutputSettings } from "../types";
import {
- Accordion,
- AccordionContent,
- AccordionItem,
- AccordionTrigger,
-} from "./ui/accordion";
+ CSSOption,
+ UIComponentOption,
+ JSFrameworkOption,
+ OutputSettings,
+} from "../types";
import { capitalize } from "../lib/utils";
import toast from "react-hot-toast";
import { useEffect } from "react";
+import { Label } from "@radix-ui/react-label";
+import { Button } from "./ui/button";
+import { Popover, PopoverTrigger, PopoverContent } from "./ui/popover";
function displayCSSOption(option: CSSOption) {
switch (option) {
@@ -27,6 +29,17 @@ function displayCSSOption(option: CSSOption) {
}
}
+function displayJSOption(option: JSFrameworkOption) {
+ switch (option) {
+ case JSFrameworkOption.REACT:
+ return "React";
+ case JSFrameworkOption.NO_FRAMEWORK:
+ return "No Framework";
+ default:
+ return option;
+ }
+}
+
function convertStringToCSSOption(option: string) {
switch (option) {
case "tailwind":
@@ -38,24 +51,63 @@ function convertStringToCSSOption(option: string) {
}
}
+function generateDisplayString(settings: OutputSettings) {
+ if (
+ settings.js === JSFrameworkOption.REACT &&
+ settings.css === CSSOption.TAILWIND
+ ) {
+ return (
+