diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 28928c33f309c4ef4a8c5c1fc4ac4ea2e38fdeb5..0000000000000000000000000000000000000000 --- a/.dockerignore +++ /dev/null @@ -1,67 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -venv/ -.venv/ -ENV/ -env/ -*.egg-info/ -dist/ -build/ - -# Node -node_modules/ -npm-debug.log* -yarn-debug.log* -yarn-error.log* -.pnpm-debug.log* - -# Next.js -frontend/.next/ -frontend/out/ -frontend/build/ - -# Git -.git/ -.gitignore - -# IDE -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# OS -.DS_Store -Thumbs.db - -# Documentation -*.md -!README.md - -# Docker -Dockerfile* -docker-compose*.yml -.dockerignore - -# Logs -*.log -logs/ -log/ - -# Generated -generated_projects/ - -# Tests -test/ -tests/ -__tests__/ - -# Lock files (will be regenerated) -uv.lock -poetry.lock - diff --git a/.gitattributes b/.gitattributes index 1abc6d15eb80582bc2bc48e8b790dd790b00d25b..a6344aac8c09253b3b630fb776ae94478aa0275b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,35 @@ -Animated_Logo_Video_Ready.gif filter=lfs diff=lfs merge=lfs -text +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore index d417b3f614c560cff6191af108ba3d3da38ad8e4..0f25b866ecf0fddab1b5836b418c1966b98cb00f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.gradio/ + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -14,26 +16,21 @@ dist/ downloads/ eggs/ .eggs/ -# Ignore Python lib directories but NOT frontend/src/lib -/lib/ -/lib64/ -venv/lib/ -venv/lib64/ +lib/ +lib64/ parts/ sdist/ var/ +wheels/ +share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST -# Virtual environments -venv/ -env/ -ENV/ -.venv/ - # PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec @@ -51,44 +48,115 @@ htmlcov/ nosetests.xml coverage.xml *.cover +*.py,cover .hypothesis/ .pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ # Jupyter Notebook .ipynb_checkpoints +# IPython +profile_default/ +ipython_config.py + # pyenv -.python-version +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site # mypy .mypy_cache/ .dmypy.json +dmypy.json # Pyre type checker .pyre/ -# Gradio cache -log/ -logs/ - -# Documentation cache files (backend) -.backend_gradio_docs_cache.txt -.backend_gradio_docs_last_update.txt -.gradio_docs_cache.txt -.gradio_docs_last_update.txt -.comfyui_docs_cache.txt -.comfyui_docs_last_update.txt -.fastrtc_docs_cache.txt -.fastrtc_docs_last_update.txt - -# System files -.DS_Store -Thumbs.db - -# Lock files -uv.lock -poetry.lock -Pipfile.lock - -# VSCode -.vscode/ \ No newline at end of file +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4bc0c623a659d38b912b9efd8443b18bc4922d49 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,32 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-executables-have-shebangs + - id: check-json + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-toml + - id: check-yaml + - id: end-of-file-fixer + - id: mixed-line-ending + args: ["--fix=lf"] + - id: requirements-txt-fixer + - id: trailing-whitespace + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.8.6 + hooks: + - id: ruff + args: ["--fix"] + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.14.1 + hooks: + - id: mypy + args: ["--ignore-missing-imports"] + additional_dependencies: + [ + "types-python-slugify", + "types-requests", + "types-PyYAML", + "types-pytz", + ] diff --git a/.python-version b/.python-version new file mode 100644 index 0000000000000000000000000000000000000000..c8cfe3959183f8e9a50f83f54cd723f2dc9c252d --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000000000000000000000000000000000000..854d34838a8695341a86e1cfa239be0014098058 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,8 @@ +{ + "recommendations": [ + "ms-python.python", + "charliermarsh.ruff", + "streetsidesoftware.code-spell-checker", + "tamasfe.even-better-toml" + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000000000000000000000000000000000..6b1ad68d2449998e1085249c2f4828e886ff47e5 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,16 @@ +{ + "editor.formatOnSave": true, + "files.insertFinalNewline": false, + "[python]": { + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.formatOnType": true, + "editor.codeActionsOnSave": { + "source.fixAll.ruff": "explicit" + } + }, + "[jupyter]": { + "files.insertFinalNewline": false + }, + "notebook.output.scrolling": true, + "notebook.formatOnSave.enabled": true +} diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 5a770c486ff435366b475dc6743ade5056af3d67..0000000000000000000000000000000000000000 --- a/Dockerfile +++ /dev/null @@ -1,105 +0,0 @@ -# Multi-stage build for AnyCoder Docker Space - -# Stage 1: Build frontend -FROM node:22-slim AS frontend-builder - -WORKDIR /build - -# Copy frontend package files -COPY frontend/package*.json ./ -RUN npm ci - -# Copy all frontend source files and configs -COPY frontend/src ./src -COPY frontend/public ./public -COPY frontend/next.config.js ./ -COPY frontend/tsconfig.json ./ -COPY frontend/tailwind.config.js ./ -COPY frontend/postcss.config.js ./ -# Note: next-env.d.ts is auto-generated by Next.js, not needed for build - -# Build frontend -RUN npm run build - -# Stage 2: Production image -FROM python:3.11-slim - -# Install system dependencies as root (git for pip, nodejs for frontend) -# Install Node.js 22 from NodeSource (Debian repo only has v18) -RUN apt-get update && \ - apt-get install -y --no-install-recommends curl ca-certificates gnupg git && \ - curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \ - apt-get install -y --no-install-recommends nodejs && \ - rm -rf /var/lib/apt/lists/* - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -# Set home to 
the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH \ - PYTHONUNBUFFERED=1 - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy Python requirements and install dependencies -COPY --chown=user:user requirements.txt . -RUN pip install --no-cache-dir --upgrade pip && \ - pip install --no-cache-dir -r requirements.txt - -# Copy application code -COPY --chown=user:user backend_api.py . -COPY --chown=user:user backend_models.py . -COPY --chown=user:user backend_docs_manager.py . -COPY --chown=user:user backend_prompts.py . -COPY --chown=user:user backend_parsers.py . -COPY --chown=user:user backend_deploy.py . -COPY --chown=user:user backend_search_replace.py . -COPY --chown=user:user project_importer.py . - -# Copy built frontend from builder stage -COPY --chown=user:user --from=frontend-builder /build/.next ./frontend/.next -COPY --chown=user:user --from=frontend-builder /build/public ./frontend/public -COPY --chown=user:user --from=frontend-builder /build/package*.json ./frontend/ -COPY --chown=user:user --from=frontend-builder /build/next.config.js ./frontend/ -COPY --chown=user:user --from=frontend-builder /build/node_modules ./frontend/node_modules - -# Set environment variables for the application -# BACKEND_HOST is used by Next.js server for proxying -# Do NOT set NEXT_PUBLIC_API_URL - let frontend use relative URLs -ENV BACKEND_HOST=http://localhost:8000 \ - PORT=7860 - -# Create startup script that runs both services -# Backend on 8000, Frontend on 7860 (exposed port) -RUN echo '#!/bin/bash\n\ -set -e\n\ -\n\ -echo "🚀 Starting AnyCoder Docker Space..."\n\ -\n\ -# Start backend on port 8000 in background\n\ -echo "📡 Starting FastAPI backend on port 8000..."\n\ -cd $HOME/app\n\ -uvicorn backend_api:app --host 0.0.0.0 --port 8000 &\n\ -BACKEND_PID=$!\n\ -\n\ -# Wait for backend to be ready\n\ -echo "⏳ Waiting for backend to start..."\n\ -sleep 5\n\ -\n\ -# Start frontend on port 7860 (HF Spaces exposed port)\n\ -echo "🎨 Starting Next.js frontend on port 7860..."\n\ -cd $HOME/app/frontend\n\ -PORT=7860 BACKEND_HOST=http://localhost:8000 npm start\n\ -' > $HOME/app/start.sh && chmod +x $HOME/app/start.sh - -# Expose port 7860 (HF Spaces default) -EXPOSE 7860 - -# Run the startup script -CMD ["./start.sh"] - diff --git a/README.md b/README.md index 18f1158361563a06d6ae639a6db590aacc9e0474..282f3700195e90b2e5a2446f8d96915eeb7cd3b9 100644 --- a/README.md +++ b/README.md @@ -1,137 +1,13 @@ --- -title: AnyCoder -emoji: 🏆 -colorFrom: blue -colorTo: purple -sdk: docker -app_port: 7860 +title: Anycoder +emoji: 🏢 +colorFrom: indigo +colorTo: indigo +sdk: gradio +sdk_version: 5.23.3 +app_file: app.py pinned: false -disable_embedding: false -hf_oauth: true -hf_oauth_expiration_minutes: 43200 -hf_oauth_scopes: -- manage-repos -- write-discussions +disable_embedding: true --- - -# AnyCoder - AI Code Generator with React Frontend - -AnyCoder is a full-stack AI-powered code generator with a modern React/TypeScript frontend and FastAPI backend. Generate applications by describing them in plain English, with support for multiple AI models and one-click deployment to Hugging Face Spaces. 
- -## 🎨 Features - -- **Modern React UI**: Apple-inspired design with VS Code layout -- **Real-time Streaming**: Server-Sent Events for live code generation -- **Multi-Model Support**: MiniMax M2, DeepSeek V3, and more via HuggingFace InferenceClient -- **Multiple Languages**: HTML, Gradio, Streamlit, React, Transformers.js, ComfyUI -- **Authentication**: HuggingFace OAuth + Dev mode for local testing -- **One-Click Deployment**: Deploy generated apps directly to HF Spaces - -## 🏗️ Architecture - -``` -anycoder/ -├── backend_api.py # FastAPI backend with streaming -├── frontend/ # Next.js React frontend -│ ├── src/ -│ │ ├── app/ # Pages (page.tsx, layout.tsx, globals.css) -│ │ ├── components/ # React components -│ │ ├── lib/ # API client, auth utilities -│ │ └── types/ # TypeScript types -│ └── package.json -├── requirements.txt # Python dependencies -├── Dockerfile # Docker Space configuration -└── start_fullstack.sh # Local development script -``` - -## 🚀 Quick Start - -### Local Development - -1. **Backend**: -```bash -export HF_TOKEN="your_huggingface_token" -export GEMINI_API_KEY="your_gemini_api_key" -python backend_api.py -``` - -2. **Frontend** (new terminal): -```bash -cd frontend -npm install -npm run dev -``` - -3. Open `http://localhost:3000` - -### Using start script: -```bash -export HF_TOKEN="your_token" -export GEMINI_API_KEY="your_gemini_api_key" -./start_fullstack.sh -``` - -## 🐳 Docker Space Deployment - -This app runs as a Docker Space on HuggingFace. The Dockerfile: -- Builds the Next.js frontend -- Runs FastAPI backend on port 7860 -- Uses proper user permissions (UID 1000) -- Handles environment variables securely - -## 🔑 Authentication - -- **Dev Mode** (localhost): Mock login for testing -- **Production**: HuggingFace OAuth with manage-repos scope - -## 📝 Supported Languages - -- `html` - Static HTML pages -- `gradio` - Python Gradio apps -- `streamlit` - Python Streamlit apps -- `react` - React/Next.js apps -- `transformers.js` - Browser ML apps -- `comfyui` - ComfyUI workflows - -## 🤖 Available Models - -- **Gemini 3 Pro Preview** (Default) - Google's latest with deep thinking & Google Search -- MiniMax M2 (via HF router with Novita) -- DeepSeek V3/V3.1 -- DeepSeek R1 -- And more via HuggingFace InferenceClient - -## 🎯 Usage - -1. Sign in with HuggingFace (or use Dev Login locally) -2. Select a language and AI model -3. Describe your app in the chat -4. Watch code generate in real-time -5. 
Click **🚀 Deploy** to publish to HF Spaces - -## 🛠️ Environment Variables - -- `HF_TOKEN` - HuggingFace API token (required) -- `GEMINI_API_KEY` - Google Gemini API key (required for Gemini 3 Pro Preview) -- `POE_API_KEY` - Poe API key (optional, for GPT-5 and Claude models) -- `DASHSCOPE_API_KEY` - DashScope API key (optional, for Qwen models) -- `OPENROUTER_API_KEY` - OpenRouter API key (optional, for Sherlock models) -- `MISTRAL_API_KEY` - Mistral API key (optional, for Mistral models) - -## 📦 Tech Stack - -**Frontend:** -- Next.js 14 -- TypeScript -- Tailwind CSS -- Monaco Editor - -**Backend:** -- FastAPI -- HuggingFace Hub -- Server-Sent Events (SSE) - -## 📄 License - -MIT \ No newline at end of file +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..7017c18c09b63735c96780a229fed2c8da9329c9 --- /dev/null +++ b/app.py @@ -0,0 +1,54 @@ +from app_huggingface import demo as demo_huggingface +from app_gemini_coder import demo as demo_gemini +from utils import get_app +import gradio as gr + +# Create mapping of providers to their code snippets +PROVIDER_SNIPPETS = { + "Hugging Face": """ +import gradio as gr +import ai_gradio +gr.load( + name='huggingface:deepseek-ai/DeepSeek-R1', + src=ai_gradio.registry, + coder=True, + provider="together" +).launch()""", + "Gemini Coder": """ +import gradio as gr +import ai_gradio +gr.load( + name='gemini:gemini-2.5-pro-exp-03-25', + src=ai_gradio.registry, + coder=True, + provider="together" +).launch() + """, +} +# Create mapping of providers to their demos +PROVIDERS = { + "Hugging Face": demo_huggingface, + "Gemini Coder": demo_gemini, +} + +# Modified get_app implementation +demo = gr.Blocks() +with demo: + + provider_dropdown = gr.Dropdown(choices=list(PROVIDERS.keys()), value="Hugging Face", label="Select code snippet") + code_display = gr.Code(label="Provider Code Snippet", language="python", value=PROVIDER_SNIPPETS["Hugging Face"]) + + def update_code(provider): + return PROVIDER_SNIPPETS.get(provider, "Code snippet not available") + + provider_dropdown.change(fn=update_code, inputs=[provider_dropdown], outputs=[code_display]) + + selected_demo = get_app( + models=list(PROVIDERS.keys()), + default_model="Hugging Face", + src=PROVIDERS, + dropdown_label="Select Provider", + ) + +if __name__ == "__main__": + demo.queue(api_open=False).launch(show_api=False) diff --git a/app_allenai.py b/app_allenai.py new file mode 100644 index 0000000000000000000000000000000000000000..d80373412f44ae2813f75ff4bfd863791f463a10 --- /dev/null +++ b/app_allenai.py @@ -0,0 +1,67 @@ +import gradio as gr +from gradio_client import Client + +MODELS = {"OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat", "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"} + + +def create_chat_fn(client): + def chat(message, history): + response = client.predict( + message=message, + system_prompt="You are a helpful AI assistant.", + temperature=0.7, + max_new_tokens=1024, + top_k=40, + repetition_penalty=1.1, + top_p=0.95, + api_name="/chat", + ) + return response + + return chat + + +def set_client_for_session(model_name, request: gr.Request): + headers = {} + if request and hasattr(request, "request") and hasattr(request.request, "headers"): + x_ip_token = request.request.headers.get("x-ip-token") + if x_ip_token: + headers["X-IP-Token"] = x_ip_token + + return Client(MODELS[model_name], headers=headers) + + +def 
safe_chat_fn(message, history, client):
+    if client is None:
+        return "Error: Client not initialized. Please refresh the page."
+    return create_chat_fn(client)(message, history)
+
+
+with gr.Blocks() as demo:
+    client = gr.State()
+
+    model_dropdown = gr.Dropdown(
+        choices=list(MODELS.keys()), value="OLMo-2-1124-13B-Instruct", label="Select Model", interactive=True
+    )
+
+    chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client])
+
+    # Update client when model changes (the gr.Request annotation is required so
+    # Gradio injects the request instead of expecting it as a component input)
+    def update_model(model_name, request: gr.Request):
+        return set_client_for_session(model_name, request)
+
+    model_dropdown.change(
+        fn=update_model,
+        inputs=[model_dropdown],
+        outputs=[client],
+    )
+
+    # Initialize client on page load
+    demo.load(
+        fn=set_client_for_session,
+        inputs=gr.State("OLMo-2-1124-13B-Instruct"),
+        outputs=client,
+    )
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_cerebras.py b/app_cerebras.py
new file mode 100644
index 0000000000000000000000000000000000000000..8765b1b25553d8c3b6ac941a9b5aee837700620d
--- /dev/null
+++ b/app_cerebras.py
@@ -0,0 +1,19 @@
+import os
+
+import cerebras_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "llama3.1-8b",
+        "llama3.1-70b",
+        "llama3.1-405b",
+    ],
+    default_model="llama3.1-70b",
+    src=cerebras_gradio.registry,
+    accept_token=not os.getenv("CEREBRAS_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_claude.py b/app_claude.py
new file mode 100644
index 0000000000000000000000000000000000000000..92d31efe49a69bdbd2aec422326dbd5e83fe8314
--- /dev/null
+++ b/app_claude.py
@@ -0,0 +1,21 @@
+import os
+
+import anthropic_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-haiku-20241022",
+        "claude-3-opus-20240229",
+        "claude-3-sonnet-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    default_model="claude-3-5-sonnet-20241022",
+    src=anthropic_gradio.registry,
+    accept_token=not os.getenv("ANTHROPIC_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_cohere.py b/app_cohere.py
new file mode 100644
index 0000000000000000000000000000000000000000..412f1a2b87159f880a3077f6e382b0be8ea7d4be
--- /dev/null
+++ b/app_cohere.py
@@ -0,0 +1,21 @@
+import os
+
+import cohere_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "command-r",
+        "command-r-08-2024",
+        "command-r-plus",
+        "command-r-plus-08-2024",
+        "command-r7b-12-2024",
+    ],
+    default_model="command-r7b-12-2024",
+    src=cohere_gradio.registry,
+    accept_token=not os.getenv("COHERE_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
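The `get_app` helper that `app_cerebras.py`, `app_claude.py`, and `app_cohere.py` import from `utils` is not included in this diff. Below is a rough sketch of what such a helper might look like, assuming it simply puts a model dropdown in front of `gr.load` and re-renders on selection; the parameter names `models`, `default_model`, and `src` come from the call sites above, while the body and the omitted extras (`accept_token`, `twilio_sid`, ...) are guesses, not the repo's actual implementation:

```python
import gradio as gr


def get_app(models: list[str], default_model: str, src, **load_kwargs) -> gr.Blocks:
    """Hypothetical sketch of utils.get_app, not the repo's actual implementation."""
    with gr.Blocks(fill_height=True) as demo:
        model = gr.Dropdown(choices=models, value=default_model, label="Select Model")

        @gr.render(inputs=model)
        def show_model(model_name):
            # gr.load builds a Blocks app for the chosen registry entry;
            # constructing it inside @gr.render mounts it under the dropdown.
            gr.load(name=model_name, src=src, **load_kwargs)

    return demo
```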
diff --git a/app_compare.py b/app_compare.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac5650bfa875563c5bf525b596174f4225d2f585
--- /dev/null
+++ b/app_compare.py
@@ -0,0 +1,210 @@
+import os
+
+import google.generativeai as genai
+import gradio as gr
+import openai
+from anthropic import Anthropic
+from openai import OpenAI  # Add explicit OpenAI import
+
+
+def get_all_models():
+    """Get all available models from the registries."""
+    return [
+        "SambaNova: Meta-Llama-3.2-1B-Instruct",
+        "SambaNova: Meta-Llama-3.2-3B-Instruct",
+        "SambaNova: Llama-3.2-11B-Vision-Instruct",
+        "SambaNova: Llama-3.2-90B-Vision-Instruct",
+        "SambaNova: Meta-Llama-3.1-8B-Instruct",
+        "SambaNova: Meta-Llama-3.1-70B-Instruct",
+        "SambaNova: Meta-Llama-3.1-405B-Instruct",
+        "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
+        "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
+        "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
+        "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
+    ]
+
+
+def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
+    """Generate a prompt for models to discuss and build upon previous responses."""
+    prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
+
+Previous responses from other AI models:
+{chr(10).join(f"- {response}" for response in previous_responses)}
+
+Please provide your perspective while:
+1. Acknowledging key insights from previous responses
+2. Adding any missing important points
+3. Respectfully noting if you disagree with anything and explaining why
+4. Building towards a complete answer
+
+Keep your response focused and concise (max 3-4 paragraphs)."""
+    return prompt
+
+
+def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
+    """Generate a prompt for final consensus building."""
+    return f"""Review this multi-AI discussion about: "{original_question}"
+
+Discussion history:
+{chr(10).join(discussion_history)}
+
+As a final synthesizer, please:
+1. Identify the key points where all models agreed
+2. Explain how any disagreements were resolved
+3. Present a clear, unified answer that represents our collective best understanding
+4. Note any remaining uncertainties or caveats
+
+Keep the final consensus concise but complete."""
+
+
+def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
+    client = openai.OpenAI(api_key=api_key)
+    response = client.chat.completions.create(model=model, messages=messages)
+    return response.choices[0].message.content
+
+
+def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
+    """Chat with Anthropic's Claude model."""
+    client = Anthropic(api_key=api_key)
+    response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
+    return response.content[0].text
+
+
+def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
+    """Chat with Gemini Pro model."""
+    genai.configure(api_key=api_key)
+    model = genai.GenerativeModel("gemini-pro")
+
+    # Convert messages to Gemini format
+    gemini_messages = []
+    for msg in messages:
+        role = "user" if msg["role"] == "user" else "model"
+        gemini_messages.append({"role": role, "parts": [msg["content"]]})
+
+    response = model.generate_content([m["parts"][0] for m in gemini_messages])
+    return response.text
+
+
+def chat_with_sambanova(
+    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
+) -> str:
+    """Chat with SambaNova's models using their OpenAI-compatible API."""
+    client = openai.OpenAI(
+        api_key=api_key,
+        base_url="https://api.sambanova.ai/v1",
+    )
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=messages,
+        temperature=0.1,
+        top_p=0.1,
+    )
+    return response.choices[0].message.content
+
+
+def chat_with_hyperbolic(
+    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+) -> str:
+    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
+    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
+
+    # Add system message to the start of the messages list
+    full_messages = [
+        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
+        *messages,
+    ]
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=full_messages,
+        temperature=0.7,
+        max_tokens=1024,
+    )
+    return response.choices[0].message.content
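The `chat_with_*` wrappers above are all invoked through a long `if/elif` provider chain in `multi_model_consensus` below. A hypothetical condensation of that dispatch, not part of this diff, relying only on the functions and env-var names already used in this file:

```python
import os
from collections.abc import Callable

# Hypothetical dispatch table: provider label -> (env var, normalized wrapper).
# Every entry takes (messages, api_key, model_name) even when the underlying
# function ignores model_name, so callers need no special cases.
PROVIDER_CALLS: dict[str, tuple[str, Callable[..., str]]] = {
    "Anthropic": ("ANTHROPIC_API_KEY", lambda m, k, n: chat_with_anthropic(m, k)),
    "SambaNova": ("SAMBANOVA_API_KEY", lambda m, k, n: chat_with_sambanova(m, k, n)),
    "Hyperbolic": ("HYPERBOLIC_API_KEY", lambda m, k, n: chat_with_hyperbolic(m, k, n)),
    "Gemini": ("GEMINI_API_KEY", lambda m, k, n: chat_with_gemini(m, k)),
}


def call_provider(provider: str, messages: list[dict], model_name: str = "") -> str:
    env_var, fn = PROVIDER_CALLS[provider]
    return fn(messages, os.getenv(env_var), model_name)
```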
+def multi_model_consensus(
+    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+) -> list[tuple[str, str]]:
+    if not selected_models:
+        raise gr.Error("Please select at least one model to chat with.")
+
+    chat_history = []
+    progress(0, desc="Getting responses from all models...")
+
+    # Get a response from each selected model in turn
+    for i, model in enumerate(selected_models):
+        provider, model_name = model.split(": ", 1)
+        progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")
+
+        try:
+            if provider == "Anthropic":
+                api_key = os.getenv("ANTHROPIC_API_KEY")
+                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
+            elif provider == "SambaNova":
+                api_key = os.getenv("SAMBANOVA_API_KEY")
+                response = chat_with_sambanova(
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant"},
+                        {"role": "user", "content": question},
+                    ],
+                    api_key=api_key,
+                    model_name=model_name,
+                )
+            elif provider == "Hyperbolic":
+                api_key = os.getenv("HYPERBOLIC_API_KEY")
+                response = chat_with_hyperbolic(
+                    messages=[{"role": "user", "content": question}],
+                    api_key=api_key,
+                    model_name=model_name,
+                )
+            else:  # Gemini
+                api_key = os.getenv("GEMINI_API_KEY")
+                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
+
+            chat_history.append((model, response))
+        except Exception as e:
+            chat_history.append((model, f"Error: {e!s}"))
+
+    progress(1.0, desc="Done!")
+    return chat_history
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Model Response Comparison")
+    gr.Markdown("""Select multiple models to compare their responses""")
+
+    with gr.Row():
+        with gr.Column():
+            model_selector = gr.Dropdown(
+                choices=get_all_models(),
+                multiselect=True,
+                label="Select Models",
+                info="Choose models to compare",
+                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
+            )
+
+    chatbot = gr.Chatbot(height=600, label="Model Responses")
+    msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")
+
+    def respond(message, selected_models):
+        chat_history = multi_model_consensus(message, selected_models, rounds=1)
+        return chat_history
+
+    msg.submit(respond, [msg, model_selector], [chatbot])
+
+for fn in demo.fns.values():
+    fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_crew.py b/app_crew.py
new file mode 100644
index 0000000000000000000000000000000000000000..31b0e1457beb2a5dac2cdc4223c9caf54cc8d22a
--- /dev/null
+++ b/app_crew.py
@@ -0,0 +1,8 @@
+import ai_gradio
+import gradio as gr
+
+demo = gr.load(
+    name="crewai:gpt-4-turbo",
+    crew_type="article",  # or 'support'
+    src=ai_gradio.registry,
+)
diff --git a/app_deepseek.py b/app_deepseek.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e09a88b894646a15aa3b2c64ac7985aff406e9e
--- /dev/null
+++ b/app_deepseek.py
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the DeepSeek models but keep
their full names for loading +DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")] + +# Create display names without the prefix +DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( + models=DEEPSEEK_MODELS_FULL, # Use the full names with prefix + default_model=DEEPSEEK_MODELS_FULL[-1], + dropdown_label="Select DeepSeek Model", + choices=DEEPSEEK_MODELS_DISPLAY, # Display names without prefix + fill_height=True, + coder=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_experimental.py b/app_experimental.py new file mode 100644 index 0000000000000000000000000000000000000000..db24b5c41e14a619fe40fc698ceb691314e80150 --- /dev/null +++ b/app_experimental.py @@ -0,0 +1,300 @@ +import os +import random + +import google.generativeai as genai +import gradio as gr +import openai +from anthropic import Anthropic +from openai import OpenAI # Add explicit OpenAI import + + +def get_all_models(): + """Get all available models from the registries.""" + return [ + "SambaNova: Meta-Llama-3.2-1B-Instruct", + "SambaNova: Meta-Llama-3.2-3B-Instruct", + "SambaNova: Llama-3.2-11B-Vision-Instruct", + "SambaNova: Llama-3.2-90B-Vision-Instruct", + "SambaNova: Meta-Llama-3.1-8B-Instruct", + "SambaNova: Meta-Llama-3.1-70B-Instruct", + "SambaNova: Meta-Llama-3.1-405B-Instruct", + "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct", + "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct", + "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct", + "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct", + "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct", + "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B", + "Hyperbolic: Qwen/Qwen2.5-72B-Instruct", + "Hyperbolic: deepseek-ai/DeepSeek-V2.5", + "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct", + ] + + +def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str: + """Generate a prompt for models to discuss and build upon previous + responses. + """ + prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}" + +Previous responses from other AI models: +{chr(10).join(f"- {response}" for response in previous_responses)} + +Please provide your perspective while: +1. Acknowledging key insights from previous responses +2. Adding any missing important points +3. Respectfully noting if you disagree with anything and explaining why +4. Building towards a complete answer + +Keep your response focused and concise (max 3-4 paragraphs).""" + return prompt + + +def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str: + """Generate a prompt for final consensus building.""" + return f"""Review this multi-AI discussion about: "{original_question}" + +Discussion history: +{chr(10).join(discussion_history)} + +As a final synthesizer, please: +1. Identify the key points where all models agreed +2. Explain how any disagreements were resolved +3. Present a clear, unified answer that represents our collective best understanding +4. 
Note any remaining uncertainties or caveats + +Keep the final consensus concise but complete.""" + + +def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str: + import openai + + client = openai.OpenAI(api_key=api_key) + response = client.chat.completions.create(model=model, messages=messages) + return response.choices[0].message.content + + +def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str: + """Chat with Anthropic's Claude model.""" + client = Anthropic(api_key=api_key) + response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024) + return response.content[0].text + + +def chat_with_gemini(messages: list[dict], api_key: str | None) -> str: + """Chat with Gemini Pro model.""" + genai.configure(api_key=api_key) + model = genai.GenerativeModel("gemini-pro") + + # Convert messages to Gemini format + gemini_messages = [] + for msg in messages: + role = "user" if msg["role"] == "user" else "model" + gemini_messages.append({"role": role, "parts": [msg["content"]]}) + + response = model.generate_content([m["parts"][0] for m in gemini_messages]) + return response.text + + +def chat_with_sambanova( + messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct" +) -> str: + """Chat with SambaNova's models using their OpenAI-compatible API.""" + client = openai.OpenAI( + api_key=api_key, + base_url="https://api.sambanova.ai/v1", + ) + + response = client.chat.completions.create( + model=model_name, + messages=messages, + temperature=0.1, + top_p=0.1, # Use the specific model name passed in + ) + return response.choices[0].message.content + + +def chat_with_hyperbolic( + messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct" +) -> str: + """Chat with Hyperbolic's models using their OpenAI-compatible API.""" + client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1") + + # Add system message to the start of the messages list + full_messages = [ + {"role": "system", "content": "You are a helpful assistant. 
Be descriptive and clear."}, + *messages, + ] + + response = client.chat.completions.create( + model=model_name, # Use the specific model name passed in + messages=full_messages, + temperature=0.7, + max_tokens=1024, + ) + return response.choices[0].message.content + + +def multi_model_consensus( + question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress() +) -> list[tuple[str, str]]: + if not selected_models: + raise gr.Error("Please select at least one model to chat with.") + + chat_history = [] + discussion_history = [] + + # Initial responses + progress(0, desc="Getting initial responses...") + initial_responses = [] + for i, model in enumerate(selected_models): + provider, model_name = model.split(": ", 1) + + try: + if provider == "Anthropic": + api_key = os.getenv("ANTHROPIC_API_KEY") + response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key) + elif provider == "SambaNova": + api_key = os.getenv("SAMBANOVA_API_KEY") + response = chat_with_sambanova( + messages=[ + {"role": "system", "content": "You are a helpful assistant"}, + {"role": "user", "content": question}, + ], + api_key=api_key, + ) + elif provider == "Hyperbolic": # Add Hyperbolic case + api_key = os.getenv("HYPERBOLIC_API_KEY") + response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key) + else: # Gemini + api_key = os.getenv("GEMINI_API_KEY") + response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key) + + initial_responses.append(f"{model}: {response}") + discussion_history.append(f"Initial response from {model}:\n{response}") + chat_history.append((f"Initial response from {model}", response)) + except Exception as e: + chat_history.append((f"Error from {model}", str(e))) + + # Discussion rounds + for round_num in range(rounds): + progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...") + round_responses = [] + + random.shuffle(selected_models) # Randomize order each round + for model in selected_models: + provider, model_name = model.split(": ", 1) + + try: + discussion_prompt = generate_discussion_prompt(question, discussion_history) + if provider == "Anthropic": + api_key = os.getenv("ANTHROPIC_API_KEY") + response = chat_with_anthropic( + messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key + ) + elif provider == "SambaNova": + api_key = os.getenv("SAMBANOVA_API_KEY") + response = chat_with_sambanova( + messages=[ + {"role": "system", "content": "You are a helpful assistant"}, + {"role": "user", "content": discussion_prompt}, + ], + api_key=api_key, + ) + elif provider == "Hyperbolic": # Add Hyperbolic case + api_key = os.getenv("HYPERBOLIC_API_KEY") + response = chat_with_hyperbolic( + messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key + ) + else: # Gemini + api_key = os.getenv("GEMINI_API_KEY") + response = chat_with_gemini( + messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key + ) + + round_responses.append(f"{model}: {response}") + discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}") + chat_history.append((f"Round {round_num + 1} - {model}", response)) + except Exception as e: + chat_history.append((f"Error from {model} in round {round_num + 1}", str(e))) + + # Final consensus + progress(0.9, desc="Building final consensus...") + model = selected_models[0] + provider, model_name = model.split(": ", 1) + + try: + consensus_prompt = 
generate_consensus_prompt(question, discussion_history) + if provider == "Anthropic": + api_key = os.getenv("ANTHROPIC_API_KEY") + final_consensus = chat_with_anthropic( + messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key + ) + elif provider == "SambaNova": + api_key = os.getenv("SAMBANOVA_API_KEY") + final_consensus = chat_with_sambanova( + messages=[ + {"role": "system", "content": "You are a helpful assistant"}, + {"role": "user", "content": consensus_prompt}, + ], + api_key=api_key, + ) + elif provider == "Hyperbolic": # Add Hyperbolic case + api_key = os.getenv("HYPERBOLIC_API_KEY") + final_consensus = chat_with_hyperbolic( + messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key + ) + else: # Gemini + api_key = os.getenv("GEMINI_API_KEY") + final_consensus = chat_with_gemini( + messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key + ) + except Exception as e: + final_consensus = f"Error getting consensus from {model}: {e!s}" + + chat_history.append(("Final Consensus", final_consensus)) + + progress(1.0, desc="Done!") + return chat_history + + +with gr.Blocks() as demo: + gr.Markdown("# Experimental Multi-Model Consensus Chat") + gr.Markdown( + """Select multiple models to collaborate on answering your question. + The models will discuss with each other and attempt to reach a consensus. + Maximum 3 models can be selected at once.""" + ) + + with gr.Row(): + with gr.Column(): + model_selector = gr.Dropdown( + choices=get_all_models(), + multiselect=True, + label="Select Models (max 3)", + info="Choose up to 3 models to participate in the discussion", + value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"], + max_choices=3, + ) + rounds_slider = gr.Slider( + minimum=1, + maximum=2, + value=1, + step=1, + label="Discussion Rounds", + info="Number of rounds of discussion between models", + ) + + chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion") + msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...") + + def respond(message, selected_models, rounds): + chat_history = multi_model_consensus(message, selected_models, rounds) + return chat_history + + msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat") + +for fn in demo.fns.values(): + fn.api_name = False + +if __name__ == "__main__": + demo.launch() diff --git a/app_fal.py b/app_fal.py new file mode 100644 index 0000000000000000000000000000000000000000..67a7b012e28db957a6abd67cf0d577abe17ba971 --- /dev/null +++ b/app_fal.py @@ -0,0 +1,16 @@ +import fal_gradio + +from utils import get_app + +demo = get_app( + models=[ + "fal-ai/ltx-video", + "fal-ai/ltx-video/image-to-video", + "fal-ai/luma-photon", + ], + default_model="fal-ai/luma-photon", + src=fal_gradio.registry, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_fireworks.py b/app_fireworks.py new file mode 100644 index 0000000000000000000000000000000000000000..eb9c086457b9a72753658de6f84b11a43cdeac04 --- /dev/null +++ b/app_fireworks.py @@ -0,0 +1,19 @@ +import os + +import fireworks_gradio + +from utils import get_app + +demo = get_app( + models=[ + "f1-preview", + "f1-mini-preview", + "llama-v3p3-70b-instruct", + ], + default_model="llama-v3p3-70b-instruct", + src=fireworks_gradio.registry, + accept_token=not os.getenv("FIREWORKS_API_KEY"), +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_gemini.py b/app_gemini.py new file mode 100644 index 
0000000000000000000000000000000000000000..5bbad95c315c84bd09e6583c5038f1068b202114 --- /dev/null +++ b/app_gemini.py @@ -0,0 +1,22 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the Gemini models but keep their full names for loading +GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")] + +# Create display names without the prefix +GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=GEMINI_MODELS_FULL, # Use the full names with prefix + default_model=GEMINI_MODELS_FULL[-1], + dropdown_label="Select Gemini Model", + choices=GEMINI_MODELS_DISPLAY, # Display names without prefix + src=ai_gradio.registry, + fill_height=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_gemini_camera.py b/app_gemini_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..08d2fc99acd12081f07a06e260cbf95fac3c221e --- /dev/null +++ b/app_gemini_camera.py @@ -0,0 +1,23 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the Gemini models but keep their full names for loading +GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")] + +# Create display names without the prefix +GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=GEMINI_MODELS_FULL, # Use the full names with prefix + default_model=GEMINI_MODELS_FULL[-2], + dropdown_label="Select Gemini Model", + choices=GEMINI_MODELS_DISPLAY, # Display names without prefix + src=ai_gradio.registry, + camera=True, + fill_height=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_gemini_coder.py b/app_gemini_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..3628a12e12c2cba19f5c10979ead16c0c3abe9f6 --- /dev/null +++ b/app_gemini_coder.py @@ -0,0 +1,23 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the Gemini models but keep their full names for loading +GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")] + +# Create display names without the prefix +GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=GEMINI_MODELS_FULL, # Use the full names with prefix + default_model=GEMINI_MODELS_FULL[0], + dropdown_label="Select Gemini Model", + choices=GEMINI_MODELS_DISPLAY, # Display names without prefix + src=ai_gradio.registry, + fill_height=True, + coder=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_gemini_voice.py b/app_gemini_voice.py new file mode 100644 index 0000000000000000000000000000000000000000..e2e28cccf28491beec9a2e2dfaf9afd864d26323 --- /dev/null +++ b/app_gemini_voice.py @@ -0,0 +1,23 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the Gemini models but keep their full names for loading +GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")] + +# Create display names without the prefix +GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=GEMINI_MODELS_FULL, # Use the full names with prefix + default_model=GEMINI_MODELS_FULL[-2], + dropdown_label="Select Gemini Model", + choices=GEMINI_MODELS_DISPLAY, # Display names without 
prefix
+    src=ai_gradio.registry,
+    enable_voice=True,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_groq.py b/app_groq.py
new file mode 100644
index 0000000000000000000000000000000000000000..36ec4e68f8b4fe37bad230335129637835666c3a
--- /dev/null
+++ b/app_groq.py
@@ -0,0 +1,21 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Groq models from the registry
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
+
+# Create display names without the prefix
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
+
+demo = get_app(
+    models=GROQ_MODELS_FULL,
+    default_model=GROQ_MODELS_FULL[-2],
+    src=ai_gradio.registry,
+    dropdown_label="Select Groq Model",
+    choices=GROQ_MODELS_DISPLAY,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_groq_coder.py b/app_groq_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..19f1c6921ca5c93019236c4a92b1af50db843e5b
--- /dev/null
+++ b/app_groq_coder.py
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Groq models but keep their full names for loading
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
+
+# Create display names without the prefix
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GROQ_MODELS_FULL,  # Use the full names with prefix
+    default_model=GROQ_MODELS_FULL[-1],
+    dropdown_label="Select Groq Model",
+    choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_hf.py b/app_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb2a82e24c198a225e1ef553eb4fa28fdaab90d0
--- /dev/null
+++ b/app_hf.py
@@ -0,0 +1,17 @@
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "microsoft/Phi-3.5-mini-instruct",
+        "HuggingFaceTB/SmolLM2-1.7B-Instruct",
+        "google/gemma-2-2b-it",
+        "openai-community/gpt2",
+        "microsoft/phi-2",
+        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+    ],
+    default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
+    src="models",
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_huggingface.py b/app_huggingface.py
new file mode 100644
index 0000000000000000000000000000000000000000..46b51e230e89ccaf16245fbf40fd5a6ef11c32a9
--- /dev/null
+++ b/app_huggingface.py
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Hugging Face models but keep their full names for loading
+HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]
+
+# Create display names without the prefix
+HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HUGGINGFACE_MODELS_FULL,  # Use the full names with prefix
+    default_model=HUGGINGFACE_MODELS_FULL[0],
+    dropdown_label="Select Huggingface Model",
+    choices=HUGGINGFACE_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+    provider="fireworks-ai",
+    bill_to="huggingface",
+)
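Nearly every `app_*.py` in this commit repeats the same three steps: filter `ai_gradio.registry` by a provider prefix, strip the prefix for display, and hand both lists to `get_app`. A hypothetical consolidation of that boilerplate follows; the helper is not in the diff, and `get_app`'s keyword arguments are simply passed through unchanged:

```python
import ai_gradio

from utils_ai_gradio import get_app


def provider_demo(prefix: str, default_index: int = 0, **kwargs):
    """Hypothetical helper: build a provider demo from its registry prefix."""
    full = [k for k in ai_gradio.registry.keys() if k.startswith(f"{prefix}:")]
    display = [k.replace(f"{prefix}:", "") for k in full]
    return get_app(
        models=full,  # full names with prefix, used for loading
        default_model=full[default_index],
        dropdown_label=f"Select {prefix.title()} Model",
        choices=display,  # display names without prefix
        fill_height=True,
        **kwargs,
    )


# e.g. app_groq_coder.py would then reduce to:
# demo = provider_demo("groq", default_index=-1, coder=True)
```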
diff --git a/app_hyperbolic.py b/app_hyperbolic.py
new file mode 100644
index 0000000000000000000000000000000000000000..880d8c61ac3946a91c18a6bf3bee4bb4bebcf436
--- /dev/null
+++ b/app_hyperbolic.py
@@ -0,0 +1,19 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the hyperbolic models but keep their full names for loading
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
+
+# Create display names without the prefix
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
+    default_model=HYPERBOLIC_MODELS_FULL[-2],
+    dropdown_label="Select Hyperbolic Model",
+    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
diff --git a/app_hyperbolic_coder.py b/app_hyperbolic_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb875ce72cec5e1c29e480c000cfaf052e6d3faa
--- /dev/null
+++ b/app_hyperbolic_coder.py
@@ -0,0 +1,20 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the hyperbolic models but keep their full names for loading
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
+
+# Create display names without the prefix
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
+    default_model=HYPERBOLIC_MODELS_FULL[-2],
+    dropdown_label="Select Hyperbolic Model",
+    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
diff --git a/app_langchain.py b/app_langchain.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bcb14c1fd8ca5896641915f324b7c7c8e96af75
--- /dev/null
+++ b/app_langchain.py
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the LangChain models but keep their full names for loading
+LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]
+
+# Create display names without the prefix
+LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=LANGCHAIN_MODELS_FULL,  # Use the full names with prefix
+    default_model=LANGCHAIN_MODELS_FULL[0],
+    dropdown_label="Select Langchain Model",
+    choices=LANGCHAIN_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_lumaai.py b/app_lumaai.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba56135427c955bb89739790dae5d322faf4211a
--- /dev/null
+++ b/app_lumaai.py
@@ -0,0 +1,7 @@
+import gradio as gr
+import lumaai_gradio
+
+demo = gr.load(
+    name="dream-machine",
+    src=lumaai_gradio.registry,
+)
diff --git a/app_marco_o1.py b/app_marco_o1.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2b2a2ca1afa977a91b5785b8e98bf55883a90ab
--- /dev/null
+++ b/app_marco_o1.py
@@ -0,0 +1,12 @@
+import gradio as gr
+import spaces
+import transformers_gradio
+
+demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
+demo.fn = spaces.GPU()(demo.fn)
+
+for fn in demo.fns.values():
+    fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_meta.py b/app_meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7084b2262f99493cfe90ebe65add99ee911c343
--- /dev/null
+++ b/app_meta.py
@@ -0,0 +1,6 @@
+import gradio as gr
+
+demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")
+
+if __name__ == "__main__":
+    demo.launch()
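Several of the wrappers below (`app_mindsearch.py`, `app_moondream.py`, `app_omini.py`) repeat the same two steps: load a hosted Space and hide its endpoints from the public API surface. A hypothetical consolidation, assuming nothing beyond what those files already do:

```python
import gradio as gr


def load_space(space_id: str) -> gr.Blocks:
    """Hypothetical helper: embed a hosted Space with API access disabled."""
    demo = gr.load(name=space_id, src="spaces")
    # Disable API access for all functions, as the wrappers below do
    if hasattr(demo, "fns"):
        for fn in demo.fns.values():
            fn.api_name = False
    return demo
```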
diff --git a/app_mindsearch.py b/app_mindsearch.py
new file mode 100644
index 0000000000000000000000000000000000000000..91700ade77db82f9352c5ad2b07edabee5c4f90e
--- /dev/null
+++ b/app_mindsearch.py
@@ -0,0 +1,12 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="internlm/MindSearch", src="spaces")
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_minimax.py b/app_minimax.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfe352f25b607d11b657707ca066192d5c14ba19
--- /dev/null
+++ b/app_minimax.py
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the MiniMax models but keep their full names for loading
+MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
+
+# Create display names without the prefix
+MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
+    default_model=MINIMAX_MODELS_FULL[0],
+    dropdown_label="Select Minimax Model",
+    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_minimax_coder.py b/app_minimax_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..9973e8eeb920062a5c749fb4eb2764c01f2ee499
--- /dev/null
+++ b/app_minimax_coder.py
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the MiniMax models but keep their full names for loading
+MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
+
+# Create display names without the prefix
+MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
+    default_model=MINIMAX_MODELS_FULL[0],
+    dropdown_label="Select Minimax Model",
+    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_mistral.py b/app_mistral.py
new file mode 100644
index 0000000000000000000000000000000000000000..b27e9c2de923d514d5e7beb1a9098bf3a1f4d997
--- /dev/null
+++ b/app_mistral.py
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Mistral models but keep their full names for loading
+MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]
+
+# Create display names without the prefix
+MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MISTRAL_MODELS_FULL,  # Use the full names with prefix
+    default_model=MISTRAL_MODELS_FULL[5],
+    dropdown_label="Select Mistral Model",
+    choices=MISTRAL_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/app_moondream.py b/app_moondream.py
new file mode 100644
index 0000000000000000000000000000000000000000..9eef19f0b511b528bb881e49adad1b8659b8112f
--- /dev/null
+++ b/app_moondream.py
@@ -0,0 
+1,13 @@ +import gradio as gr + +# Load the Gradio space +demo = gr.load(name="akhaliq/moondream", src="spaces") + + +# Disable API access for all functions +if hasattr(demo, "fns"): + for fn in demo.fns.values(): + fn.api_name = False + +if __name__ == "__main__": + demo.launch() diff --git a/app_nvidia.py b/app_nvidia.py new file mode 100644 index 0000000000000000000000000000000000000000..2d01ab88c09064189b6bf528b2b8c2d247b2c99e --- /dev/null +++ b/app_nvidia.py @@ -0,0 +1,22 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the nvidia models but keep their full names for loading +NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")] + +# Create display names without the prefix +NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( + models=NVIDIA_MODELS_FULL, # Use the full names with prefix + default_model=NVIDIA_MODELS_FULL[0], + dropdown_label="Select Nvidia Model", + choices=NVIDIA_MODELS_DISPLAY, # Display names without prefix + fill_height=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_nvidia_coder.py b/app_nvidia_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..320994cf11fc815457c8dba074e76c6340995d7d --- /dev/null +++ b/app_nvidia_coder.py @@ -0,0 +1,23 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the nvidia models but keep their full names for loading +NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")] + +# Create display names without the prefix +NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( + models=NVIDIA_MODELS_FULL, # Use the full names with prefix + default_model=NVIDIA_MODELS_FULL[-1], + dropdown_label="Select Nvidia Model", + choices=NVIDIA_MODELS_DISPLAY, # Display names without prefix + fill_height=True, + coder=True +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_omini.py b/app_omini.py new file mode 100644 index 0000000000000000000000000000000000000000..413b3f9b9eaef496bf0bc72cc540ef377af5348d --- /dev/null +++ b/app_omini.py @@ -0,0 +1,10 @@ +import gradio as gr + +# Load the Gradio space +demo = gr.load(name="Yuanshi/OminiControl", src="spaces") + + +# Disable API access for all functions +if hasattr(demo, "fns"): + for fn in demo.fns.values(): + fn.api_name = False diff --git a/app_openai.py b/app_openai.py new file mode 100644 index 0000000000000000000000000000000000000000..9be0bfcc3eb151f9ad05c839dbe2375ed435831d --- /dev/null +++ b/app_openai.py @@ -0,0 +1,21 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the OpenAI models but keep their full names for loading +OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")] + +# Create display names without the prefix +OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=OPENAI_MODELS_FULL, # Use the full names with prefix + default_model=OPENAI_MODELS_FULL[-1], + dropdown_label="Select OpenAI Model", + choices=OPENAI_MODELS_DISPLAY, # Display names without prefix + fill_height=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_openai_coder.py b/app_openai_coder.py new file mode 100644 index 
diff --git a/app_openai_coder.py b/app_openai_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..2c5fb5e422fa87d02ceffaaca4d204b214094662 --- /dev/null +++ b/app_openai_coder.py @@ -0,0 +1,22 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the OpenAI models but keep their full names for loading +OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")] + +# Create display names without the prefix +OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=OPENAI_MODELS_FULL, # Use the full names with prefix + default_model=OPENAI_MODELS_FULL[-1], + dropdown_label="Select OpenAI Model", + choices=OPENAI_MODELS_DISPLAY, # Display names without prefix + fill_height=True, + coder=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_openai_voice.py b/app_openai_voice.py new file mode 100644 index 0000000000000000000000000000000000000000..5073774e7e97893bfc114d360c124cd5c534d37d --- /dev/null +++ b/app_openai_voice.py @@ -0,0 +1,23 @@ +import os + +import openai_gradio + +from utils import get_app + +demo = get_app( + models=[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + default_model="gpt-4o-mini-realtime-preview-2024-12-17", + src=openai_gradio.registry, + accept_token=not os.getenv("OPENAI_API_KEY"), + twilio_sid=os.getenv("TWILIO_SID_OPENAI"), + twilio_token=os.getenv("TWILIO_AUTH_OPENAI"), +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_openrouter.py b/app_openrouter.py new file mode 100644 index 0000000000000000000000000000000000000000..208cf05f06304246eed15f1ae08467f66efbd7f9 --- /dev/null +++ b/app_openrouter.py @@ -0,0 +1,22 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the OpenRouter models but keep their full names for loading +OPENROUTER_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openrouter:")] + +# Create display names without the prefix +OPENROUTER_MODELS_DISPLAY = [k.replace("openrouter:", "") for k in OPENROUTER_MODELS_FULL] + +# Create and launch the interface using get_app utility +demo = get_app( + models=OPENROUTER_MODELS_FULL, # Use the full names with prefix + default_model=OPENROUTER_MODELS_FULL[-1], + dropdown_label="Select OpenRouter Model", + choices=OPENROUTER_MODELS_DISPLAY, # Display names without prefix + fill_height=True, + coder=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_paligemma.py b/app_paligemma.py new file mode 100644 index 0000000000000000000000000000000000000000..22cbe09758cf9c011016e81d7f3db8026ee4f287 --- /dev/null +++ b/app_paligemma.py @@ -0,0 +1,78 @@ +import gradio as gr +from gradio_client import Client, handle_file + +MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"} + + +def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p): + def chat(message, history): + text = message.get("text", "") + files = message.get("files", []) + processed_files = [handle_file(f) for f in files] + + response = client.predict( + message={"text": text, "files": processed_files}, + system_prompt=system_prompt, + temperature=temperature, + max_new_tokens=max_tokens, + top_k=top_k, + repetition_penalty=rep_penalty, + top_p=top_p, + api_name="/chat", + ) + return response + + return chat + + +def set_client_for_session(model_name, request: gr.Request): + headers = {} + if request and hasattr(request, "headers"): + x_ip_token = request.headers.get("x-ip-token") + if x_ip_token: + headers["X-IP-Token"] = x_ip_token + + return Client(MODELS[model_name], headers=headers) + + +def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p): + if client is None: + return "Error: Client not initialized. Please refresh the page." + try: + return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)( + message, history + ) + except Exception as e: + print(f"Error during chat: {e!s}") + return f"Error during chat: {e!s}" + + +with gr.Blocks() as demo: + client = gr.State() + + with gr.Accordion("Advanced Settings", open=False): + system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt") + with gr.Row(): + temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature") + top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P") + with gr.Row(): + top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K") + rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty") + max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens") + + chat_interface = gr.ChatInterface( + fn=safe_chat_fn, + additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p], + multimodal=True, + ) + + # Initialize client on page load with default model + demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client]) # Using default model + +# Move the API access check here, after demo is defined +if hasattr(demo, "fns"): + for fn in demo.fns.values(): + fn.api_name = False + +if __name__ == "__main__": + demo.launch()
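app_paligemma.py above creates one gradio_client.Client per browser session, stores it in gr.State, and forwards the incoming x-ip-token header, presumably so that upstream GPU quota is attributed to the visitor rather than to this Space. The essential wiring, reduced to a sketch:

import gradio as gr
from gradio_client import Client


def set_client_for_session(request: gr.Request):
    headers = {}
    # Forward the visitor's quota token when the request carries one.
    if request and hasattr(request, "headers"):
        token = request.headers.get("x-ip-token")
        if token:
            headers["X-IP-Token"] = token
    return Client("akhaliq/paligemma2-10b-ft-docci-448", headers=headers)


with gr.Blocks() as demo:
    client = gr.State()  # one Client per session, never shared between users
    demo.load(fn=set_client_for_session, inputs=None, outputs=[client])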
diff --git a/app_perplexity.py b/app_perplexity.py new file mode 100644 index 0000000000000000000000000000000000000000..62c6d320792696577a14e707ae34e52f457b4476 --- /dev/null +++ b/app_perplexity.py @@ -0,0 +1,23 @@ +import os + +import perplexity_gradio + +from utils import get_app + +demo = get_app( + models=[ + "llama-3.1-sonar-large-128k-online", + "llama-3.1-sonar-small-128k-online", + "llama-3.1-sonar-huge-128k-online", + "llama-3.1-sonar-small-128k-chat", + "llama-3.1-sonar-large-128k-chat", + "llama-3.1-8b-instruct", + "llama-3.1-70b-instruct", + ], + default_model="llama-3.1-sonar-huge-128k-online", + src=perplexity_gradio.registry, + accept_token=not os.getenv("PERPLEXITY_API_KEY"), +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_playai.py b/app_playai.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3b37380a1ed0a324d0bed58aa25c3828489cc0 --- /dev/null +++ b/app_playai.py @@ -0,0 +1,10 @@ +import gradio as gr +import playai_gradio + +demo = gr.load( + name="PlayDialog", + src=playai_gradio.registry, +) + +for fn in demo.fns.values(): + fn.api_name = False diff --git a/app_qwen.py b/app_qwen.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5cae191acf8041bec64947a3578975c4dc0d2d --- /dev/null +++ b/app_qwen.py @@ -0,0 +1,19 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the qwen models but keep their full names for loading +QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")] + +# Create display names without the prefix +QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( +
models=QWEN_MODELS_FULL, # Use the full names with prefix + default_model=QWEN_MODELS_FULL[-1], + dropdown_label="Select Qwen Model", + choices=QWEN_MODELS_DISPLAY, # Display names without prefix + fill_height=True, +) diff --git a/app_qwen_coder.py b/app_qwen_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..6d83ebab412ca269264fd7de642935f930cf53e5 --- /dev/null +++ b/app_qwen_coder.py @@ -0,0 +1,20 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the qwen models but keep their full names for loading +QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")] + +# Create display names without the prefix +QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( + models=QWEN_MODELS_FULL, # Use the full names with prefix + default_model=QWEN_MODELS_FULL[-1], + dropdown_label="Select Qwen Model", + choices=QWEN_MODELS_DISPLAY, # Display names without prefix + fill_height=True, + coder=True, +) diff --git a/app_replicate.py b/app_replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..8c0287b8ff029506b13409ade56a7a0b6a94cd35 --- /dev/null +++ b/app_replicate.py @@ -0,0 +1,18 @@ +import replicate_gradio + +from utils import get_app + +demo = get_app( + models=[ + "black-forest-labs/flux-depth-pro", + "black-forest-labs/flux-canny-pro", + "black-forest-labs/flux-fill-pro", + "black-forest-labs/flux-depth-dev", + "tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1", + ], + default_model="tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1", + src=replicate_gradio.registry, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_sailor.py b/app_sailor.py new file mode 100644 index 0000000000000000000000000000000000000000..dff3fc49c3674d6b044007e802de6b41e4f56db6 --- /dev/null +++ b/app_sailor.py @@ -0,0 +1,9 @@ +import gradio as gr +import spaces +import transformers_gradio + +demo = gr.load(name="sail/Sailor2-20B-Chat", src=transformers_gradio.registry) +demo.fn = spaces.GPU()(demo.fn) + +for fn in demo.fns.values(): + fn.api_name = False diff --git a/app_sambanova.py b/app_sambanova.py new file mode 100644 index 0000000000000000000000000000000000000000..a010b6f1c8ebf6c5e78f2708e76e49ccdd6a162c --- /dev/null +++ b/app_sambanova.py @@ -0,0 +1,19 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the sambanova models but keep their full names for loading +SAMBANOVA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("sambanova:")] + +# Create display names without the prefix +SAMBANOVA_MODELS_DISPLAY = [k.replace("sambanova:", "") for k in SAMBANOVA_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( + models=SAMBANOVA_MODELS_FULL, # Use the full names with prefix + default_model=SAMBANOVA_MODELS_FULL[-1], + dropdown_label="Select Sambanova Model", + choices=SAMBANOVA_MODELS_DISPLAY, # Display names without prefix + fill_height=True, +) \ No newline at end of file
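The Space-mirroring wrappers in this diff (app_mindsearch.py, app_moondream.py, app_omini.py, and similar files below) all follow one further pattern: load an existing Space with gr.load(..., src="spaces"), then clear the api_name of every registered function so the mirror is usable from the browser but not scriptable through the auto-generated API. The pattern in isolation (the Space id is a placeholder):

import gradio as gr

demo = gr.load(name="some-user/some-space", src="spaces")  # placeholder Space id

# demo.fns maps function ids to endpoints; api_name=False removes each one
# from the API page and from gradio_client access.
if hasattr(demo, "fns"):
    for fn in demo.fns.values():
        fn.api_name = False

if __name__ == "__main__":
    demo.launch()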
diff --git a/app_sambanova_coder.py b/app_sambanova_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..5e34df4fd6948ab2cae33091ce52c53a2a73f4a1 --- /dev/null +++ b/app_sambanova_coder.py @@ -0,0 +1,20 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +# Get the sambanova models but keep their full names for loading +SAMBANOVA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("sambanova:")] + +# Create display names without the prefix +SAMBANOVA_MODELS_DISPLAY = [k.replace("sambanova:", "") for k in SAMBANOVA_MODELS_FULL] + + +# Create and launch the interface using get_app utility +demo = get_app( + models=SAMBANOVA_MODELS_FULL, # Use the full names with prefix + default_model=SAMBANOVA_MODELS_FULL[-1], + dropdown_label="Select Sambanova Model", + choices=SAMBANOVA_MODELS_DISPLAY, # Display names without prefix + fill_height=True, + coder=True, +) diff --git a/app_sambanova_qwen.py b/app_sambanova_qwen.py new file mode 100644 index 0000000000000000000000000000000000000000..3d5c6e8af611ef1c0dc995bf0ea4fa88a1860c8f --- /dev/null +++ b/app_sambanova_qwen.py @@ -0,0 +1,21 @@ +import os + +import sambanova_gradio + +from utils import get_app + +demo = get_app( + models=[ + "Qwen2.5-Coder-0.5B-Instruct", + "Qwen2.5-0.5B-Instruct", + "Qwen2.5-Coder-32B-Instruct", + "Qwen2.5-72B-Instruct", + ], + default_model="Qwen2.5-Coder-32B-Instruct", + src=sambanova_gradio.registry, + accept_token=not os.getenv("SAMBANOVA_API_KEY"), + multimodal=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_showui.py b/app_showui.py new file mode 100644 index 0000000000000000000000000000000000000000..48b6143cc38b7a298e9dd372eaa43d3cbd518813 --- /dev/null +++ b/app_showui.py @@ -0,0 +1,10 @@ +import gradio as gr + +# Load the Gradio space +demo = gr.load(name="showlab/ShowUI", src="spaces") + + +# Disable API access for all functions +if hasattr(demo, "fns"): + for fn in demo.fns.values(): + fn.api_name = False diff --git a/app_smolagents.py b/app_smolagents.py new file mode 100644 index 0000000000000000000000000000000000000000..c88abc7f269a2c3c3b3e6e4b116231e48206c823 --- /dev/null +++ b/app_smolagents.py @@ -0,0 +1,19 @@ +import ai_gradio + +from utils_ai_gradio import get_app + +SMOLAGENTS_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("smolagents:")] + + +SMOLAGENTS_MODELS_DISPLAY = [k.replace("smolagents:", "") for k in SMOLAGENTS_MODELS_FULL] + +demo = get_app( + models=SMOLAGENTS_MODELS_FULL, # Use the full names with prefix + default_model=SMOLAGENTS_MODELS_FULL[-1], + dropdown_label="Select SmolAgents Model", + choices=SMOLAGENTS_MODELS_DISPLAY, # Display names without prefix + fill_height=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_together.py b/app_together.py new file mode 100644 index 0000000000000000000000000000000000000000..de8c2dbb841565ef4945fc644f3a455736b59255 --- /dev/null +++ b/app_together.py @@ -0,0 +1,52 @@ +import os + +import together_gradio + +from utils import get_app + +demo = get_app( + models=[ + "meta-llama/Llama-Vision-Free", + "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo", + "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", + "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + "meta-llama/Meta-Llama-3-8B-Instruct-Turbo", + "meta-llama/Meta-Llama-3-70B-Instruct-Turbo", + "meta-llama/Llama-3.2-3B-Instruct-Turbo", + "meta-llama/Meta-Llama-3-8B-Instruct-Lite", + "meta-llama/Meta-Llama-3-70B-Instruct-Lite", + "meta-llama/Llama-3-8b-chat-hf", + "meta-llama/Llama-3-70b-chat-hf", + "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", + "Qwen/Qwen2.5-Coder-32B-Instruct", + "microsoft/WizardLM-2-8x22B", + "google/gemma-2-27b-it", + "google/gemma-2-9b-it", + "databricks/dbrx-instruct", + "mistralai/Mixtral-8x7B-Instruct-v0.1", +
"mistralai/Mixtral-8x22B-Instruct-v0.1", + "Qwen/Qwen2.5-7B-Instruct-Turbo", + "Qwen/Qwen2.5-72B-Instruct-Turbo", + "Qwen/Qwen2-72B-Instruct", + "deepseek-ai/deepseek-llm-67b-chat", + "google/gemma-2b-it", + "Gryphe/MythoMax-L2-13b", + "meta-llama/Llama-2-13b-chat-hf", + "mistralai/Mistral-7B-Instruct-v0.1", + "mistralai/Mistral-7B-Instruct-v0.2", + "mistralai/Mistral-7B-Instruct-v0.3", + "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", + "togethercomputer/StripedHyena-Nous-7B", + "upstage/SOLAR-10.7B-Instruct-v1.0", + "meta-llama/Llama-3.3-70B-Instruct-Turbo", + ], + default_model="meta-llama/Llama-3.3-70B-Instruct-Turbo", + src=together_gradio.registry, + accept_token=not os.getenv("TOGETHER_API_KEY"), + multimodal=True, +) + +if __name__ == "__main__": + demo.launch() diff --git a/app_transformers.py b/app_transformers.py new file mode 100644 index 0000000000000000000000000000000000000000..976f26bc6fc88cf301c970aeeb37c01a35074494 --- /dev/null +++ b/app_transformers.py @@ -0,0 +1,11 @@ +import gradio as gr + +demo = gr.load(name="akhaliq/phi-4", src="spaces") + +# Disable API access for all functions +if hasattr(demo, "fns"): + for fn in demo.fns.values(): + fn.api_name = False + +if __name__ == "__main__": + demo.launch() diff --git a/app_trellis.py b/app_trellis.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f33cb2011fc7690ce004393dbec65e3aee5c75 --- /dev/null +++ b/app_trellis.py @@ -0,0 +1,10 @@ +import gradio as gr + +# Load the Gradio space +demo = gr.load(name="JeffreyXiang/TRELLIS", src="spaces") + + +# Disable API access for all functions +if hasattr(demo, "fns"): + for fn in demo.fns.values(): + fn.api_name = False diff --git a/app_xai.py b/app_xai.py new file mode 100644 index 0000000000000000000000000000000000000000..ce72140d2207ec88178c0ab917cd400ea0c585bd --- /dev/null +++ b/app_xai.py @@ -0,0 +1,20 @@ +import os + +import xai_gradio + +from utils import get_app + +demo = get_app( + models=[ + "grok-beta", + "grok-vision-beta", + "grok-2-vision-1212", + "grok-2-1212", + ], + default_model="grok-2-vision-1212", + src=xai_gradio.registry, + accept_token=not os.getenv("XAI_API_KEY"), +) + +if __name__ == "__main__": + demo.launch() diff --git a/backend_api.py b/backend_api.py deleted file mode 100644 index a2eacb69248998aa37b9c03a57cdda0d75bc3cdb..0000000000000000000000000000000000000000 --- a/backend_api.py +++ /dev/null @@ -1,1681 +0,0 @@ -""" -FastAPI backend for AnyCoder - provides REST API endpoints -""" -from fastapi import FastAPI, HTTPException, Header, WebSocket, WebSocketDisconnect, Request, Response -from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import StreamingResponse, RedirectResponse, JSONResponse -from pydantic import BaseModel -from typing import Optional, List, Dict, AsyncGenerator -import json -import asyncio -from datetime import datetime, timedelta -import secrets -import base64 -import urllib.parse -import re - -# Import only what we need, avoiding Gradio UI imports -import sys -import os -from huggingface_hub import InferenceClient -import httpx - -# Import model handling from backend_models -from backend_models import ( - get_inference_client, - get_real_model_id, - is_native_sdk_model, - is_mistral_model -) - -# Import project importer for importing from HF/GitHub -from project_importer import ProjectImporter - -# Import system prompts from standalone backend_prompts.py -# No dependencies on Gradio or heavy libraries -print("[Startup] Loading system prompts from backend_prompts...") - -try: - from 
backend_prompts import ( - HTML_SYSTEM_PROMPT, - TRANSFORMERS_JS_SYSTEM_PROMPT, - STREAMLIT_SYSTEM_PROMPT, - REACT_SYSTEM_PROMPT, - REACT_FOLLOW_UP_SYSTEM_PROMPT, # Import React followup prompt - get_gradio_system_prompt, # Import the function to get dynamic prompt - get_comfyui_system_prompt, # Import the function to get dynamic ComfyUI prompt - JSON_SYSTEM_PROMPT, - GENERIC_SYSTEM_PROMPT - ) - # Get the Gradio system prompt (includes full Gradio 6 documentation) - GRADIO_SYSTEM_PROMPT = get_gradio_system_prompt() - # Get the ComfyUI system prompt (includes full ComfyUI documentation) - COMFYUI_SYSTEM_PROMPT = get_comfyui_system_prompt() - print("[Startup] ✅ All system prompts loaded successfully from backend_prompts.py") - print(f"[Startup] 📚 Gradio system prompt loaded with full documentation ({len(GRADIO_SYSTEM_PROMPT)} chars)") - print(f"[Startup] 📚 ComfyUI system prompt loaded with full documentation ({len(COMFYUI_SYSTEM_PROMPT)} chars)") -except Exception as e: - import traceback - print(f"[Startup] ❌ ERROR: Could not import from backend_prompts: {e}") - print(f"[Startup] Traceback: {traceback.format_exc()}") - print("[Startup] Using minimal fallback prompts") - - # Define minimal fallback prompts - HTML_SYSTEM_PROMPT = "You are an expert web developer. Create complete HTML applications with CSS and JavaScript." - TRANSFORMERS_JS_SYSTEM_PROMPT = "You are an expert at creating transformers.js applications. Generate complete working code." - STREAMLIT_SYSTEM_PROMPT = "You are an expert Streamlit developer. Create complete Streamlit applications." - REACT_SYSTEM_PROMPT = "You are an expert React developer. Create complete React applications with Next.js." - GRADIO_SYSTEM_PROMPT = "You are an expert Gradio developer. Create complete, working Gradio applications." - COMFYUI_SYSTEM_PROMPT = "You are an expert ComfyUI developer. Generate clean, valid JSON workflows for ComfyUI based on the user's request. READ THE USER'S REQUEST CAREFULLY and create a workflow that matches their specific needs." - JSON_SYSTEM_PROMPT = "You are an expert at generating JSON configurations. Create valid, well-structured JSON." - GENERIC_SYSTEM_PROMPT = "You are an expert {language} developer. Create complete, working {language} applications." 
- -print("[Startup] System prompts initialization complete") - -# Cache system prompts map for fast lookup (created once at startup) -SYSTEM_PROMPT_CACHE = { - "html": HTML_SYSTEM_PROMPT, - "gradio": GRADIO_SYSTEM_PROMPT, - "streamlit": STREAMLIT_SYSTEM_PROMPT, - "transformers.js": TRANSFORMERS_JS_SYSTEM_PROMPT, - "react": REACT_SYSTEM_PROMPT, - "comfyui": COMFYUI_SYSTEM_PROMPT, # Use ComfyUI-specific prompt with documentation -} - -# Client connection pool for reuse (thread-safe) -import threading -_client_pool = {} -_client_pool_lock = threading.Lock() - -def get_cached_client(model_id: str, provider: str = "auto"): - """Get or create a cached API client for reuse""" - cache_key = f"{model_id}:{provider}" - - with _client_pool_lock: - if cache_key not in _client_pool: - _client_pool[cache_key] = get_inference_client(model_id, provider) - return _client_pool[cache_key] - -# Define models and languages here to avoid importing Gradio UI -AVAILABLE_MODELS = [ - {"name": "GLM-4.6", "id": "zai-org/GLM-4.6", "description": "GLM-4.6 model via HuggingFace with Cerebras provider (Default)", "supports_images": False}, - {"name": "GLM-4.6V 👁️", "id": "zai-org/GLM-4.6V:zai-org", "description": "GLM-4.6V vision model - supports image uploads for visual understanding", "supports_images": True}, - {"name": "DeepSeek V3", "id": "deepseek-ai/DeepSeek-V3", "description": "DeepSeek V3 - Fast model for code generation via HuggingFace Router with Novita provider", "supports_images": False}, - {"name": "DeepSeek R1", "id": "deepseek-ai/DeepSeek-R1", "description": "DeepSeek R1 model for code generation via HuggingFace", "supports_images": False}, - {"name": "MiniMax M2", "id": "MiniMaxAI/MiniMax-M2", "description": "MiniMax M2 model via HuggingFace InferenceClient with Novita provider", "supports_images": False}, - {"name": "Kimi K2 Thinking", "id": "moonshotai/Kimi-K2-Thinking", "description": "Moonshot Kimi K2 Thinking model via HuggingFace with Together AI provider", "supports_images": False}, -] - -# Cache model lookup for faster access (built after AVAILABLE_MODELS is defined) -MODEL_CACHE = {model["id"]: model for model in AVAILABLE_MODELS} -print(f"[Startup] ✅ Performance optimizations loaded: {len(SYSTEM_PROMPT_CACHE)} cached prompts, {len(MODEL_CACHE)} cached models, client pooling enabled") - -LANGUAGE_CHOICES = ["html", "gradio", "transformers.js", "streamlit", "comfyui", "react"] - -app = FastAPI(title="AnyCoder API", version="1.0.0") - -# OAuth and environment configuration (must be before CORS) -OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID", "") -OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET", "") -OAUTH_SCOPES = os.getenv("OAUTH_SCOPES", "openid profile manage-repos write-discussions") -OPENID_PROVIDER_URL = os.getenv("OPENID_PROVIDER_URL", "https://huggingface.co") -SPACE_HOST = os.getenv("SPACE_HOST", "localhost:7860") - -# Configure CORS - allow all origins in production, specific in dev -# In Docker Space, requests come from the same domain via Next.js proxy -ALLOWED_ORIGINS = os.getenv("ALLOWED_ORIGINS", "*").split(",") if os.getenv("ALLOWED_ORIGINS") else [ - "http://localhost:3000", - "http://localhost:3001", - "http://localhost:7860", - f"https://{SPACE_HOST}" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else "http://localhost:7860" -] - -app.add_middleware( - CORSMiddleware, - allow_origins=ALLOWED_ORIGINS if ALLOWED_ORIGINS != ["*"] else ["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - allow_origin_regex=r"https://.*\.hf\.space" if 
SPACE_HOST and not SPACE_HOST.startswith("localhost") else None, -) - -# In-memory store for OAuth states (in production, use Redis or similar) -oauth_states = {} - -# In-memory store for user sessions -user_sessions = {} - - -def is_session_expired(session_data: dict) -> bool: - """Check if session has expired""" - expires_at = session_data.get("expires_at") - if not expires_at: - # If no expiration info, check if session is older than 8 hours - timestamp = session_data.get("timestamp", datetime.now()) - return (datetime.now() - timestamp) > timedelta(hours=8) - - return datetime.now() >= expires_at - - -# Background task for cleaning up expired sessions -async def cleanup_expired_sessions(): - """Periodically clean up expired sessions""" - while True: - try: - await asyncio.sleep(3600) # Run every hour - - expired_sessions = [] - for session_token, session_data in user_sessions.items(): - if is_session_expired(session_data): - expired_sessions.append(session_token) - - for session_token in expired_sessions: - user_sessions.pop(session_token, None) - print(f"[Auth] Cleaned up expired session: {session_token[:10]}...") - - if expired_sessions: - print(f"[Auth] Cleaned up {len(expired_sessions)} expired session(s)") - except Exception as e: - print(f"[Auth] Cleanup error: {e}") - -# Start cleanup task on app startup -@app.on_event("startup") -async def startup_event(): - """Run startup tasks""" - asyncio.create_task(cleanup_expired_sessions()) - print("[Startup] ✅ Session cleanup task started") - - -# Pydantic models for request/response -class CodeGenerationRequest(BaseModel): - query: str - language: str = "html" - model_id: str = "zai-org/GLM-4.6" - provider: str = "auto" - history: List[List[str]] = [] - agent_mode: bool = False - existing_repo_id: Optional[str] = None # For auto-deploy to update existing space - skip_auto_deploy: bool = False # Skip auto-deploy (for PR creation) - image_url: Optional[str] = None # For vision models like GLM-4.6V - - -class DeploymentRequest(BaseModel): - code: str - space_name: Optional[str] = None - language: str - requirements: Optional[str] = None - existing_repo_id: Optional[str] = None # For updating existing spaces - commit_message: Optional[str] = None - history: List[Dict] = [] # Chat history for tracking deployed spaces - - -class AuthStatus(BaseModel): - authenticated: bool - username: Optional[str] = None - message: str - - -class ModelInfo(BaseModel): - name: str - id: str - description: str - - -class CodeGenerationResponse(BaseModel): - code: str - history: List[List[str]] - status: str - - -class ImportRequest(BaseModel): - url: str - prefer_local: bool = False - username: Optional[str] = None # Username of authenticated user for ownership check - - -class ImportResponse(BaseModel): - status: str - message: str - code: str - language: str - url: str - metadata: Dict - owned_by_user: bool = False # True if user owns the imported repo - repo_id: Optional[str] = None # The repo ID (username/repo-name) if applicable - - -class PullRequestRequest(BaseModel): - repo_id: str # username/space-name - code: str - language: str - pr_title: Optional[str] = None - pr_description: Optional[str] = None - - -class PullRequestResponse(BaseModel): - success: bool - message: str - pr_url: Optional[str] = None - - -class DuplicateSpaceRequest(BaseModel): - from_space_id: str # username/space-name - to_space_name: Optional[str] = None # Just the name, not full ID - private: bool = False - - -class DuplicateSpaceResponse(BaseModel): - success: bool - message: 
str - space_url: Optional[str] = None - space_id: Optional[str] = None - - -# Mock authentication for development -# In production, integrate with HuggingFace OAuth -class MockAuth: - def __init__(self, token: Optional[str] = None, username: Optional[str] = None): - self.token = token - self.username = username - - def is_authenticated(self): - return bool(self.token) - - -def get_auth_from_header(authorization: Optional[str] = None): - """Extract authentication from header or session token""" - if not authorization: - return MockAuth(None, None) - - # Handle "Bearer " prefix - if authorization.startswith("Bearer "): - token = authorization.replace("Bearer ", "") - else: - token = authorization - - # Check if this is a session token (UUID format) - if token and "-" in token and len(token) > 20: - # Look up the session to get user info - if token in user_sessions: - session = user_sessions[token] - username = session.get("username") - - # If username is missing from session (e.g., old session), try to fetch it - if not username and session.get("user_info"): - user_info = session["user_info"] - # Use same order as OAuth callback for consistency - username = ( - user_info.get("preferred_username") or - user_info.get("name") or - user_info.get("sub") or - user_info.get("username") or - "user" - ) - # Update the session with the username for future requests - session["username"] = username - print(f"[Auth] Extracted and cached username from user_info: {username}") - - return MockAuth(session["access_token"], username) - - # Dev token format: dev_token_<username>_<suffix> - if token and token.startswith("dev_token_"): - parts = token.split("_") - username = parts[2] if len(parts) > 2 else "user" - return MockAuth(token, username) - - # Regular OAuth access token passed directly - try to fetch username from HF - # This happens when frontend sends OAuth token after OAuth callback - if token and len(token) > 20: - try: - from huggingface_hub import HfApi - hf_api = HfApi(token=token) - user_info = hf_api.whoami() - username = ( - user_info.get("preferred_username") or - user_info.get("name") or - user_info.get("sub") or - "user" - ) - print(f"[Auth] Fetched username from OAuth token: {username}") - return MockAuth(token, username) - except Exception as e: - print(f"[Auth] Could not fetch username from OAuth token: {e}") - # Return with token but no username - deployment will try to fetch it - return MockAuth(token, None) - - # Fallback: token with no username - return MockAuth(token, None) - - -@app.get("/") -async def root(): - """Health check endpoint""" - return {"status": "ok", "message": "AnyCoder API is running"} - - -@app.get("/api/models", response_model=List[ModelInfo]) -async def get_models(): - """Get available AI models""" - return [ - ModelInfo( - name=model["name"], - id=model["id"], - description=model["description"] - ) - for model in AVAILABLE_MODELS - ] - - -@app.get("/api/languages") -async def get_languages(): - """Get available programming languages/frameworks""" - return {"languages": LANGUAGE_CHOICES} - - -@app.get("/api/auth/login") -async def oauth_login(request: Request): - """Initiate OAuth login flow""" - # Generate a random state to prevent CSRF - state = secrets.token_urlsafe(32) - oauth_states[state] = {"timestamp": datetime.now()} - - # Build redirect URI - protocol = "https" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else "http" - redirect_uri = f"{protocol}://{SPACE_HOST}/api/auth/callback" - - # Build authorization URL - auth_url = ( -
f"{OPENID_PROVIDER_URL}/oauth/authorize" - f"?client_id={OAUTH_CLIENT_ID}" - f"&redirect_uri={urllib.parse.quote(redirect_uri)}" - f"&scope={urllib.parse.quote(OAUTH_SCOPES)}" - f"&state={state}" - f"&response_type=code" - ) - - return JSONResponse({"login_url": auth_url, "state": state}) - - -@app.get("/api/auth/callback") -async def oauth_callback(code: str, state: str, request: Request): - """Handle OAuth callback""" - # Verify state to prevent CSRF - if state not in oauth_states: - raise HTTPException(status_code=400, detail="Invalid state parameter") - - # Clean up old states - oauth_states.pop(state, None) - - # Exchange code for tokens - protocol = "https" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else "http" - redirect_uri = f"{protocol}://{SPACE_HOST}/api/auth/callback" - - # Prepare authorization header - auth_string = f"{OAUTH_CLIENT_ID}:{OAUTH_CLIENT_SECRET}" - auth_bytes = auth_string.encode('utf-8') - auth_b64 = base64.b64encode(auth_bytes).decode('utf-8') - - async with httpx.AsyncClient() as client: - try: - token_response = await client.post( - f"{OPENID_PROVIDER_URL}/oauth/token", - data={ - "client_id": OAUTH_CLIENT_ID, - "code": code, - "grant_type": "authorization_code", - "redirect_uri": redirect_uri, - }, - headers={ - "Authorization": f"Basic {auth_b64}", - "Content-Type": "application/x-www-form-urlencoded", - }, - ) - token_response.raise_for_status() - token_data = token_response.json() - - # Get user info - access_token = token_data.get("access_token") - userinfo_response = await client.get( - f"{OPENID_PROVIDER_URL}/oauth/userinfo", - headers={"Authorization": f"Bearer {access_token}"}, - ) - userinfo_response.raise_for_status() - user_info = userinfo_response.json() - - # Extract username - try multiple possible fields - username = ( - user_info.get("preferred_username") or # Primary HF field - user_info.get("name") or # Alternative field - user_info.get("sub") or # OpenID subject - user_info.get("username") or # Generic username - "user" # Fallback - ) - - print(f"[OAuth] User info received: {user_info}") - print(f"[OAuth] Extracted username: {username}") - - # Calculate token expiration - # OAuth tokens typically have expires_in in seconds - expires_in = token_data.get("expires_in", 28800) # Default 8 hours - expires_at = datetime.now() + timedelta(seconds=expires_in) - - # Create session - session_token = secrets.token_urlsafe(32) - user_sessions[session_token] = { - "access_token": access_token, - "user_info": user_info, - "timestamp": datetime.now(), - "expires_at": expires_at, - "username": username, - "deployed_spaces": [] # Track deployed spaces for follow-up updates - } - - print(f"[OAuth] Session created: {session_token[:10]}... 
for user: {username}") - - # Redirect to frontend with session token - frontend_url = f"{protocol}://{SPACE_HOST}/?session={session_token}" - return RedirectResponse(url=frontend_url) - - except httpx.HTTPError as e: - print(f"OAuth error: {e}") - raise HTTPException(status_code=500, detail=f"OAuth failed: {str(e)}") - - -async def validate_token_with_hf(access_token: str) -> bool: - """Validate token with HuggingFace API""" - try: - async with httpx.AsyncClient() as client: - response = await client.get( - f"{OPENID_PROVIDER_URL}/oauth/userinfo", - headers={"Authorization": f"Bearer {access_token}"}, - timeout=5.0 - ) - return response.status_code == 200 - except Exception as e: - print(f"[Auth] Token validation error: {e}") - return False - - -@app.get("/api/auth/session") -async def get_session(session: str): - """Get user info from session token""" - if session not in user_sessions: - raise HTTPException(status_code=401, detail="Invalid session") - - session_data = user_sessions[session] - - # Check if session has expired - if is_session_expired(session_data): - # Clean up expired session - user_sessions.pop(session, None) - raise HTTPException(status_code=401, detail="Session expired. Please sign in again.") - - # Validate token with HuggingFace - if not await validate_token_with_hf(session_data["access_token"]): - # Token is invalid, clean up session - user_sessions.pop(session, None) - raise HTTPException(status_code=401, detail="Authentication expired. Please sign in again.") - - return { - "access_token": session_data["access_token"], - "user_info": session_data["user_info"], - } - - -@app.get("/api/auth/status") -async def auth_status(authorization: Optional[str] = Header(None)): - """Check authentication status and validate token""" - auth = get_auth_from_header(authorization) - - if not auth.is_authenticated(): - return AuthStatus( - authenticated=False, - username=None, - message="Not authenticated" - ) - - # For dev tokens, skip validation - if auth.token and auth.token.startswith("dev_token_"): - return AuthStatus( - authenticated=True, - username=auth.username, - message=f"Authenticated as {auth.username} (dev mode)" - ) - - # For session tokens, check expiration and validate - token = authorization.replace("Bearer ", "") if authorization else None - if token and "-" in token and len(token) > 20 and token in user_sessions: - session_data = user_sessions[token] - - # Check if session has expired - if is_session_expired(session_data): - # Clean up expired session - user_sessions.pop(token, None) - return AuthStatus( - authenticated=False, - username=None, - message="Session expired" - ) - - # Validate token with HuggingFace - if not await validate_token_with_hf(session_data["access_token"]): - # Token is invalid, clean up session - user_sessions.pop(token, None) - return AuthStatus( - authenticated=False, - username=None, - message="Authentication expired" - ) - - return AuthStatus( - authenticated=True, - username=auth.username, - message=f"Authenticated as {auth.username}" - ) - - # For direct OAuth tokens, validate with HF - if auth.token: - is_valid = await validate_token_with_hf(auth.token) - if is_valid: - return AuthStatus( - authenticated=True, - username=auth.username, - message=f"Authenticated as {auth.username}" - ) - else: - return AuthStatus( - authenticated=False, - username=None, - message="Token expired or invalid" - ) - - return AuthStatus( - authenticated=False, - username=None, - message="Not authenticated" - ) - - -def cleanup_generated_code(code: str, 
language: str) -> str: - """Remove LLM explanatory text and extract only the actual code""" - try: - original_code = code - - # Special handling for transformers.js - don't clean, pass through as-is - # The parser will handle extracting the files from === markers - if language == "transformers.js": - return code - - # Special handling for ComfyUI JSON - if language == "comfyui": - # Try to parse as JSON first - try: - json.loads(code) - return code # If it parses, return as-is - except json.JSONDecodeError: - pass - - # Find the last } in the code - last_brace = code.rfind('}') - if last_brace != -1: - # Extract everything up to and including the last } - potential_json = code[:last_brace + 1] - - # Try to find where the JSON actually starts - json_start = 0 - if '```json' in potential_json: - match = re.search(r'```json\s*\n', potential_json) - if match: - json_start = match.end() - elif '```' in potential_json: - match = re.search(r'```\s*\n', potential_json) - if match: - json_start = match.end() - - # Extract the JSON - cleaned_json = potential_json[json_start:].strip() - cleaned_json = re.sub(r'```\s*$', '', cleaned_json).strip() - - # Validate - try: - json.loads(cleaned_json) - return cleaned_json - except json.JSONDecodeError: - pass - - # General cleanup for code languages - # Remove markdown code blocks and extract code - if '```' in code: - # Pattern to match code blocks with language specifiers - patterns = [ - r'```(?:html|HTML)\s*\n([\s\S]+?)(?:\n```|$)', - r'```(?:python|py|Python)\s*\n([\s\S]+?)(?:\n```|$)', - r'```(?:javascript|js|jsx|JavaScript)\s*\n([\s\S]+?)(?:\n```|$)', - r'```(?:typescript|ts|tsx|TypeScript)\s*\n([\s\S]+?)(?:\n```|$)', - r'```\s*\n([\s\S]+?)(?:\n```|$)', # Generic code block - ] - - for pattern in patterns: - match = re.search(pattern, code, re.IGNORECASE) - if match: - code = match.group(1).strip() - break - - # Remove common LLM explanatory patterns - # Remove lines that start with explanatory text - lines = code.split('\n') - cleaned_lines = [] - in_code = False - - for line in lines: - stripped = line.strip() - - # Skip common explanatory patterns at the start - if not in_code and ( - stripped.lower().startswith('here') or - stripped.lower().startswith('this') or - stripped.lower().startswith('the above') or - stripped.lower().startswith('note:') or - stripped.lower().startswith('explanation:') or - stripped.lower().startswith('to use') or - stripped.lower().startswith('usage:') or - stripped.lower().startswith('instructions:') or - stripped.startswith('===') and '===' in stripped # Section markers - ): - continue - - # Once we hit actual code, we're in - if stripped and not stripped.startswith('#') and not stripped.startswith('//'): - in_code = True - - cleaned_lines.append(line) - - code = '\n'.join(cleaned_lines).strip() - - # Remove trailing explanatory text after the code ends - # For HTML: remove everything after final closing tag - if language == "html": - # Find last </html> or </body> at root level - last_html = code.rfind('</html>') - last_body = code.rfind('</body>') - last_tag = max(last_html, last_body) - if last_tag != -1: - # Check if there's significant text after - after_tag = code[last_tag + 7:].strip() # +7 for length of the closing tag - if after_tag and len(after_tag) > 100: # Significant explanatory text - code = code[:last_tag + 7].strip() - - # For Python: remove text after the last function/class definition or code block - elif language in ["gradio", "streamlit"]: - # Find the last line that looks like actual code (not comments or blank) - lines = code.split('\n') -
last_code_line = -1 - for i in range(len(lines) - 1, -1, -1): - stripped = lines[i].strip() - if stripped and not stripped.startswith('#') and not stripped.startswith('"""') and not stripped.startswith("'''"): - # This looks like actual code - last_code_line = i - break - - if last_code_line != -1 and last_code_line < len(lines) - 5: - # If there are more than 5 lines after last code, likely explanatory - code = '\n'.join(lines[:last_code_line + 1]) - - # Return cleaned code or original if cleaning made it too short - if len(code) > 50: - return code - else: - return original_code - - except Exception as e: - print(f"[Code Cleanup] Error for {language}: {e}") - return code - - -@app.post("/api/generate") -async def generate_code( - request: CodeGenerationRequest, - authorization: Optional[str] = Header(None) -): - """Generate code based on user query - returns streaming response""" - # Dev mode: No authentication required - just use server's HF_TOKEN - # In production, you would check real OAuth tokens here - - # Extract parameters from request body - query = request.query - language = request.language - model_id = request.model_id - provider = request.provider - - async def event_stream() -> AsyncGenerator[str, None]: - """Stream generated code chunks""" - # Use the model_id from outer scope - selected_model_id = model_id - - try: - # Fast model lookup using cache - selected_model = MODEL_CACHE.get(selected_model_id) - if not selected_model: - # Fallback to first available model (shouldn't happen often) - selected_model = AVAILABLE_MODELS[0] - selected_model_id = selected_model["id"] - - # Track generated code - generated_code = "" - - # Fast system prompt lookup using cache - system_prompt = SYSTEM_PROMPT_CACHE.get(language) - if not system_prompt: - # Format generic prompt only if needed - system_prompt = GENERIC_SYSTEM_PROMPT.format(language=language) - - # Detect if this is a followup request for React apps - # Check if there's existing code in the conversation history - is_followup = False - if language == "react" and request.history: - # Check if there's any previous assistant message with code (indicating a followup) - for msg in request.history: - if isinstance(msg, dict): - role = msg.get('role', '') - content = msg.get('content', '') - elif isinstance(msg, list) and len(msg) >= 2: - role = msg[0] - content = msg[1] - else: - continue - - # If we find previous code from assistant, this is a followup - if role == 'assistant' and ('===' in content or 'Dockerfile' in content or 'package.json' in content): - is_followup = True - print(f"[Generate] Detected React followup request") - break - - # Use followup prompt for React if detected - if is_followup and language == "react": - system_prompt = REACT_FOLLOW_UP_SYSTEM_PROMPT - print(f"[Generate] Using React followup system prompt for targeted fixes") - - # Get cached client (reuses connections) - client = get_cached_client(selected_model_id, provider) - - # Get the real model ID with provider suffixes - actual_model_id = get_real_model_id(selected_model_id) - - # Prepare messages (optimized - no string concatenation in hot path) - # Check if this is a vision model and we have an image - if request.image_url and selected_model_id == "zai-org/GLM-4.6V:zai-org": - # Vision model with image - use multi-modal format - user_content = [ - { - "type": "text", - "text": f"Generate a {language} application: {query}" - }, - { - "type": "image_url", - "image_url": { - "url": request.image_url - } - } - ] - messages = [ - {"role": "system", 
"content": system_prompt}, - {"role": "user", "content": user_content} - ] - else: - # Regular text-only model - user_content = f"Generate a {language} application: {query}" - messages = [ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": user_content} - ] - - # Stream the response - try: - # All models now use OpenAI-compatible API via HF Router or Inference API - stream = client.chat.completions.create( - model=actual_model_id, - messages=messages, - temperature=0.7, - max_tokens=10000, - stream=True - ) - - chunk_count = 0 - - # Only process stream if it exists - if stream: - # Optimized chunk processing - for chunk in stream: - chunk_content = None - - # OpenAI format: chunk.choices[0].delta.content - try: - if chunk.choices and chunk.choices[0].delta.content: - chunk_content = chunk.choices[0].delta.content - except (AttributeError, IndexError): - continue - - if chunk_content: - generated_code += chunk_content - chunk_count += 1 - - # Send chunk immediately - optimized JSON serialization - # Only yield control every 5 chunks to reduce overhead - if chunk_count % 5 == 0: - await asyncio.sleep(0) - - # Build event data efficiently - event_data = json.dumps({ - "type": "chunk", - "content": chunk_content - }) - yield f"data: {event_data}\n\n" - - # Clean up generated code (remove LLM explanatory text and markdown) - generated_code = cleanup_generated_code(generated_code, language) - - # Send completion event (optimized - no timestamp in hot path) - completion_data = json.dumps({ - "type": "complete", - "code": generated_code - }) - yield f"data: {completion_data}\n\n" - - # Auto-deploy after code generation (if authenticated and not skipped) - auth = get_auth_from_header(authorization) - - if request.skip_auto_deploy: - print(f"[Auto-Deploy] Skipped - PR creation will be handled by frontend") - - if auth.is_authenticated() and not (auth.token and auth.token.startswith("dev_token_")) and not request.skip_auto_deploy: - try: - # Send deploying status - deploying_data = json.dumps({ - "type": "deploying", - "message": "🚀 Deploying your app to HuggingFace Spaces..." - }) - yield f"data: {deploying_data}\n\n" - - # Import deployment function - from backend_deploy import deploy_to_huggingface_space - - # Convert history to the format expected by deploy function - # History comes from frontend as [[role, content], ...] - history_list = [] - if request.history: - for msg in request.history: - if isinstance(msg, list) and len(msg) >= 2: - # Already in correct format [[role, content], ...] 
- history_list.append([msg[0], msg[1]]) - elif isinstance(msg, dict): - # Convert dict format to list format - role = msg.get('role', '') - content = msg.get('content', '') - if role and content: - history_list.append([role, content]) - - print(f"[Auto-Deploy] Starting deployment...") - print(f"[Auto-Deploy] - Language: {language}") - print(f"[Auto-Deploy] - History items: {len(history_list)}") - print(f"[Auto-Deploy] - Username: {auth.username}") - print(f"[Auto-Deploy] - Code length: {len(generated_code)}") - print(f"[Auto-Deploy] - Existing repo ID from request: {request.existing_repo_id}") - - # Deploy the code (update existing space if provided) - success, message, space_url = deploy_to_huggingface_space( - code=generated_code, - language=language, - token=auth.token, - username=auth.username, - existing_repo_id=request.existing_repo_id, # Use duplicated/imported space - history=history_list - ) - - print(f"[Auto-Deploy] Deployment result:") - print(f"[Auto-Deploy] - Success: {success}") - print(f"[Auto-Deploy] - Message: {message}") - print(f"[Auto-Deploy] - Space URL: {space_url}") - - if success and space_url: - # Send deployment success - deploy_success_data = json.dumps({ - "type": "deployed", - "message": message, - "space_url": space_url - }) - yield f"data: {deploy_success_data}\n\n" - else: - # Send deployment error (non-blocking - code generation still succeeded) - deploy_error_data = json.dumps({ - "type": "deploy_error", - "message": f"⚠️ Deployment failed: {message}" - }) - yield f"data: {deploy_error_data}\n\n" - except Exception as deploy_error: - # Log deployment error but don't fail the generation - import traceback - print(f"[Auto-Deploy] ========== DEPLOYMENT EXCEPTION ==========") - print(f"[Auto-Deploy] Exception type: {type(deploy_error).__name__}") - print(f"[Auto-Deploy] Error message: {str(deploy_error)}") - print(f"[Auto-Deploy] Full traceback:") - traceback.print_exc() - print(f"[Auto-Deploy] ==========================================") - - deploy_error_data = json.dumps({ - "type": "deploy_error", - "message": f"⚠️ Deployment error: {str(deploy_error)}" - }) - yield f"data: {deploy_error_data}\n\n" - else: - print(f"[Auto-Deploy] Skipped - authenticated: {auth.is_authenticated()}, token_exists: {auth.token is not None}, is_dev: {auth.token.startswith('dev_token_') if auth.token else False}") - - except Exception as e: - # Handle rate limiting and other API errors - error_message = str(e) - is_rate_limit = False - error_type = type(e).__name__ - - # Check for OpenAI SDK rate limit errors - if error_type == "RateLimitError" or "rate_limit" in error_type.lower(): - is_rate_limit = True - # Check if this is a rate limit error (429 status code) - elif hasattr(e, 'status_code') and e.status_code == 429: - is_rate_limit = True - # Check error message for rate limit indicators - elif "429" in error_message or "rate limit" in error_message.lower() or "too many requests" in error_message.lower(): - is_rate_limit = True - - if is_rate_limit: - # Try to extract retry-after header or message - retry_after = None - if hasattr(e, 'response') and e.response: - retry_after = e.response.headers.get('Retry-After') or e.response.headers.get('retry-after') - # Also check if the error object has retry_after - elif hasattr(e, 'retry_after'): - retry_after = str(e.retry_after) - - if selected_model_id == "x-ai/grok-4.1-fast" or selected_model_id.startswith("openrouter/"): - error_message = "⏱️ Rate limit exceeded for OpenRouter model" - if retry_after: - error_message += f". 
Please wait {retry_after} seconds before trying again." - else: - error_message += ". Free tier allows up to 20 requests per minute. Please wait a moment and try again." - else: - error_message = f"⏱️ Rate limit exceeded. Please wait before trying again." - if retry_after: - error_message += f" Retry after {retry_after} seconds." - - # Check for other common API errors - elif hasattr(e, 'status_code'): - if e.status_code == 401: - error_message = "❌ Authentication failed. Please check your API key." - elif e.status_code == 403: - error_message = "❌ Access forbidden. Please check your API key permissions." - elif e.status_code == 500 or e.status_code == 502 or e.status_code == 503: - error_message = "❌ Service temporarily unavailable. Please try again later." - - error_data = json.dumps({ - "type": "error", - "message": error_message - }) - yield f"data: {error_data}\n\n" - - except Exception as e: - # Fallback error handling - error_message = str(e) - # Check if it's a rate limit error in the exception message - if "429" in error_message or "rate limit" in error_message.lower() or "too many requests" in error_message.lower(): - if selected_model_id == "x-ai/grok-4.1-fast" or selected_model_id.startswith("openrouter/"): - error_message = "⏱️ Rate limit exceeded for OpenRouter model. Free tier allows up to 20 requests per minute. Please wait a moment and try again." - else: - error_message = "⏱️ Rate limit exceeded. Please wait before trying again." - - error_data = json.dumps({ - "type": "error", - "message": f"Generation error: {error_message}" - }) - yield f"data: {error_data}\n\n" - - return StreamingResponse( - event_stream(), - media_type="text/event-stream", - headers={ - "Cache-Control": "no-cache, no-transform", - "Connection": "keep-alive", - "X-Accel-Buffering": "no", - "Content-Encoding": "none", - "Transfer-Encoding": "chunked" - } - ) - - -@app.post("/api/deploy") -async def deploy( - request: DeploymentRequest, - authorization: Optional[str] = Header(None) -): - """Deploy generated code to HuggingFace Spaces""" - print(f"[Deploy] ========== NEW DEPLOYMENT REQUEST ==========") - print(f"[Deploy] Authorization header present: {authorization is not None}") - if authorization: - auth_preview = authorization[:20] + "..." 
if len(authorization) > 20 else authorization - print(f"[Deploy] Authorization preview: {auth_preview}") - - auth = get_auth_from_header(authorization) - - print(f"[Deploy] Auth object - is_authenticated: {auth.is_authenticated()}, username: {auth.username}, has_token: {auth.token is not None}") - - if not auth.is_authenticated(): - raise HTTPException(status_code=401, detail="Authentication required") - - # Check if this is dev mode (no real token) - if auth.token and auth.token.startswith("dev_token_"): - # In dev mode, open HF Spaces creation page - from backend_deploy import detect_sdk_from_code - base_url = "https://huggingface.co/new-space" - - sdk = detect_sdk_from_code(request.code, request.language) - - params = urllib.parse.urlencode({ - "name": request.space_name or "my-anycoder-app", - "sdk": sdk - }) - - # Prepare file content based on language - if request.language in ["html", "transformers.js", "comfyui"]: - file_path = "index.html" - else: - file_path = "app.py" - - files_params = urllib.parse.urlencode({ - "files[0][path]": file_path, - "files[0][content]": request.code - }) - - space_url = f"{base_url}?{params}&{files_params}" - - return { - "success": True, - "space_url": space_url, - "message": "Dev mode: Please create the space manually", - "dev_mode": True - } - - # Production mode with real OAuth token - try: - from backend_deploy import deploy_to_huggingface_space - - # Get user token - should be the access_token from OAuth session - user_token = auth.token if auth.token else os.getenv("HF_TOKEN") - - if not user_token: - raise HTTPException(status_code=401, detail="No HuggingFace token available. Please sign in first.") - - print(f"[Deploy] Attempting deployment with token (first 10 chars): {user_token[:10]}...") - print(f"[Deploy] Request parameters - language: {request.language}, space_name: {request.space_name}, existing_repo_id: {request.existing_repo_id}") - - # If username is missing, fetch it from HuggingFace API - username = auth.username - if not username: - print(f"[Deploy] Username not found in auth, fetching from HuggingFace API...") - try: - from huggingface_hub import HfApi - hf_api = HfApi(token=user_token) - user_info = hf_api.whoami() - username = user_info.get("name") or user_info.get("preferred_username") or "user" - print(f"[Deploy] Fetched username from HF API: {username}") - except Exception as e: - print(f"[Deploy] Warning: Could not fetch username from HF API: {e}") - # Continue without username - the deploy function will try to fetch it again - - # Check for existing deployed space in this session - session_token = authorization.replace("Bearer ", "") if authorization else None - existing_repo_id = request.existing_repo_id - - # PRIORITY 1: Check history for deployed/imported spaces (like Gradio version does) - # This is more reliable than session tracking since history persists in frontend - if request.history and username: - print(f"[Deploy] ========== CHECKING HISTORY ==========") - print(f"[Deploy] History length: {len(request.history)} messages") - print(f"[Deploy] Username: {username}") - - # Log each message in history for debugging - for i, msg in enumerate(request.history): - role = msg.get('role', 'unknown') - content = msg.get('content', '') - content_preview = content[:100] if content else '' - print(f"[Deploy] Message {i+1}: role={role}, content_preview='{content_preview}...'") - - print(f"[Deploy] ==========================================") - - for msg in request.history: - role = msg.get('role', '') - content = 
msg.get('content', '') - - # Check for deployment confirmations - if role == 'assistant' and ('✅ Deployed!' in content or '✅ Updated!' in content): - import re - print(f"[Deploy] 🔍 Found deployment message in history!") - print(f"[Deploy] Content: {content[:200]}") - match = re.search(r'huggingface\.co/spaces/([^/\s\)]+/[^/\s\)]+)', content) - if match: - history_space_id = match.group(1) - print(f"[Deploy] ✅ EXTRACTED space ID from history: {history_space_id}") - if not existing_repo_id: - existing_repo_id = history_space_id - print(f"[Deploy] ✅ WILL UPDATE EXISTING SPACE: {existing_repo_id}") - break - else: - print(f"[Deploy] ⚠️ Deployment message found but couldn't extract space ID") - - # Check for imports - elif role == 'user' and 'import' in content.lower(): - import re - match = re.search(r'huggingface\.co/spaces/([^/\s\)]+/[^/\s\)]+)', content) - if match: - imported_space = match.group(1) - # Only use if user owns it - if imported_space.startswith(f"{username}/"): - print(f"[Deploy] ✅ Found imported space in history (user owns it): {imported_space}") - if not existing_repo_id: - existing_repo_id = imported_space - break - else: - if not request.history: - print(f"[Deploy] ⚠️ No history provided in request") - if not username: - print(f"[Deploy] ⚠️ No username available") - - # PRIORITY 2: Check session for previously deployed spaces (fallback) - # This helps when history isn't passed from frontend - if not existing_repo_id and session_token and session_token in user_sessions: - session = user_sessions[session_token] - - # Ensure deployed_spaces exists (for backward compatibility with old sessions) - if "deployed_spaces" not in session: - session["deployed_spaces"] = [] - - deployed_spaces = session.get("deployed_spaces", []) - - print(f"[Deploy] Checking session for existing spaces. Found {len(deployed_spaces)} deployed spaces.") - for i, space in enumerate(deployed_spaces): - print(f"[Deploy] Space {i+1}: repo_id={space.get('repo_id')}, language={space.get('language')}, timestamp={space.get('timestamp')}") - - # Find the most recent space for this language - for space in reversed(deployed_spaces): - if space.get("language") == request.language: - session_space_id = space.get("repo_id") - print(f"[Deploy] ✅ Found existing space in session for {request.language}: {session_space_id}") - existing_repo_id = session_space_id - break - - if not existing_repo_id: - print(f"[Deploy] ⚠️ No existing space found for language: {request.language}") - elif not existing_repo_id: - print(f"[Deploy] ⚠️ No session found and no history provided. 
session_token: {session_token[:10] if session_token else 'None'}") - - # Use the standalone deployment function - print(f"[Deploy] ========== CALLING deploy_to_huggingface_space ==========") - print(f"[Deploy] existing_repo_id: {existing_repo_id}") - print(f"[Deploy] space_name: {request.space_name}") - print(f"[Deploy] language: {request.language}") - print(f"[Deploy] username: {username}") - print(f"[Deploy] ==========================================================") - - success, message, space_url = deploy_to_huggingface_space( - code=request.code, - language=request.language, - space_name=request.space_name, - token=user_token, - username=username, - description=request.description if hasattr(request, 'description') else None, - private=False, - existing_repo_id=existing_repo_id, - commit_message=request.commit_message - ) - - if success: - # Extract repo_id from space_url - repo_id = space_url.split("/spaces/")[-1] if space_url else None - print(f"[Deploy] ✅ Success! Repo ID: {repo_id}") - print(f"[Deploy] Space URL: {space_url}") - print(f"[Deploy] Message: {message}") - - # Track deployed space in session for follow-up updates - if session_token and session_token in user_sessions: - if repo_id: - session = user_sessions[session_token] - - # Ensure deployed_spaces exists - if "deployed_spaces" not in session: - session["deployed_spaces"] = [] - - deployed_spaces = session.get("deployed_spaces", []) - - print(f"[Deploy] 📝 Tracking space in session...") - print(f"[Deploy] Current deployed_spaces count: {len(deployed_spaces)}") - - # Update or add the space - space_entry = { - "repo_id": repo_id, - "language": request.language, - "timestamp": datetime.now() - } - - # Remove old entry for same repo_id if exists - old_count = len(deployed_spaces) - deployed_spaces = [s for s in deployed_spaces if s.get("repo_id") != repo_id] - if old_count != len(deployed_spaces): - print(f"[Deploy] Removed old entry for {repo_id}") - - # Also remove old entries for same language (keep only most recent per language) - # This ensures we always update the same space for a given language - deployed_spaces = [s for s in deployed_spaces if s.get("language") != request.language] - - deployed_spaces.append(space_entry) - - session["deployed_spaces"] = deployed_spaces - print(f"[Deploy] ✅ Tracked space in session: {repo_id}") - print(f"[Deploy] New deployed_spaces count: {len(deployed_spaces)}") - print(f"[Deploy] All deployed spaces: {[s.get('repo_id') for s in deployed_spaces]}") - else: - print(f"[Deploy] ⚠️ Could not extract repo_id from space_url: {space_url}") - else: - if not session_token: - print(f"[Deploy] ⚠️ No session_token provided for tracking") - elif session_token not in user_sessions: - print(f"[Deploy] ⚠️ Session not found: {session_token[:10]}...") - print(f"[Deploy] Available sessions: {[k[:10] for k in list(user_sessions.keys())[:5]]}") - - return { - "success": True, - "space_url": space_url, - "message": message, - "repo_id": repo_id - } - else: - # Provide user-friendly error message based on the error - if "401" in message or "Unauthorized" in message: - raise HTTPException( - status_code=401, - detail="Authentication failed. Please sign in again with HuggingFace." - ) - elif "403" in message or "Forbidden" in message or "Permission" in message: - raise HTTPException( - status_code=403, - detail="Permission denied. Your HuggingFace token may not have the required permissions (manage-repos scope)." 
- ) - else: - raise HTTPException( - status_code=500, - detail=message - ) - - except HTTPException: - # Re-raise HTTP exceptions as-is - raise - except Exception as e: - # Log the full error for debugging - import traceback - error_details = traceback.format_exc() - print(f"[Deploy] Deployment error: {error_details}") - - raise HTTPException( - status_code=500, - detail=f"Deployment failed: {str(e)}" - ) - - -@app.post("/api/create-pr", response_model=PullRequestResponse) -async def create_pull_request( - request: PullRequestRequest, - authorization: Optional[str] = Header(None) -): - """Create a Pull Request on an existing HuggingFace Space with redesigned code""" - print(f"[PR] ========== NEW PULL REQUEST ==========") - print(f"[PR] Repo ID: {request.repo_id}") - print(f"[PR] Language: {request.language}") - print(f"[PR] PR Title: {request.pr_title}") - - auth = get_auth_from_header(authorization) - - if not auth.is_authenticated(): - raise HTTPException(status_code=401, detail="Authentication required") - - # Check if this is dev mode - if auth.token and auth.token.startswith("dev_token_"): - return PullRequestResponse( - success=False, - message="Dev mode: PR creation not available in dev mode. Please use production authentication.", - pr_url=None - ) - - # Production mode with real OAuth token - try: - from backend_deploy import create_pull_request_on_space - - user_token = auth.token if auth.token else os.getenv("HF_TOKEN") - - if not user_token: - raise HTTPException(status_code=401, detail="No HuggingFace token available. Please sign in first.") - - print(f"[PR] Creating PR with token (first 10 chars): {user_token[:10]}...") - - # Create the pull request - success, message, pr_url = create_pull_request_on_space( - repo_id=request.repo_id, - code=request.code, - language=request.language, - token=user_token, - pr_title=request.pr_title, - pr_description=request.pr_description - ) - - print(f"[PR] Result:") - print(f"[PR] - Success: {success}") - print(f"[PR] - Message: {message}") - print(f"[PR] - PR URL: {pr_url}") - - if success: - return PullRequestResponse( - success=True, - message=message, - pr_url=pr_url - ) - else: - # Provide user-friendly error messages - if "401" in message or "Unauthorized" in message: - raise HTTPException( - status_code=401, - detail="Authentication failed. Please sign in again with HuggingFace." - ) - elif "403" in message or "Forbidden" in message or "Permission" in message: - raise HTTPException( - status_code=403, - detail="Permission denied. You may not have write access to this space." 
- ) - else: - raise HTTPException( - status_code=500, - detail=message - ) - - except HTTPException: - raise - except Exception as e: - import traceback - error_details = traceback.format_exc() - print(f"[PR] Error: {error_details}") - - raise HTTPException( - status_code=500, - detail=f"Failed to create pull request: {str(e)}" - ) - - -@app.post("/api/duplicate-space", response_model=DuplicateSpaceResponse) -async def duplicate_space_endpoint( - request: DuplicateSpaceRequest, - authorization: Optional[str] = Header(None) -): - """Duplicate a HuggingFace Space to the user's account""" - print(f"[Duplicate] ========== DUPLICATE SPACE REQUEST ==========") - print(f"[Duplicate] From: {request.from_space_id}") - print(f"[Duplicate] To: {request.to_space_name or 'auto'}") - print(f"[Duplicate] Private: {request.private}") - - auth = get_auth_from_header(authorization) - - if not auth.is_authenticated(): - raise HTTPException(status_code=401, detail="Authentication required") - - # Check if this is dev mode - if auth.token and auth.token.startswith("dev_token_"): - return DuplicateSpaceResponse( - success=False, - message="Dev mode: Space duplication not available in dev mode. Please use production authentication.", - space_url=None, - space_id=None - ) - - # Production mode with real OAuth token - try: - from backend_deploy import duplicate_space_to_user - - user_token = auth.token if auth.token else os.getenv("HF_TOKEN") - - if not user_token: - raise HTTPException(status_code=401, detail="No HuggingFace token available. Please sign in first.") - - print(f"[Duplicate] Duplicating space with token (first 10 chars): {user_token[:10]}...") - - # Duplicate the space - success, message, space_url = duplicate_space_to_user( - from_space_id=request.from_space_id, - to_space_name=request.to_space_name, - token=user_token, - private=request.private - ) - - print(f"[Duplicate] Result:") - print(f"[Duplicate] - Success: {success}") - print(f"[Duplicate] - Message: {message}") - print(f"[Duplicate] - Space URL: {space_url}") - - if success: - # Extract space_id from URL - space_id = space_url.split("/spaces/")[-1] if space_url else None - - return DuplicateSpaceResponse( - success=True, - message=message, - space_url=space_url, - space_id=space_id - ) - else: - # Provide user-friendly error messages - if "401" in message or "Unauthorized" in message: - raise HTTPException( - status_code=401, - detail="Authentication failed. Please sign in again with HuggingFace." - ) - elif "403" in message or "Forbidden" in message or "Permission" in message: - raise HTTPException( - status_code=403, - detail="Permission denied. You may not have access to this space." - ) - elif "404" in message or "not found" in message.lower(): - raise HTTPException( - status_code=404, - detail="Space not found. Please check the URL and try again." 
- ) - else: - raise HTTPException( - status_code=500, - detail=message - ) - - except HTTPException: - raise - except Exception as e: - import traceback - error_details = traceback.format_exc() - print(f"[Duplicate] Error: {error_details}") - - raise HTTPException( - status_code=500, - detail=f"Failed to duplicate space: {str(e)}" - ) - - -@app.post("/api/import", response_model=ImportResponse) -async def import_project(request: ImportRequest): - """ - Import a project from HuggingFace Space, HuggingFace Model, or GitHub repo - - Supports URLs like: - - https://huggingface.co/spaces/username/space-name - - https://huggingface.co/username/model-name - - https://github.com/username/repo-name - """ - try: - importer = ProjectImporter() - result = importer.import_from_url(request.url) - - # Handle model-specific prefer_local flag - if request.prefer_local and result.get('metadata', {}).get('has_alternatives'): - # Switch to local code if available - local_code = result['metadata'].get('local_code') - if local_code: - result['code'] = local_code - result['metadata']['code_type'] = 'local' - result['message'] = result['message'].replace('inference', 'local') - - # Check if user owns this repo (for HuggingFace Spaces) - owned_by_user = False - repo_id = None - - if request.username and result['status'] == 'success': - # Extract repo_id from URL - url = result.get('url', '') - if 'huggingface.co/spaces/' in url: - # Extract username/repo from URL - match = re.search(r'huggingface\.co/spaces/([^/]+/[^/?#]+)', url) - if match: - repo_id = match.group(1) - # Check if user owns this space - if repo_id.startswith(f"{request.username}/"): - owned_by_user = True - print(f"[Import] User {request.username} owns the imported space: {repo_id}") - - # Add ownership info to response - result['owned_by_user'] = owned_by_user - result['repo_id'] = repo_id - - return ImportResponse(**result) - - except Exception as e: - return ImportResponse( - status="error", - message=f"Import failed: {str(e)}", - code="", - language="unknown", - url=request.url, - metadata={}, - owned_by_user=False, - repo_id=None - ) - - -@app.get("/api/import/space/{username}/{space_name}") -async def import_space(username: str, space_name: str): - """Import a specific HuggingFace Space by username and space name""" - try: - importer = ProjectImporter() - result = importer.import_space(username, space_name) - return result - except Exception as e: - return { - "status": "error", - "message": f"Failed to import space: {str(e)}", - "code": "", - "language": "unknown", - "url": f"https://huggingface.co/spaces/{username}/{space_name}", - "metadata": {} - } - - -@app.get("/api/import/model/{path:path}") -async def import_model(path: str, prefer_local: bool = False): - """ - Import a specific HuggingFace Model by model ID - - Example: /api/import/model/meta-llama/Llama-3.2-1B-Instruct - """ - try: - importer = ProjectImporter() - result = importer.import_model(path, prefer_local=prefer_local) - return result - except Exception as e: - return { - "status": "error", - "message": f"Failed to import model: {str(e)}", - "code": "", - "language": "python", - "url": f"https://huggingface.co/{path}", - "metadata": {} - } - - -@app.get("/api/import/github/{owner}/{repo}") -async def import_github(owner: str, repo: str): - """Import a GitHub repository by owner and repo name""" - try: - importer = ProjectImporter() - result = importer.import_github_repo(owner, repo) - return result - except Exception as e: - return { - "status": "error", - "message": 
f"Failed to import repository: {str(e)}", - "code": "", - "language": "python", - "url": f"https://github.com/{owner}/{repo}", - "metadata": {} - } - - -@app.websocket("/ws/generate") -async def websocket_generate(websocket: WebSocket): - """WebSocket endpoint for real-time code generation""" - await websocket.accept() - - try: - while True: - # Receive message from client - data = await websocket.receive_json() - - query = data.get("query") - language = data.get("language", "html") - model_id = data.get("model_id", "claude-opus-4.5") - - # Send acknowledgment - await websocket.send_json({ - "type": "status", - "message": "Generating code..." - }) - - # Mock code generation for now - await asyncio.sleep(0.5) - - # Send generated code in chunks - sample_code = f"\n
<html>\n<body>\n<h1>Hello from AnyCoder!</h1>\n</body>\n</html>"
-
-            for i, char in enumerate(sample_code):
-                await websocket.send_json({
-                    "type": "chunk",
-                    "content": char,
-                    "progress": (i + 1) / len(sample_code) * 100
-                })
-                await asyncio.sleep(0.01)
-
-            # Send completion
-            await websocket.send_json({
-                "type": "complete",
-                "code": sample_code
-            })
-
-    except WebSocketDisconnect:
-        print("Client disconnected")
-    except Exception as e:
-        await websocket.send_json({
-            "type": "error",
-            "message": str(e)
-        })
-        await websocket.close()
-
-
-if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run("backend_api:app", host="0.0.0.0", port=8000, reload=True)
-
diff --git a/backend_deploy.py b/backend_deploy.py
deleted file mode 100644
index d87ad64820f2910bd0892f1a447e120839661ebe..0000000000000000000000000000000000000000
--- a/backend_deploy.py
+++ /dev/null
@@ -1,1687 +0,0 @@
-"""
-Standalone deployment utilities for publishing to HuggingFace Spaces.
-No Gradio dependencies - can be used in backend API.
-"""
-import os
-import re
-import json
-import uuid
-import tempfile
-import shutil
-import ast
-from typing import Dict, List, Optional, Tuple
-from pathlib import Path
-
-from huggingface_hub import HfApi
-from backend_models import get_inference_client, get_real_model_id
-from backend_parsers import (
-    parse_transformers_js_output,
-    parse_html_code,
-    parse_python_requirements,
-    parse_multi_file_python_output,
-    parse_react_output,
-    strip_tool_call_markers,
-    remove_code_block,
-    extract_import_statements,
-    generate_requirements_txt_with_llm
-)
-
-
-def prettify_comfyui_json_for_html(json_content: str) -> str:
-    """Convert ComfyUI JSON to stylized HTML display with download button"""
-    try:
-        # Parse and prettify the JSON
-        parsed_json = json.loads(json_content)
-        prettified_json = json.dumps(parsed_json, indent=2, ensure_ascii=False)
-
-        # Create Apple-style HTML wrapper
-        html_content = f"""<!DOCTYPE html>
-<html>
-<head>
-    <meta charset="UTF-8">
-    <title>ComfyUI Workflow</title>
-</head>
-<body>
-    <div class="container">
-        <h1>ComfyUI Workflow</h1>
-        <p>View and download your workflow JSON</p>
-        <button onclick="downloadWorkflow()">Download JSON</button>
-        <pre id="workflow-json">{prettified_json}</pre>
-    </div>
-    <script>
-        // Offer the prettified JSON shown above as a downloadable workflow.json file
-        function downloadWorkflow() {{
-            const blob = new Blob([document.getElementById('workflow-json').textContent],
-                                  {{type: 'application/json'}});
-            const link = document.createElement('a');
-            link.href = URL.createObjectURL(blob);
-            link.download = 'workflow.json';
-            link.click();
-        }}
-    </script>
-</body>
-</html>"""
-        return html_content
-    except json.JSONDecodeError:
-        # If it's not valid JSON, return as-is wrapped in basic HTML
-        return f"""<!DOCTYPE html>
-<html>
-<head>
-    <title>ComfyUI Workflow</title>
-</head>
-<body>
-    <h1>ComfyUI Workflow</h1>
-    <p>Error: Invalid JSON format</p>
-    <pre>{json_content}</pre>
-</body>
-</html>
- -""" - except Exception as e: - print(f"Error prettifying ComfyUI JSON: {e}") - return json_content - - -# Note: parse_transformers_js_output, parse_python_requirements, strip_tool_call_markers, -# remove_code_block, extract_import_statements, generate_requirements_txt_with_llm, -# and parse_multi_file_python_output are now imported from backend_parsers.py - - -def is_streamlit_code(code: str) -> bool: - """Check if code is Streamlit""" - return 'import streamlit' in code or 'streamlit.run' in code - - -def is_gradio_code(code: str) -> bool: - """Check if code is Gradio""" - return 'import gradio' in code or 'gr.' in code - - -def detect_sdk_from_code(code: str, language: str) -> str: - """Detect the appropriate SDK from code and language""" - if language == "html": - return "static" - elif language == "transformers.js": - return "static" - elif language == "comfyui": - return "static" - elif language == "react": - return "docker" - elif language == "streamlit" or is_streamlit_code(code): - return "docker" - elif language == "gradio" or is_gradio_code(code): - return "gradio" - else: - return "gradio" # Default - - -def add_anycoder_tag_to_readme(api, repo_id: str, app_port: Optional[int] = None, sdk: Optional[str] = None) -> None: - """ - Download existing README, add anycoder tag and app_port if needed, and upload back. - Preserves all existing README content and frontmatter. - - Args: - api: HuggingFace API client - repo_id: Repository ID (username/space-name) - app_port: Optional port number to set for Docker spaces (e.g., 7860) - sdk: Optional SDK type (e.g., 'gradio', 'streamlit', 'docker', 'static') - """ - try: - import tempfile - import re - - # Download the existing README - readme_path = api.hf_hub_download( - repo_id=repo_id, - filename="README.md", - repo_type="space" - ) - - # Read the existing README content - with open(readme_path, 'r', encoding='utf-8') as f: - content = f.read() - - # Parse frontmatter and content - if content.startswith('---'): - # Split frontmatter and body - parts = content.split('---', 2) - if len(parts) >= 3: - frontmatter = parts[1].strip() - body = parts[2] if len(parts) > 2 else "" - - # Check if tags already exist - if 'tags:' in frontmatter: - # Add anycoder to existing tags if not present - if '- anycoder' not in frontmatter: - frontmatter = re.sub(r'(tags:\s*\n(?:\s*-\s*[^\n]+\n)*)', r'\1- anycoder\n', frontmatter) - else: - # Add tags section with anycoder - frontmatter += '\ntags:\n- anycoder' - - # Add app_port if specified and not already present - if app_port is not None and 'app_port:' not in frontmatter: - frontmatter += f'\napp_port: {app_port}' - - # For Gradio spaces, always set sdk_version to 6.0.2 - if sdk == 'gradio': - if 'sdk_version:' in frontmatter: - # Update existing sdk_version - frontmatter = re.sub(r'sdk_version:\s*[^\n]+', 'sdk_version: 6.0.2', frontmatter) - print(f"[README] Updated sdk_version to 6.0.2 for Gradio space") - else: - # Add sdk_version - frontmatter += '\nsdk_version: 6.0.2' - print(f"[README] Added sdk_version: 6.0.2 for Gradio space") - - # Reconstruct the README - new_content = f"---\n{frontmatter}\n---{body}" - else: - # Malformed frontmatter, just add tags at the end of frontmatter - new_content = content.replace('---', '---\ntags:\n- anycoder\n---', 1) - else: - # No frontmatter, add it at the beginning - app_port_line = f'\napp_port: {app_port}' if app_port else '' - sdk_version_line = '\nsdk_version: 6.0.2' if sdk == 'gradio' else '' - new_content = f"---\ntags:\n- 
anycoder{app_port_line}{sdk_version_line}\n---\n\n{content}" - - # Upload the modified README - with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False, encoding='utf-8') as f: - f.write(new_content) - temp_path = f.name - - api.upload_file( - path_or_fileobj=temp_path, - path_in_repo="README.md", - repo_id=repo_id, - repo_type="space" - ) - - os.unlink(temp_path) - - except Exception as e: - print(f"Warning: Could not modify README.md to add anycoder tag: {e}") - - -def create_dockerfile_for_streamlit(space_name: str) -> str: - """Create Dockerfile for Streamlit app""" - return f"""FROM python:3.11-slim - -WORKDIR /app - -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt - -COPY . . - -EXPOSE 7860 - -CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"] -""" - - -def create_dockerfile_for_react(space_name: str) -> str: - """Create Dockerfile for React app""" - return f"""FROM node:18-slim - -# Use existing node user -USER node -ENV HOME=/home/node -ENV PATH=/home/node/.local/bin:$PATH - -WORKDIR /home/node/app - -COPY --chown=node:node package*.json ./ -RUN npm install - -COPY --chown=node:node . . -RUN npm run build - -EXPOSE 7860 - -CMD ["npm", "start", "--", "-p", "7860"] -""" - - -def extract_space_id_from_history(history: Optional[List], username: Optional[str] = None) -> Optional[str]: - """ - Extract existing space ID from chat history (for updates after followups/imports) - - Args: - history: Chat history (list of lists [[role, content], ...] or list of dicts) - username: Current username (to verify ownership of imported spaces) - - Returns: - Space ID (username/space-name) if found, None otherwise - """ - if not history: - return None - - import re - existing_space = None - - # Look through history for previous deployments or imports - for msg in history: - # Handle both list format [[role, content], ...] and dict format [{'role': ..., 'content': ...}, ...] 
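-        # Hypothetical examples of the two accepted shapes:
-        #     ["assistant", "✅ Deployed! View your space at: https://huggingface.co/spaces/alice/demo-app"]
-        #     {"role": "assistant", "content": "✅ Deployed! View your space at: https://huggingface.co/spaces/alice/demo-app"}
-        # Both normalize to the same role/content pair below.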
- if isinstance(msg, list) and len(msg) >= 2: - role = msg[0] - content = msg[1] - elif isinstance(msg, dict): - role = msg.get('role', '') - content = msg.get('content', '') - else: - continue - - # Check assistant messages for deployment confirmations - if role == 'assistant': - # Look for various deployment success patterns (case-insensitive) - content_lower = content.lower() - has_deployment_indicator = ( - "deployed" in content_lower or - "updated" in content_lower or - "✅" in content # Check mark often indicates deployment success - ) - - if has_deployment_indicator: - # Look for space URL pattern - match = re.search(r'huggingface\.co/spaces/([^/\s\)]+/[^/\s\)]+)', content) - if match: - existing_space = match.group(1) - print(f"[Extract Space] Found existing space: {existing_space}") - break - - # Check user messages for imports - elif role == 'user': - if "import" in content.lower() and "space" in content.lower(): - # Extract space name from import message - match = re.search(r'huggingface\.co/spaces/([^/\s\)]+/[^/\s\)]+)', content) - if match: - imported_space = match.group(1) - # Only use imported space if user owns it (can update it) - if username and imported_space.startswith(f"{username}/"): - existing_space = imported_space - break - # If user doesn't own the imported space, we'll create a new one - # (existing_space remains None, triggering new deployment) - - return existing_space - - -def deploy_to_huggingface_space( - code: str, - language: str, - space_name: Optional[str] = None, - token: Optional[str] = None, - username: Optional[str] = None, - description: Optional[str] = None, - private: bool = False, - existing_repo_id: Optional[str] = None, - commit_message: Optional[str] = None, - history: Optional[List[Dict]] = None -) -> Tuple[bool, str, Optional[str]]: - """ - Deploy code to HuggingFace Spaces (create new or update existing) - - Args: - code: Generated code to deploy - language: Target language/framework (html, gradio, streamlit, react, transformers.js, comfyui) - space_name: Name for the space (auto-generated if None, ignored if existing_repo_id provided) - token: HuggingFace API token - username: HuggingFace username - description: Space description - private: Whether to make the space private (only for new spaces) - existing_repo_id: If provided (username/space-name), updates this space instead of creating new one - commit_message: Custom commit message (defaults to "Deploy from anycoder" or "Update from anycoder") - history: Chat history (list of dicts with 'role' and 'content') - used to detect followups/imports - - Returns: - Tuple of (success: bool, message: str, space_url: Optional[str]) - """ - if not token: - token = os.getenv("HF_TOKEN") - if not token: - return False, "No HuggingFace token provided", None - - try: - api = HfApi(token=token) - - # Get username if not provided (needed for history tracking) - if not username: - try: - user_info = api.whoami() - username = user_info.get("name") or user_info.get("preferred_username") or "user" - except Exception as e: - pass # Will handle later if needed - - # Check history for existing space if not explicitly provided - # This enables automatic updates for followup prompts and imported spaces - if not existing_repo_id and history: - existing_repo_id = extract_space_id_from_history(history, username) - if existing_repo_id: - print(f"[Deploy] Detected existing space from history: {existing_repo_id}") - - # Determine if this is an update or new deployment - is_update = existing_repo_id is not None - - 
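-        # Worked example (hypothetical history): if a prior assistant message read
-        #     "✅ Deployed! View your space at: https://huggingface.co/spaces/alice/demo-app"
-        # extract_space_id_from_history returns "alice/demo-app" above, so is_update
-        # is True and this call updates that space instead of creating a new one.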
print(f"[Deploy] ========== DEPLOYMENT DECISION ==========") - print(f"[Deploy] existing_repo_id provided: {existing_repo_id}") - print(f"[Deploy] history provided: {history is not None} (length: {len(history) if history else 0})") - print(f"[Deploy] username: {username}") - print(f"[Deploy] is_update: {is_update}") - print(f"[Deploy] language: {language}") - print(f"[Deploy] ============================================") - - # For React space updates (followup changes), handle SEARCH/REPLACE blocks - if is_update and language == "react": - print(f"[Deploy] React space update - checking for search/replace blocks") - - # Import search/replace utilities - from backend_search_replace import has_search_replace_blocks, parse_file_specific_changes, apply_search_replace_changes - from huggingface_hub import hf_hub_download - - # Check if code contains search/replace blocks - if has_search_replace_blocks(code): - print(f"[Deploy] Detected SEARCH/REPLACE blocks - applying targeted changes") - - # Parse file-specific changes from code - file_changes = parse_file_specific_changes(code) - - # Download existing files from the space - try: - print(f"[Deploy] Downloading existing files from space: {existing_repo_id}") - - # Get list of files in the space - space_files = api.list_repo_files(repo_id=existing_repo_id, repo_type="space") - print(f"[Deploy] Found {len(space_files)} files in space: {space_files}") - - # Download relevant files (React/Next.js files) - react_file_patterns = ['.js', '.jsx', '.ts', '.tsx', '.css', '.json', 'Dockerfile'] - existing_files = {} - - for file_path in space_files: - # Skip non-code files - if any(file_path.endswith(ext) or ext in file_path for ext in react_file_patterns): - try: - downloaded_path = hf_hub_download( - repo_id=existing_repo_id, - filename=file_path, - repo_type="space", - token=token - ) - with open(downloaded_path, 'r', encoding='utf-8') as f: - existing_files[file_path] = f.read() - print(f"[Deploy] Downloaded: {file_path} ({len(existing_files[file_path])} chars)") - except Exception as e: - print(f"[Deploy] Warning: Could not download {file_path}: {e}") - - if not existing_files: - print(f"[Deploy] Warning: No React files found in space, falling back to full deployment") - else: - # Apply search/replace changes to the appropriate files - updated_files = [] - - # Check if changes are file-specific or global - if "__all__" in file_changes: - # Global changes - try to apply to all files - changes_text = file_changes["__all__"] - print(f"[Deploy] Applying global search/replace changes") - - # Try to apply to each file - for file_path, original_content in existing_files.items(): - modified_content = apply_search_replace_changes(original_content, changes_text) - if modified_content != original_content: - print(f"[Deploy] Modified {file_path}") - success, msg = update_space_file( - repo_id=existing_repo_id, - file_path=file_path, - content=modified_content, - token=token, - commit_message=commit_message or f"Update {file_path} from anycoder" - ) - if success: - updated_files.append(file_path) - else: - print(f"[Deploy] Warning: Failed to update {file_path}: {msg}") - else: - # File-specific changes - for filename, changes_text in file_changes.items(): - # Find the file in existing files (handle both with/without directory prefix) - matching_file = None - for file_path in existing_files.keys(): - if file_path == filename or file_path.endswith('/' + filename): - matching_file = file_path - break - - if matching_file: - original_content = 
existing_files[matching_file] - modified_content = apply_search_replace_changes(original_content, changes_text) - - print(f"[Deploy] Applying changes to {matching_file}") - success, msg = update_space_file( - repo_id=existing_repo_id, - file_path=matching_file, - content=modified_content, - token=token, - commit_message=commit_message or f"Update {matching_file} from anycoder" - ) - - if success: - updated_files.append(matching_file) - else: - print(f"[Deploy] Warning: Failed to update {matching_file}: {msg}") - else: - print(f"[Deploy] Warning: File {filename} not found in space") - - if updated_files: - space_url = f"https://huggingface.co/spaces/{existing_repo_id}" - files_list = ", ".join(updated_files) - return True, f"✅ Updated {len(updated_files)} file(s): {files_list}! View at: {space_url}", space_url - else: - return False, "No files were updated", None - - except Exception as e: - print(f"[Deploy] Error applying search/replace changes: {e}") - import traceback - traceback.print_exc() - # Fall through to normal deployment - else: - print(f"[Deploy] No SEARCH/REPLACE blocks detected, proceeding with full file update") - # Fall through to normal React deployment below - - # For Gradio space updates (import/redesign), update .py files and upload all new files - if is_update and language == "gradio": - print(f"[Deploy] Gradio space update - updating .py files and uploading any new files") - - # Parse the code to get all files - files = parse_multi_file_python_output(code) - - # Fallback if no files parsed - if not files: - print(f"[Deploy] No file markers found, using entire code as app.py") - cleaned_code = remove_code_block(code) - files['app.py'] = cleaned_code - - if not files: - return False, "Error: No files found in generated code", None - - print(f"[Deploy] Generated {len(files)} file(s): {list(files.keys())}") - - # For redesign operations, ONLY update app.py to preserve other helper files - # Detect redesign from commit message OR from history (user prompt contains "redesign") - is_redesign = False - if commit_message and "redesign" in commit_message.lower(): - is_redesign = True - elif history: - # Check last user message for "redesign" keyword - for role, content in reversed(history): - if role == "user" and content and "redesign" in content.lower(): - is_redesign = True - break - - if is_redesign: - print(f"[Deploy] Redesign operation detected - filtering to ONLY app.py") - app_py_content = files.get('app.py') - if not app_py_content: - return False, "Error: No app.py found in redesign output", None - files = {'app.py': app_py_content} - print(f"[Deploy] Will only update app.py ({len(app_py_content)} chars)") - - # Upload all generated files (the LLM is instructed to only output .py files, - # but if it creates new assets/data files, we should upload those too) - # This approach updates .py files and adds any new files without touching - # existing non-.py files that weren't generated - updated_files = [] - for file_path, content in files.items(): - print(f"[Deploy] Uploading {file_path} ({len(content)} chars)") - success, msg = update_space_file( - repo_id=existing_repo_id, - file_path=file_path, - content=content, - token=token, - commit_message=commit_message or f"Update {file_path} from anycoder" - ) - - if success: - updated_files.append(file_path) - else: - print(f"[Deploy] Warning: Failed to update {file_path}: {msg}") - - if updated_files: - space_url = f"https://huggingface.co/spaces/{existing_repo_id}" - files_list = ", ".join(updated_files) - return True, f"✅ 
Updated {len(updated_files)} file(s): {files_list}! View at: {space_url}", space_url - else: - return False, "Failed to update any files", None - - if is_update: - # Use existing repo - repo_id = existing_repo_id - space_name = existing_repo_id.split('/')[-1] - if '/' in existing_repo_id: - username = existing_repo_id.split('/')[0] - elif not username: - # Get username if still not available - try: - user_info = api.whoami() - username = user_info.get("name") or user_info.get("preferred_username") or "user" - except Exception as e: - return False, f"Failed to get user info: {str(e)}", None - else: - # Get username if not provided - if not username: - try: - user_info = api.whoami() - username = user_info.get("name") or user_info.get("preferred_username") or "user" - except Exception as e: - return False, f"Failed to get user info: {str(e)}", None - - # Generate space name if not provided or empty - if not space_name or space_name.strip() == "": - space_name = f"anycoder-{uuid.uuid4().hex[:8]}" - print(f"[Deploy] Auto-generated space name: {space_name}") - - # Clean space name (no spaces, lowercase, alphanumeric + hyphens) - space_name = re.sub(r'[^a-z0-9-]', '-', space_name.lower()) - space_name = re.sub(r'-+', '-', space_name).strip('-') - - # Ensure space_name is not empty after cleaning - if not space_name: - space_name = f"anycoder-{uuid.uuid4().hex[:8]}" - print(f"[Deploy] Space name was empty after cleaning, regenerated: {space_name}") - - repo_id = f"{username}/{space_name}" - print(f"[Deploy] Using repo_id: {repo_id}") - - # Detect SDK - sdk = detect_sdk_from_code(code, language) - - # Create temporary directory for files - with tempfile.TemporaryDirectory() as temp_dir: - temp_path = Path(temp_dir) - - # Parse code based on language - app_port = None # Track if we need app_port for Docker spaces - use_individual_uploads = False # Flag for transformers.js - - if language == "transformers.js": - try: - files = parse_transformers_js_output(code) - print(f"[Deploy] Parsed transformers.js files: {list(files.keys())}") - - # Log file sizes for debugging - for fname, fcontent in files.items(): - if fcontent: - print(f"[Deploy] {fname}: {len(fcontent)} characters") - else: - print(f"[Deploy] {fname}: EMPTY") - - # Validate all three files are present in the dict - required_files = {'index.html', 'index.js', 'style.css'} - missing_from_dict = required_files - set(files.keys()) - - if missing_from_dict: - error_msg = f"Failed to parse required files: {', '.join(sorted(missing_from_dict))}. " - error_msg += f"Parsed files: {', '.join(files.keys()) if files else 'none'}. " - error_msg += "Transformers.js apps require all three files (index.html, index.js, style.css). Please regenerate using the correct format." - print(f"[Deploy] {error_msg}") - return False, error_msg, None - - # Validate files have actual content (not empty or whitespace-only) - empty_files = [name for name in required_files if not files.get(name, '').strip()] - if empty_files: - error_msg = f"Empty file content detected: {', '.join(sorted(empty_files))}. " - error_msg += "All three files must contain actual code. Please regenerate with complete content." 
- print(f"[Deploy] {error_msg}") - return False, error_msg, None - - # Write transformers.js files to temp directory - for filename, content in files.items(): - file_path = temp_path / filename - print(f"[Deploy] Writing {filename} ({len(content)} chars) to {file_path}") - # Use text mode - Python handles encoding automatically - file_path.write_text(content, encoding='utf-8') - # Verify the write was successful - written_size = file_path.stat().st_size - print(f"[Deploy] Verified {filename}: {written_size} bytes on disk") - - # For transformers.js, we'll upload files individually (not via upload_folder) - use_individual_uploads = True - - except Exception as e: - print(f"[Deploy] Error parsing transformers.js: {e}") - import traceback - traceback.print_exc() - return False, f"Error parsing transformers.js output: {str(e)}", None - - elif language == "html": - html_code = parse_html_code(code) - (temp_path / "index.html").write_text(html_code, encoding='utf-8') - - elif language == "comfyui": - # ComfyUI is JSON, wrap in stylized HTML viewer with download button - html_code = prettify_comfyui_json_for_html(code) - (temp_path / "index.html").write_text(html_code, encoding='utf-8') - - elif language in ["gradio", "streamlit"]: - files = parse_multi_file_python_output(code) - - # Fallback: if no files parsed (missing === markers), treat entire code as app.py - if not files: - print(f"[Deploy] No file markers found in {language} code, using entire code as app.py") - # Clean up code blocks if present - cleaned_code = remove_code_block(code) - # Determine app filename based on language - app_filename = "streamlit_app.py" if language == "streamlit" else "app.py" - files[app_filename] = cleaned_code - - # Write Python files (create subdirectories if needed) - for filename, content in files.items(): - file_path = temp_path / filename - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text(content, encoding='utf-8') - - # Ensure requirements.txt exists - generate from imports if missing - if "requirements.txt" not in files: - # Get the main app file (app.py for gradio, streamlit_app.py or app.py for streamlit) - main_app = files.get('streamlit_app.py') or files.get('app.py', '') - if main_app: - print(f"[Deploy] Generating requirements.txt from imports in {language} app") - import_statements = extract_import_statements(main_app) - requirements_content = generate_requirements_txt_with_llm(import_statements) - (temp_path / "requirements.txt").write_text(requirements_content, encoding='utf-8') - print(f"[Deploy] Generated requirements.txt with {len(requirements_content.splitlines())} lines") - else: - # Fallback to minimal requirements if no app file found - if language == "gradio": - (temp_path / "requirements.txt").write_text("gradio>=4.0.0\n", encoding='utf-8') - elif language == "streamlit": - (temp_path / "requirements.txt").write_text("streamlit>=1.30.0\n", encoding='utf-8') - - # Create Dockerfile if needed - if sdk == "docker": - if language == "streamlit": - dockerfile = create_dockerfile_for_streamlit(space_name) - (temp_path / "Dockerfile").write_text(dockerfile, encoding='utf-8') - app_port = 7860 # Set app_port for Docker spaces - use_individual_uploads = True # Streamlit uses individual file uploads - - elif language == "react": - # Parse React output to get all files (uses === filename === markers) - files = parse_react_output(code) - - if not files: - return False, "Error: Could not parse React output", None - - # If Dockerfile is missing, use template - if 
'Dockerfile' not in files: - dockerfile = create_dockerfile_for_react(space_name) - files['Dockerfile'] = dockerfile - - # Write all React files (create subdirectories if needed) - for filename, content in files.items(): - file_path = temp_path / filename - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text(content, encoding='utf-8') - - app_port = 7860 # Set app_port for Docker spaces - use_individual_uploads = True # React uses individual file uploads - - else: - # Default: treat as Gradio app - files = parse_multi_file_python_output(code) - - # Fallback: if no files parsed (missing === markers), treat entire code as app.py - if not files: - print(f"[Deploy] No file markers found in default (gradio) code, using entire code as app.py") - # Clean up code blocks if present - cleaned_code = remove_code_block(code) - files['app.py'] = cleaned_code - - # Write files (create subdirectories if needed) - for filename, content in files.items(): - file_path = temp_path / filename - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text(content, encoding='utf-8') - - # Generate requirements.txt from imports if missing - if "requirements.txt" not in files: - main_app = files.get('app.py', '') - if main_app: - print(f"[Deploy] Generating requirements.txt from imports in default app") - import_statements = extract_import_statements(main_app) - requirements_content = generate_requirements_txt_with_llm(import_statements) - (temp_path / "requirements.txt").write_text(requirements_content, encoding='utf-8') - print(f"[Deploy] Generated requirements.txt with {len(requirements_content.splitlines())} lines") - else: - # Fallback to minimal requirements if no app file found - (temp_path / "requirements.txt").write_text("gradio>=4.0.0\n", encoding='utf-8') - - # Don't create README - HuggingFace will auto-generate it - # We'll add the anycoder tag after deployment - - # ONLY create repo for NEW deployments of non-Docker, non-transformers.js spaces - # Docker and transformers.js handle repo creation separately below - # This matches the Gradio version logic (line 1256 in ui.py) - if not is_update and sdk != "docker" and language not in ["transformers.js"]: - print(f"[Deploy] Creating NEW {sdk} space: {repo_id}") - try: - api.create_repo( - repo_id=repo_id, - repo_type="space", - space_sdk=sdk, - private=private, - exist_ok=True - ) - except Exception as e: - return False, f"Failed to create space: {str(e)}", None - elif is_update: - print(f"[Deploy] UPDATING existing space: {repo_id} (skipping create_repo)") - - # Handle transformers.js spaces (create repo via duplicate_space) - if language == "transformers.js": - if not is_update: - print(f"[Deploy] Creating NEW transformers.js space via template duplication") - print(f"[Deploy] space_name value: '{space_name}' (type: {type(space_name)})") - - # Safety check for space_name - if not space_name: - return False, "Internal error: space_name is None after generation", None - - try: - from huggingface_hub import duplicate_space - - # duplicate_space expects just the space name (not full repo_id) - # Use strip() to clean the space name - clean_space_name = space_name.strip() - print(f"[Deploy] Attempting to duplicate template space to: {clean_space_name}") - - duplicated_repo = duplicate_space( - from_id="static-templates/transformers.js", - to_id=clean_space_name, - token=token, - exist_ok=True - ) - print(f"[Deploy] Template duplication result: {duplicated_repo} (type: {type(duplicated_repo)})") - except Exception as e: - 
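-                    # duplicate_space can raise even when the space was actually created
-                    # (the checks below catch errors tied to the returned RepoUrl object),
-                    # so verify existence before treating this as a hard failure.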
print(f"[Deploy] Exception during duplicate_space: {type(e).__name__}: {str(e)}") - - # Check if space actually exists (success despite error) - space_exists = False - try: - if api.space_info(repo_id): - space_exists = True - except: - pass - - # Handle RepoUrl object "errors" - error_msg = str(e) - if ("'url'" in error_msg or "RepoUrl" in error_msg) and space_exists: - print(f"[Deploy] Space exists despite RepoUrl error, continuing with deployment") - else: - # Fallback to regular create_repo - print(f"[Deploy] Template duplication failed, attempting fallback to create_repo: {e}") - try: - api.create_repo( - repo_id=repo_id, - repo_type="space", - space_sdk="static", - private=private, - exist_ok=True - ) - print(f"[Deploy] Fallback create_repo successful") - except Exception as e2: - return False, f"Failed to create transformers.js space (both duplication and fallback failed): {str(e2)}", None - else: - # For updates, verify we can access the existing space - try: - space_info = api.space_info(repo_id) - if not space_info: - return False, f"Could not access space {repo_id} for update", None - except Exception as e: - return False, f"Cannot update space {repo_id}: {str(e)}", None - - # Handle Docker spaces (React/Streamlit) - create repo separately - elif sdk == "docker" and language in ["streamlit", "react"]: - if not is_update: - print(f"[Deploy] Creating NEW Docker space for {language}: {repo_id}") - try: - from huggingface_hub import create_repo as hf_create_repo - hf_create_repo( - repo_id=repo_id, - repo_type="space", - space_sdk="docker", - token=token, - exist_ok=True - ) - except Exception as e: - return False, f"Failed to create Docker space: {str(e)}", None - - # Upload files - if not commit_message: - commit_message = "Update from anycoder" if is_update else "Deploy from anycoder" - - try: - if language == "transformers.js": - # Special handling for transformers.js - create NEW temp files for each upload - # This matches the working pattern in ui.py - import time - - # Get the parsed files from earlier - files_to_upload = [ - ("index.html", files.get('index.html')), - ("index.js", files.get('index.js')), - ("style.css", files.get('style.css')) - ] - - max_attempts = 3 - for file_name, file_content in files_to_upload: - if not file_content: - return False, f"Missing content for {file_name}", None - - success = False - last_error = None - - for attempt in range(max_attempts): - temp_file_path = None - try: - # Create a NEW temp file for this upload (matches Gradio version approach) - print(f"[Deploy] Creating temp file for {file_name} with {len(file_content)} chars") - # Use text mode "w" - lets Python handle encoding automatically (better emoji support) - with tempfile.NamedTemporaryFile("w", suffix=f".{file_name.split('.')[-1]}", delete=False) as f: - f.write(file_content) - temp_file_path = f.name - # File is now closed and flushed, safe to upload - - # Upload the file without commit_message (HF handles this for spaces) - api.upload_file( - path_or_fileobj=temp_file_path, - path_in_repo=file_name, - repo_id=repo_id, - repo_type="space" - ) - success = True - print(f"[Deploy] Successfully uploaded {file_name}") - break - - except Exception as e: - last_error = e - error_str = str(e) - print(f"[Deploy] Upload error for {file_name}: {error_str}") - if "403" in error_str or "Forbidden" in error_str: - return False, f"Permission denied uploading {file_name}. 
Check your token has write access to {repo_id}.", None - - if attempt < max_attempts - 1: - time.sleep(2) # Wait before retry - print(f"[Deploy] Retry {attempt + 1}/{max_attempts} for {file_name}") - finally: - # Clean up temp file - if temp_file_path and os.path.exists(temp_file_path): - os.unlink(temp_file_path) - - if not success: - return False, f"Failed to upload {file_name} after {max_attempts} attempts: {last_error}", None - - elif use_individual_uploads: - # For React, Streamlit: upload each file individually - import time - - # Get list of files to upload from temp directory - files_to_upload = [] - for file_path in temp_path.rglob('*'): - if file_path.is_file(): - # Get relative path from temp directory (use forward slashes for repo paths) - rel_path = file_path.relative_to(temp_path) - files_to_upload.append(str(rel_path).replace('\\', '/')) - - if not files_to_upload: - return False, "No files to upload", None - - print(f"[Deploy] Uploading {len(files_to_upload)} files individually: {files_to_upload}") - - max_attempts = 3 - for filename in files_to_upload: - # Convert back to Path for filesystem operations - file_path = temp_path / filename.replace('/', os.sep) - if not file_path.exists(): - return False, f"Failed to upload: {filename} not found", None - - # Upload with retry logic - success = False - last_error = None - - for attempt in range(max_attempts): - try: - # Upload without commit_message - HF API handles this for spaces - api.upload_file( - path_or_fileobj=str(file_path), - path_in_repo=filename, - repo_id=repo_id, - repo_type="space" - ) - success = True - print(f"[Deploy] Successfully uploaded {filename}") - break - except Exception as e: - last_error = e - error_str = str(e) - print(f"[Deploy] Upload error for {filename}: {error_str}") - if "403" in error_str or "Forbidden" in error_str: - return False, f"Permission denied uploading {filename}. 
Check your token has write access to {repo_id}.", None - if attempt < max_attempts - 1: - time.sleep(2) # Wait before retry - print(f"[Deploy] Retry {attempt + 1}/{max_attempts} for {filename}") - - if not success: - return False, f"Failed to upload {filename} after {max_attempts} attempts: {last_error}", None - else: - # For other languages, use upload_folder - print(f"[Deploy] Uploading folder to {repo_id}") - api.upload_folder( - folder_path=str(temp_path), - repo_id=repo_id, - repo_type="space" - ) - except Exception as e: - return False, f"Failed to upload files: {str(e)}", None - - # After successful upload, modify the auto-generated README to add anycoder tag - # For new spaces: HF auto-generates README, wait and modify it - # For updates: README should already exist, just add tag if missing - try: - import time - if not is_update: - time.sleep(2) # Give HF time to generate README for new spaces - add_anycoder_tag_to_readme(api, repo_id, app_port, sdk) - except Exception as e: - # Don't fail deployment if README modification fails - print(f"Warning: Could not add anycoder tag to README: {e}") - - # For transformers.js updates, trigger a space restart to ensure changes take effect - if is_update and language == "transformers.js": - try: - api.restart_space(repo_id=repo_id) - print(f"[Deploy] Restarted space after update: {repo_id}") - except Exception as restart_error: - # Don't fail the deployment if restart fails, just log it - print(f"Note: Could not restart space after update: {restart_error}") - - space_url = f"https://huggingface.co/spaces/{repo_id}" - action = "Updated" if is_update else "Deployed" - - # Include the space URL in the message for history tracking - # This allows future deployments to detect this as the existing space - success_msg = f"✅ {action}! 
View your space at: {space_url}" - - return True, success_msg, space_url - - except Exception as e: - print(f"[Deploy] Top-level exception caught: {type(e).__name__}: {str(e)}") - import traceback - traceback.print_exc() - return False, f"Deployment error: {str(e)}", None - - -def update_space_file( - repo_id: str, - file_path: str, - content: str, - token: Optional[str] = None, - commit_message: Optional[str] = None -) -> Tuple[bool, str]: - """ - Update a single file in an existing HuggingFace Space - - Args: - repo_id: Full repo ID (username/space-name) - file_path: Path of file to update (e.g., "app.py") - content: New file content - token: HuggingFace API token - commit_message: Commit message (default: "Update {file_path}") - - Returns: - Tuple of (success: bool, message: str) - """ - if not token: - token = os.getenv("HF_TOKEN") - if not token: - return False, "No HuggingFace token provided" - - try: - api = HfApi(token=token) - - if not commit_message: - commit_message = f"Update {file_path}" - - # Create temporary file - with tempfile.NamedTemporaryFile(mode='w', suffix=f'.{file_path.split(".")[-1]}', delete=False) as f: - f.write(content) - temp_path = f.name - - try: - api.upload_file( - path_or_fileobj=temp_path, - path_in_repo=file_path, - repo_id=repo_id, - repo_type="space", - commit_message=commit_message - ) - return True, f"✅ Successfully updated {file_path}" - finally: - os.unlink(temp_path) - - except Exception as e: - return False, f"Failed to update file: {str(e)}" - - -def delete_space( - repo_id: str, - token: Optional[str] = None -) -> Tuple[bool, str]: - """ - Delete a HuggingFace Space - - Args: - repo_id: Full repo ID (username/space-name) - token: HuggingFace API token - - Returns: - Tuple of (success: bool, message: str) - """ - if not token: - token = os.getenv("HF_TOKEN") - if not token: - return False, "No HuggingFace token provided" - - try: - api = HfApi(token=token) - api.delete_repo(repo_id=repo_id, repo_type="space") - return True, f"✅ Successfully deleted {repo_id}" - except Exception as e: - return False, f"Failed to delete space: {str(e)}" - - -def list_user_spaces( - username: Optional[str] = None, - token: Optional[str] = None -) -> Tuple[bool, str, Optional[List[Dict]]]: - """ - List all spaces for a user - - Args: - username: HuggingFace username (gets from token if None) - token: HuggingFace API token - - Returns: - Tuple of (success: bool, message: str, spaces: Optional[List[Dict]]) - """ - if not token: - token = os.getenv("HF_TOKEN") - if not token: - return False, "No HuggingFace token provided", None - - try: - api = HfApi(token=token) - - # Get username if not provided - if not username: - user_info = api.whoami() - username = user_info.get("name") or user_info.get("preferred_username") - - # List spaces - spaces = api.list_spaces(author=username) - - space_list = [] - for space in spaces: - space_list.append({ - "id": space.id, - "author": space.author, - "name": getattr(space, 'name', space.id.split('/')[-1]), - "sdk": getattr(space, 'sdk', 'unknown'), - "private": getattr(space, 'private', False), - "url": f"https://huggingface.co/spaces/{space.id}" - }) - - return True, f"Found {len(space_list)} spaces", space_list - - except Exception as e: - return False, f"Failed to list spaces: {str(e)}", None - - -def duplicate_space_to_user( - from_space_id: str, - to_space_name: Optional[str] = None, - token: Optional[str] = None, - private: bool = False -) -> Tuple[bool, str, Optional[str]]: - """ - Duplicate a HuggingFace Space to the user's 
account - - Args: - from_space_id: Source space ID (username/space-name) - to_space_name: Destination space name (just the name, not full ID) - token: HuggingFace API token - private: Whether the duplicated space should be private - - Returns: - Tuple of (success: bool, message: str, space_url: Optional[str]) - """ - if not token: - token = os.getenv("HF_TOKEN") - if not token: - return False, "No HuggingFace token provided", None - - try: - from huggingface_hub import duplicate_space - - # Get username from token - api = HfApi(token=token) - user_info = api.whoami() - username = user_info.get("name") or user_info.get("preferred_username") or "user" - - # Get original space info to detect hardware and SDK - print(f"[Duplicate] Fetching info for {from_space_id}") - original_hardware = None - original_storage = None - original_sdk = None - try: - original_space_info = api.space_info(from_space_id) - # Get SDK type - original_sdk = getattr(original_space_info, 'sdk', None) - # Get runtime info - runtime = getattr(original_space_info, 'runtime', None) - if runtime: - original_hardware = getattr(runtime, 'hardware', None) - original_storage = getattr(runtime, 'storage', None) - print(f"[Duplicate] Original space SDK: {original_sdk}, hardware: {original_hardware}, storage: {original_storage}") - except Exception as e: - print(f"[Duplicate] Could not fetch space info: {e}") - - # If no destination name provided, use original name - if not to_space_name: - # Extract original space name - original_name = from_space_id.split('/')[-1] - to_space_name = original_name - - # Clean space name - to_space_name = re.sub(r'[^a-z0-9-]', '-', to_space_name.lower()) - to_space_name = re.sub(r'-+', '-', to_space_name).strip('-') - - # Construct full destination ID - to_space_id = f"{username}/{to_space_name}" - - print(f"[Duplicate] Duplicating {from_space_id} to {to_space_id}") - - # Prepare duplicate_space parameters - duplicate_params = { - "from_id": from_space_id, - "to_id": to_space_name, # Just the name, not full ID - "token": token, - "exist_ok": True - } - - # Hardware is REQUIRED by HF API for all space types when duplicating - # Use detected hardware or default to cpu-basic - hardware_to_use = original_hardware if original_hardware else "cpu-basic" - duplicate_params["hardware"] = hardware_to_use - print(f"[Duplicate] Hardware: {hardware_to_use} (SDK: {original_sdk}, original: {original_hardware})") - - # Storage is optional - if original_storage and original_storage.get('requested'): - duplicate_params["storage"] = original_storage.get('requested') - print(f"[Duplicate] Storage: {original_storage.get('requested')}") - - # Only set private if explicitly requested - if private: - duplicate_params["private"] = private - - # Duplicate the space - print(f"[Duplicate] Duplicating {from_space_id} to {username}/{to_space_name}") - print(f"[Duplicate] Parameters: {list(duplicate_params.keys())}") - - try: - duplicated_repo = duplicate_space(**duplicate_params) - except Exception as dup_error: - # Check if it's a zero-gpu hardware error - error_str = str(dup_error).lower() - if 'zero' in error_str or 'hardware' in error_str: - print(f"[Duplicate] Hardware error detected (likely zero-gpu issue): {dup_error}") - print(f"[Duplicate] Retrying with cpu-basic hardware...") - - # Retry with cpu-basic hardware - duplicate_params["hardware"] = "cpu-basic" - try: - duplicated_repo = duplicate_space(**duplicate_params) - print(f"[Duplicate] ✅ Successfully duplicated with cpu-basic hardware") - except Exception as 
retry_error: - print(f"[Duplicate] Retry with cpu-basic also failed: {retry_error}") - raise retry_error - else: - # Not a hardware error, re-raise - raise dup_error - - # Extract space URL - space_url = f"https://huggingface.co/spaces/{to_space_id}" - - success_msg = f"✅ Space duplicated! View at: {space_url}" - print(f"[Duplicate] {success_msg}") - - return True, success_msg, space_url - - except Exception as e: - print(f"[Duplicate] Error: {type(e).__name__}: {str(e)}") - import traceback - traceback.print_exc() - return False, f"Failed to duplicate space: {str(e)}", None - - -def create_pull_request_on_space( - repo_id: str, - code: str, - language: str, - token: Optional[str] = None, - pr_title: Optional[str] = None, - pr_description: Optional[str] = None -) -> Tuple[bool, str, Optional[str]]: - """ - Create a Pull Request on an existing HuggingFace Space with redesigned code - - Args: - repo_id: Full repo ID (username/space-name) - code: New code to propose - language: Language/framework type - token: HuggingFace API token - pr_title: Title for the PR (default: "Redesign from AnyCoder") - pr_description: Description for the PR - - Returns: - Tuple of (success: bool, message: str, pr_url: Optional[str]) - """ - if not token: - token = os.getenv("HF_TOKEN") - if not token: - return False, "No HuggingFace token provided", None - - try: - api = HfApi(token=token) - - # Check if we can access the space first - try: - space_info = api.space_info(repo_id=repo_id, token=token) - print(f"[PR] Space info: private={space_info.private if hasattr(space_info, 'private') else 'unknown'}") - - # Check if space is private - if hasattr(space_info, 'private') and space_info.private: - return False, "❌ Cannot create PR on private space. The space must be public to accept PRs from others.", None - except Exception as info_error: - print(f"[PR] Could not fetch space info: {info_error}") - # Continue anyway - maybe we can still create the PR - - # Default PR title and description - if not pr_title: - pr_title = "🎨 Redesign from AnyCoder" - - if not pr_description: - pr_description = """This Pull Request contains a redesigned version of the app with: - -- ✨ Modern, mobile-friendly design -- 🎯 Minimal, clean components -- 📱 Responsive layout -- 🚀 Improved user experience - -Generated by [AnyCoder](https://huggingface.co/spaces/akhaliq/anycoder)""" - - # Create temporary directory for files - with tempfile.TemporaryDirectory() as temp_dir: - temp_path = Path(temp_dir) - - # Parse code based on language - if language == "transformers.js": - try: - files = parse_transformers_js_output(code) - print(f"[PR] Parsed transformers.js files: {list(files.keys())}") - - # Write transformers.js files - for filename, content in files.items(): - file_path = temp_path / filename - file_path.write_text(content, encoding='utf-8') - - except Exception as e: - print(f"[PR] Error parsing transformers.js: {e}") - return False, f"Error parsing transformers.js output: {str(e)}", None - - elif language == "html": - html_code = parse_html_code(code) - (temp_path / "index.html").write_text(html_code, encoding='utf-8') - - elif language == "comfyui": - html_code = prettify_comfyui_json_for_html(code) - (temp_path / "index.html").write_text(html_code, encoding='utf-8') - - elif language in ["gradio", "streamlit", "react"]: - files = parse_multi_file_python_output(code) - - # Fallback if no files parsed - if not files: - print(f"[PR] No file markers found, using entire code as main file") - cleaned_code = remove_code_block(code) - if 
language == "streamlit": - files["streamlit_app.py"] = cleaned_code - elif language == "react": - files["app.tsx"] = cleaned_code - else: - files["app.py"] = cleaned_code - - # For Gradio PRs, only include .py files (preserve existing requirements.txt, etc.) - # For redesigns, ONLY include app.py to avoid modifying helper files - if language == "gradio": - print(f"[PR] Gradio app - filtering to only .py files") - py_files = {fname: content for fname, content in files.items() if fname.endswith('.py')} - if not py_files: - print(f"[PR] Warning: No .py files found in parsed output") - return False, "No Python files found in generated code for Gradio PR", None - - # Check if this is a redesign (pr_title contains "Redesign") - is_redesign = "redesign" in pr_title.lower() if pr_title else False - - if is_redesign: - print(f"[PR] Redesign PR detected - filtering to ONLY app.py") - if 'app.py' not in py_files: - print(f"[PR] Warning: No app.py found in redesign output") - return False, "No app.py found in redesign output for Gradio PR", None - files = {'app.py': py_files['app.py']} - print(f"[PR] Will only update app.py ({len(py_files['app.py'])} chars)") - else: - files = py_files - print(f"[PR] Will update {len(files)} Python file(s): {list(files.keys())}") - - # Write files (create subdirectories if needed) - for filename, content in files.items(): - file_path = temp_path / filename - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text(content, encoding='utf-8') - - # Skip requirements.txt generation for Gradio PRs (preserve existing) - # For Streamlit, generate requirements.txt if missing - if language == "streamlit" and "requirements.txt" not in files: - main_app = files.get('streamlit_app.py') or files.get('app.py', '') - if main_app: - print(f"[PR] Generating requirements.txt from imports") - import_statements = extract_import_statements(main_app) - requirements_content = generate_requirements_txt_with_llm(import_statements) - (temp_path / "requirements.txt").write_text(requirements_content, encoding='utf-8') - - else: - # Default: treat as code file - files = parse_multi_file_python_output(code) - if not files: - cleaned_code = remove_code_block(code) - files['app.py'] = cleaned_code - - for filename, content in files.items(): - file_path = temp_path / filename - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text(content, encoding='utf-8') - - # Create PR with files using create_commit (recommended approach) - # This creates the PR and uploads files in one API call - try: - print(f"[PR] Creating pull request with files on {repo_id}") - - # Prepare operations for all files - from huggingface_hub import CommitOperationAdd - operations = [] - - for file_path in temp_path.rglob('*'): - if file_path.is_file(): - rel_path = file_path.relative_to(temp_path) - operations.append( - CommitOperationAdd( - path_in_repo=str(rel_path), - path_or_fileobj=str(file_path) - ) - ) - - print(f"[PR] Prepared {len(operations)} file operations") - print(f"[PR] Token being used (first 20 chars): {token[:20] if token else 'None'}...") - - # Create commit with PR (pass token explicitly) - commit_info = api.create_commit( - repo_id=repo_id, - repo_type="space", - operations=operations, - commit_message=pr_title, - commit_description=pr_description, - create_pr=True, # This creates a PR with the changes - token=token, # Explicitly pass token - ) - - # Extract PR URL - pr_url = commit_info.pr_url if hasattr(commit_info, 'pr_url') else None - pr_num = commit_info.pr_num if 
hasattr(commit_info, 'pr_num') else None - - if not pr_url and pr_num: - pr_url = f"https://huggingface.co/spaces/{repo_id}/discussions/{pr_num}" - elif not pr_url: - pr_url = f"https://huggingface.co/spaces/{repo_id}/discussions" - - print(f"[PR] Created PR: {pr_url}") - success_msg = f"✅ Pull Request created! View at: {pr_url}" - - return True, success_msg, pr_url - - except Exception as e: - error_msg = str(e) - print(f"[PR] Error creating pull request: {error_msg}") - import traceback - traceback.print_exc() - - # Provide helpful error message based on the error type - if "403" in error_msg or "Forbidden" in error_msg or "Authorization" in error_msg: - user_msg = ( - "❌ Cannot create Pull Request: Permission denied.\n\n" - "**Possible reasons:**\n" - "- The space owner hasn't enabled Pull Requests\n" - "- You don't have write access to this space\n" - "- Spaces have stricter PR permissions than models/datasets\n\n" - "**What you can do:**\n" - "✅ Use the 'Redesign' button WITHOUT checking 'Create PR' - this will:\n" - " 1. Duplicate the space to your account\n" - " 2. Apply the redesign to your copy\n" - " 3. You'll own the new space!\n\n" - "Or contact the space owner to enable Pull Requests." - ) - else: - user_msg = f"Failed to create pull request: {error_msg}" - - return False, user_msg, None - - except Exception as e: - print(f"[PR] Top-level exception: {type(e).__name__}: {str(e)}") - import traceback - traceback.print_exc() - return False, f"Pull request error: {str(e)}", None - diff --git a/backend_docs_manager.py b/backend_docs_manager.py deleted file mode 100644 index ca1aa35693cadbd0ef8790885fed2149b9565e0b..0000000000000000000000000000000000000000 --- a/backend_docs_manager.py +++ /dev/null @@ -1,708 +0,0 @@ -""" -Documentation management for backend system prompts. -Handles fetching, caching, and updating documentation from llms.txt files. -No dependencies on Gradio or other heavy libraries - pure Python only. 
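A minimal sketch of how this module is typically consumed, assuming the `build_*_system_prompt` and `initialize_backend_docs` entry points defined later in this file:

```python
# Illustrative only: warm the docs caches, then build a system prompt.
from backend_docs_manager import build_gradio_system_prompt, initialize_backend_docs

initialize_backend_docs()                     # fetch or load cached llms.txt docs
system_prompt = build_gradio_system_prompt()  # base instructions + Gradio 6 docs
print(f"system prompt: {len(system_prompt)} chars")
```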
-""" -import os -import re -from datetime import datetime -from typing import Optional - -try: - import requests - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - print("Warning: requests library not available, using minimal fallback") - -# Configuration -GRADIO_LLMS_TXT_URL = "https://www.gradio.app/llms.txt" -GRADIO_DOCS_CACHE_FILE = ".backend_gradio_docs_cache.txt" -GRADIO_DOCS_LAST_UPDATE_FILE = ".backend_gradio_docs_last_update.txt" - -TRANSFORMERSJS_DOCS_URL = "https://huggingface.co/docs/transformers.js/llms.txt" -TRANSFORMERSJS_DOCS_CACHE_FILE = ".backend_transformersjs_docs_cache.txt" -TRANSFORMERSJS_DOCS_LAST_UPDATE_FILE = ".backend_transformersjs_docs_last_update.txt" - -COMFYUI_LLMS_TXT_URL = "https://docs.comfy.org/llms.txt" -COMFYUI_DOCS_CACHE_FILE = ".backend_comfyui_docs_cache.txt" -COMFYUI_DOCS_LAST_UPDATE_FILE = ".backend_comfyui_docs_last_update.txt" - -# Global variable to store the current Gradio documentation -_gradio_docs_content: Optional[str] = None -_gradio_docs_last_fetched: Optional[datetime] = None - -# Global variable to store the current transformers.js documentation -_transformersjs_docs_content: Optional[str] = None -_transformersjs_docs_last_fetched: Optional[datetime] = None - -# Global variable to store the current ComfyUI documentation -_comfyui_docs_content: Optional[str] = None -_comfyui_docs_last_fetched: Optional[datetime] = None - -def fetch_gradio_docs() -> Optional[str]: - """Fetch the latest Gradio documentation from llms.txt""" - if not HAS_REQUESTS: - return None - - try: - response = requests.get(GRADIO_LLMS_TXT_URL, timeout=10) - response.raise_for_status() - return response.text - except Exception as e: - print(f"Warning: Failed to fetch Gradio docs from {GRADIO_LLMS_TXT_URL}: {e}") - return None - -def fetch_transformersjs_docs() -> Optional[str]: - """Fetch the latest transformers.js documentation from llms.txt""" - if not HAS_REQUESTS: - return None - - try: - response = requests.get(TRANSFORMERSJS_DOCS_URL, timeout=10) - response.raise_for_status() - return response.text - except Exception as e: - print(f"Warning: Failed to fetch transformers.js docs from {TRANSFORMERSJS_DOCS_URL}: {e}") - return None - -def fetch_comfyui_docs() -> Optional[str]: - """Fetch the latest ComfyUI documentation from llms.txt""" - if not HAS_REQUESTS: - return None - - try: - response = requests.get(COMFYUI_LLMS_TXT_URL, timeout=10) - response.raise_for_status() - return response.text - except Exception as e: - print(f"Warning: Failed to fetch ComfyUI docs from {COMFYUI_LLMS_TXT_URL}: {e}") - return None - -def filter_problematic_instructions(content: str) -> str: - """Filter out problematic instructions that cause LLM to stop generation prematurely""" - if not content: - return content - - # List of problematic phrases that cause early termination when LLM encounters ``` in user code - problematic_patterns = [ - r"Output ONLY the code inside a ``` code block, and do not include any explanations or extra text", - r"output only the code inside a ```.*?``` code block", - r"Always output only the.*?code.*?inside.*?```.*?```.*?block", - r"Return ONLY the code inside a.*?```.*?``` code block", - r"Do NOT add the language name at the top of the code output", - r"do not include any explanations or extra text", - r"Always output only the.*?code blocks.*?shown above, and do not include any explanations", - r"Output.*?ONLY.*?code.*?inside.*?```.*?```", - r"Return.*?ONLY.*?code.*?inside.*?```.*?```", - r"Generate.*?ONLY.*?code.*?inside.*?```.*?```", - 
r"Provide.*?ONLY.*?code.*?inside.*?```.*?```", - ] - - # Remove problematic patterns - filtered_content = content - for pattern in problematic_patterns: - # Use case-insensitive matching - filtered_content = re.sub(pattern, "", filtered_content, flags=re.IGNORECASE | re.DOTALL) - - # Clean up any double newlines or extra whitespace left by removals - filtered_content = re.sub(r'\n\s*\n\s*\n', '\n\n', filtered_content) - filtered_content = re.sub(r'^\s+', '', filtered_content, flags=re.MULTILINE) - - return filtered_content - -def load_cached_gradio_docs() -> Optional[str]: - """Load cached Gradio documentation from file""" - try: - if os.path.exists(GRADIO_DOCS_CACHE_FILE): - with open(GRADIO_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f: - return f.read() - except Exception as e: - print(f"Warning: Failed to load cached Gradio docs: {e}") - return None - -def save_gradio_docs_cache(content: str): - """Save Gradio documentation to cache file""" - try: - with open(GRADIO_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f: - f.write(content) - with open(GRADIO_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f: - f.write(datetime.now().isoformat()) - except Exception as e: - print(f"Warning: Failed to save Gradio docs cache: {e}") - -def should_update_gradio_docs() -> bool: - """Check if Gradio documentation should be updated""" - # Only update if we don't have cached content (first run or cache deleted) - return not os.path.exists(GRADIO_DOCS_CACHE_FILE) - -def load_cached_transformersjs_docs() -> Optional[str]: - """Load cached transformers.js documentation from file""" - try: - if os.path.exists(TRANSFORMERSJS_DOCS_CACHE_FILE): - with open(TRANSFORMERSJS_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f: - return f.read() - except Exception as e: - print(f"Warning: Failed to load cached transformers.js docs: {e}") - return None - -def save_transformersjs_docs_cache(content: str): - """Save transformers.js documentation to cache file""" - try: - with open(TRANSFORMERSJS_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f: - f.write(content) - with open(TRANSFORMERSJS_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f: - f.write(datetime.now().isoformat()) - except Exception as e: - print(f"Warning: Failed to save transformers.js docs cache: {e}") - -def should_update_transformersjs_docs() -> bool: - """Check if transformers.js documentation should be updated""" - # Only update if we don't have cached content (first run or cache deleted) - return not os.path.exists(TRANSFORMERSJS_DOCS_CACHE_FILE) - -def load_cached_comfyui_docs() -> Optional[str]: - """Load cached ComfyUI documentation from file""" - try: - if os.path.exists(COMFYUI_DOCS_CACHE_FILE): - with open(COMFYUI_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f: - return f.read() - except Exception as e: - print(f"Warning: Failed to load cached ComfyUI docs: {e}") - return None - -def save_comfyui_docs_cache(content: str): - """Save ComfyUI documentation to cache file""" - try: - with open(COMFYUI_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f: - f.write(content) - with open(COMFYUI_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f: - f.write(datetime.now().isoformat()) - except Exception as e: - print(f"Warning: Failed to save ComfyUI docs cache: {e}") - -def should_update_comfyui_docs() -> bool: - """Check if ComfyUI documentation should be updated""" - # Only update if we don't have cached content (first run or cache deleted) - return not os.path.exists(COMFYUI_DOCS_CACHE_FILE) - -def get_gradio_docs_content() -> str: - """Get the current Gradio 
documentation content, updating if necessary""" - global _gradio_docs_content, _gradio_docs_last_fetched - - # Check if we need to update - if (_gradio_docs_content is None or - _gradio_docs_last_fetched is None or - should_update_gradio_docs()): - - print("📚 Loading Gradio 6 documentation...") - - # Try to fetch latest content - latest_content = fetch_gradio_docs() - - if latest_content: - # Filter out problematic instructions that cause early termination - filtered_content = filter_problematic_instructions(latest_content) - _gradio_docs_content = filtered_content - _gradio_docs_last_fetched = datetime.now() - save_gradio_docs_cache(filtered_content) - print(f"✅ Gradio 6 documentation loaded successfully ({len(filtered_content)} chars)") - else: - # Fallback to cached content - cached_content = load_cached_gradio_docs() - if cached_content: - _gradio_docs_content = cached_content - _gradio_docs_last_fetched = datetime.now() - print(f"⚠️ Using cached Gradio documentation (network fetch failed) ({len(cached_content)} chars)") - else: - # Fallback to minimal content - _gradio_docs_content = """ -# Gradio API Reference (Offline Fallback) - -This is a minimal fallback when documentation cannot be fetched. -Please check your internet connection for the latest API reference. - -Basic Gradio components: Button, Textbox, Slider, Image, Audio, Video, File, etc. -Use gr.Blocks() for custom layouts and gr.Interface() for simple apps. - -For the latest documentation, visit: https://www.gradio.app/llms.txt -""" - print("❌ Using minimal fallback documentation") - - return _gradio_docs_content or "" - -def get_transformersjs_docs_content() -> str: - """Get the current transformers.js documentation content, updating if necessary""" - global _transformersjs_docs_content, _transformersjs_docs_last_fetched - - # Check if we need to update - if (_transformersjs_docs_content is None or - _transformersjs_docs_last_fetched is None or - should_update_transformersjs_docs()): - - print("📚 Loading transformers.js documentation...") - - # Try to fetch latest content - latest_content = fetch_transformersjs_docs() - - if latest_content: - # Filter out problematic instructions that cause early termination - filtered_content = filter_problematic_instructions(latest_content) - _transformersjs_docs_content = filtered_content - _transformersjs_docs_last_fetched = datetime.now() - save_transformersjs_docs_cache(filtered_content) - print(f"✅ transformers.js documentation loaded successfully ({len(filtered_content)} chars)") - else: - # Fallback to cached content - cached_content = load_cached_transformersjs_docs() - if cached_content: - _transformersjs_docs_content = cached_content - _transformersjs_docs_last_fetched = datetime.now() - print(f"⚠️ Using cached transformers.js documentation (network fetch failed) ({len(cached_content)} chars)") - else: - # Fallback to minimal content - _transformersjs_docs_content = """ -# Transformers.js API Reference (Offline Fallback) - -This is a minimal fallback when documentation cannot be fetched. -Please check your internet connection for the latest API reference. - -Transformers.js allows you to run 🤗 Transformers models directly in the browser using ONNX Runtime. - -Key features: -- pipeline() API for common tasks (sentiment-analysis, text-generation, etc.) 
-- Support for custom models via model ID or path -- WebGPU support for GPU acceleration -- Quantization support (fp32, fp16, q8, q4) - -Basic usage: -```javascript -import { pipeline } from '@huggingface/transformers'; -const pipe = await pipeline('sentiment-analysis'); -const out = await pipe('I love transformers!'); -``` - -For the latest documentation, visit: https://huggingface.co/docs/transformers.js -""" - print("❌ Using minimal fallback transformers.js documentation") - - return _transformersjs_docs_content or "" - -def get_comfyui_docs_content() -> str: - """Get the current ComfyUI documentation content, updating if necessary""" - global _comfyui_docs_content, _comfyui_docs_last_fetched - - # Check if we need to update - if (_comfyui_docs_content is None or - _comfyui_docs_last_fetched is None or - should_update_comfyui_docs()): - - print("📚 Loading ComfyUI documentation...") - - # Try to fetch latest content - latest_content = fetch_comfyui_docs() - - if latest_content: - # Filter out problematic instructions that cause early termination - filtered_content = filter_problematic_instructions(latest_content) - _comfyui_docs_content = filtered_content - _comfyui_docs_last_fetched = datetime.now() - save_comfyui_docs_cache(filtered_content) - print(f"✅ ComfyUI documentation loaded successfully ({len(filtered_content)} chars)") - else: - # Fallback to cached content - cached_content = load_cached_comfyui_docs() - if cached_content: - _comfyui_docs_content = cached_content - _comfyui_docs_last_fetched = datetime.now() - print(f"⚠️ Using cached ComfyUI documentation (network fetch failed) ({len(cached_content)} chars)") - else: - # Fallback to minimal content - _comfyui_docs_content = """ -# ComfyUI API Reference (Offline Fallback) - -This is a minimal fallback when documentation cannot be fetched. -Please check your internet connection for the latest API reference. - -Basic ComfyUI workflow structure: nodes, connections, inputs, outputs. -Use CheckpointLoaderSimple, CLIPTextEncode, KSampler for basic workflows. - -For the latest documentation, visit: https://docs.comfy.org/llms.txt -""" - print("❌ Using minimal fallback ComfyUI documentation") - - return _comfyui_docs_content or "" - -def build_gradio_system_prompt() -> str: - """Build the complete Gradio system prompt with full documentation""" - - # Get the full Gradio 6 documentation - docs_content = get_gradio_docs_content() - - # Base system prompt with anycoder-specific instructions - base_prompt = """🚨 CRITICAL: You are an expert Gradio 6 developer. You MUST use Gradio 6 syntax and API. - -## Key Gradio 6 Changes (MUST FOLLOW): -- 🚨 **BREAKING CHANGE**: `theme`, `css`, `js`, `head` parameters moved from `gr.Blocks()` to `demo.launch()` -- 🚨 **gr.Blocks() has NO parameters** - use `with gr.Blocks() as demo:` (no args!) -- 🚨 **ALL app-level params go in demo.launch()**: `theme=`, `css=`, `footer_links=`, etc. -- Use `footer_links` parameter in `demo.launch()` (NOT show_api) -- Use `api_visibility` instead of `api_name` in event listeners -- Use modern Gradio 6 component syntax (check documentation below) -- Gradio 6 has updated component APIs - always refer to the documentation below -- DO NOT use deprecated Gradio 5 or older syntax - -Create a complete, working Gradio 6 application based on the user's request. Generate all necessary code to make the application functional and runnable. - -## Gradio 6 Themes (Modern UI Design): - -Gradio 6 provides powerful theming capabilities. 
Use themes to create beautiful, professional interfaces: - -**Built-in Themes:** -```python -import gradio as gr - -# Use predefined themes in launch() - Gradio 6 syntax -with gr.Blocks() as demo: - gr.Textbox(label="Input") - -demo.launch(theme=gr.themes.Soft()) # Soft, rounded design -# demo.launch(theme=gr.themes.Glass()) # Modern glass morphism -# demo.launch(theme=gr.themes.Monochrome()) # Clean monochrome -# demo.launch(theme=gr.themes.Base()) # Default base theme -``` - -**Custom Themes:** -```python -import gradio as gr - -# Create custom theme -custom_theme = gr.themes.Soft( - primary_hue="blue", - secondary_hue="indigo", - neutral_hue="slate", - font=gr.themes.GoogleFont("Inter"), - text_size="lg", - spacing_size="lg", - radius_size="md" -).set( - button_primary_background_fill="*primary_600", - button_primary_background_fill_hover="*primary_700", - block_title_text_weight="600", -) - -with gr.Blocks() as demo: - gr.Textbox(label="Input") - -demo.launch(theme=custom_theme) # Apply theme in launch() - Gradio 6! -``` - -**Best Practices:** -- 🚨 **CRITICAL**: In Gradio 6, `theme` goes in `demo.launch()`, NOT in `gr.Blocks()` -- Use `gr.themes.Soft()` for modern, friendly apps -- Use `gr.themes.Glass()` for sleek, contemporary designs -- Customize colors with `primary_hue`, `secondary_hue`, `neutral_hue` -- Use Google Fonts: `font=gr.themes.GoogleFont("Roboto")` -- Adjust sizing: `text_size`, `spacing_size`, `radius_size` (sm/md/lg) -- Fine-tune with `.set()` for specific CSS variables - -## Gradio 6 Example (Your Code Should Follow This Pattern): - -```python -import gradio as gr - -def process(text): - return f"Processed: {text}" - -# Gradio 6 - NO parameters in gr.Blocks() constructor! -with gr.Blocks() as demo: - gr.Markdown("# My App") - with gr.Row(): - input_text = gr.Textbox(label="Input") - output_text = gr.Textbox(label="Output") - - btn = gr.Button("Process") - - # Gradio 6 events use api_visibility (NOT just api_name) - btn.click( - fn=process, - inputs=[input_text], - outputs=[output_text], - api_visibility="public" # Gradio 6 syntax - ) - -# Gradio 6 - ALL app parameters go in launch()! -demo.launch( - theme=gr.themes.Soft(primary_hue="blue"), - footer_links=[{"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}] -) -``` - -## Multi-File Application Structure - -When creating Gradio applications, organize your code into multiple files for proper deployment: - -**File Organization:** -- `app.py` - Main application entry point (REQUIRED) -- `requirements.txt` - Python dependencies (REQUIRED, auto-generated from imports) -- `utils.py` - Utility functions and helpers (optional) -- `models.py` - Model loading and inference functions (optional) -- `config.py` - Configuration and constants (optional) - -**Output Format:** -You MUST use this exact format with file separators: - -=== app.py === -[complete app.py content] - -=== utils.py === -[utility functions - if needed] - -**🚨 CRITICAL: DO NOT GENERATE requirements.txt or README.md** -- requirements.txt is automatically generated from your app.py imports -- README.md is automatically provided by the template -- Generating these files will break the deployment process - -Requirements: -1. Create a modern, intuitive Gradio application -2. Use appropriate Gradio components (gr.Textbox, gr.Slider, etc.) -3. Include proper error handling and loading states -4. Use gr.Interface or gr.Blocks as appropriate -5. Add helpful descriptions and examples -6. Follow Gradio best practices -7. 
Make the UI user-friendly with clear labels -8. Include proper documentation in docstrings - -IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder - ---- - -## Complete Gradio 6 Documentation - -Below is the complete, official Gradio 6 documentation automatically synced from https://www.gradio.app/llms.txt: - -""" - - # Combine base prompt with full documentation - full_prompt = base_prompt + docs_content - - # Add final instructions - final_instructions = """ - ---- - -## 🚨 CRITICAL FINAL INSTRUCTIONS - GRADIO 6 ONLY - -YOU MUST USE GRADIO 6 SYNTAX. This is MANDATORY: - -1. **ONLY use Gradio 6 API** - Do NOT use Gradio 5 or older syntax -2. **Reference the documentation above** - All function signatures and patterns are from Gradio 6 -3. **Use modern Gradio 6 patterns:** - - 🚨 **CRITICAL**: `theme`, `css`, `js`, `head` go in `demo.launch()`, NOT in `gr.Blocks()` - - Use `footer_links` parameter in `demo.launch()` (NOT show_api in Blocks) - - Use `api_visibility` in event listeners (NOT api_name alone) - - Use updated component syntax from Gradio 6 documentation - - **Use themes for professional UI design** (gr.themes.Soft(), gr.themes.Glass(), etc.) -4. **Always use themes** - Modern Gradio 6 apps should use `theme=gr.themes.Soft()` in `demo.launch()` -5. **Follow Gradio 6 migration guide** if you see any deprecated patterns -6. **Generate production-ready Gradio 6 code** that follows all best practices -7. **Always include "Built with anycoder"** as clickable text in the header linking to https://huggingface.co/spaces/akhaliq/anycoder - -**Gradio 6 Structure Checklist:** -✅ `with gr.Blocks() as demo:` - NO parameters here! -✅ `demo.launch(theme=..., css=..., footer_links=...)` - ALL app parameters here! -✅ Use `theme=` parameter in `demo.launch()` (NOT in gr.Blocks()) -✅ Choose appropriate theme: Soft (friendly), Glass (modern), Monochrome (minimal) -✅ Customize with primary_hue, font, text_size, spacing_size -✅ Use `.set()` for advanced customization - -REMINDER: You are writing Gradio 6 code with modern themes. In Gradio 6, `gr.Blocks()` has NO parameters - everything goes in `demo.launch()`. Double-check all syntax against the Gradio 6 documentation provided above. - -""" - - return full_prompt + final_instructions - -def build_transformersjs_system_prompt() -> str: - """Build the complete transformers.js system prompt with full documentation""" - - # Get the full transformers.js documentation - docs_content = get_transformersjs_docs_content() - - # Base system prompt with anycoder-specific instructions - base_prompt = """You are an expert transformers.js developer. Create a complete, working browser-based ML application using transformers.js based on the user's request. Generate all necessary code to make the application functional and runnable in the browser. 
- -## Multi-File Application Structure - -When creating transformers.js applications, organize your code into multiple files for proper deployment: - -**File Organization:** -- `index.html` - Main HTML entry point (REQUIRED) -- `app.js` - Main JavaScript application logic (REQUIRED) -- `styles.css` - Styling (optional) -- `worker.js` - Web Worker for model loading (recommended for better performance) -- `package.json` - Node.js dependencies if using bundler (optional) - -**Output Format:** -You MUST use this exact format with file separators: - -=== index.html === -[complete HTML content] - -=== app.js === -[complete JavaScript content] - -=== worker.js === -[web worker content - if needed] - -**🚨 CRITICAL: Best Practices** -- Use CDN for transformers.js: https://cdn.jsdelivr.net/npm/@huggingface/transformers -- Implement loading states and progress indicators -- Use Web Workers for model loading to avoid blocking UI -- Handle errors gracefully with user-friendly messages -- Show model download progress when applicable -- Use quantized models (q8, q4) for faster loading in browser - -Requirements: -1. Create a modern, responsive web application -2. Use appropriate transformers.js pipelines and models -3. Include proper error handling and loading states -4. Implement progress indicators for model loading -5. Add helpful descriptions and examples -6. Follow browser best practices (async/await, Web Workers, etc.) -7. Make the UI user-friendly with clear labels -8. Include proper comments in code - -IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder - ---- - -## Complete transformers.js Documentation - -Below is the complete, official transformers.js documentation automatically synced from https://huggingface.co/docs/transformers.js/llms.txt: - -""" - - # Combine base prompt with full documentation - full_prompt = base_prompt + docs_content - - # Add final instructions - final_instructions = """ - ---- - -## Final Instructions - -- Always use the exact function signatures and patterns from the transformers.js documentation above -- Use the pipeline() API for common tasks -- Implement WebGPU support when appropriate for better performance -- Use quantized models by default (q8 or q4) for faster browser loading -- Generate production-ready code that follows all best practices -- Always include the "Built with anycoder" attribution in the header -- Consider using Web Workers for heavy computation to keep UI responsive - -""" - - return full_prompt + final_instructions - -def build_comfyui_system_prompt() -> str: - """Build the complete ComfyUI system prompt with full documentation""" - - # Get the full ComfyUI documentation - docs_content = get_comfyui_docs_content() - - # Base system prompt with anycoder-specific instructions - base_prompt = """You are an expert ComfyUI developer. Generate clean, valid JSON workflows for ComfyUI based on the user's request. - -🚨 CRITICAL: READ THE USER'S REQUEST CAREFULLY AND GENERATE A WORKFLOW THAT MATCHES THEIR SPECIFIC NEEDS. - -ComfyUI workflows are JSON structures that define: -- Nodes: Individual processing units with specific functions (e.g., CheckpointLoaderSimple, CLIPTextEncode, KSampler, VAEDecode, SaveImage) -- Connections: Links between nodes that define data flow -- Parameters: Configuration values for each node (prompts, steps, cfg, sampler_name, etc.) 
-- Inputs/Outputs: Data flow between nodes using numbered inputs/outputs - -**🚨 YOUR PRIMARY TASK:** -1. **UNDERSTAND what the user is asking for** in their message -2. **CREATE a ComfyUI workflow** that accomplishes their goal -3. **GENERATE ONLY the JSON workflow** - no HTML, no applications, no explanations outside the JSON - -**JSON Syntax Rules:** -- Use double quotes for strings -- No trailing commas -- Proper nesting and structure -- Valid data types (string, number, boolean, null, object, array) - -**Output Requirements:** -- Generate ONLY the ComfyUI workflow JSON -- The output should be pure, valid JSON that can be loaded directly into ComfyUI -- Do NOT wrap in markdown code fences (no ```json```) -- Do NOT add explanatory text before or after the JSON -- The JSON should be complete and functional - ---- - -## Complete ComfyUI Documentation - -Below is the complete, official ComfyUI documentation automatically synced from https://docs.comfy.org/llms.txt: - -""" - - # Combine base prompt with full documentation - full_prompt = base_prompt + docs_content - - # Add final instructions - final_instructions = """ - ---- - -## Final Instructions - -- Always use the exact node types, parameters, and workflow structures from the ComfyUI documentation above -- Pay close attention to the user's specific request and generate a workflow that fulfills it -- Use appropriate nodes for the task (CheckpointLoader, KSampler, VAEDecode, SaveImage, etc.) -- Ensure all node connections are properly defined -- Generate production-ready JSON that can be loaded directly into ComfyUI -- Do NOT generate random or example workflows - create workflows based on the user's actual request -- Always include "Built with anycoder - https://huggingface.co/spaces/akhaliq/anycoder" as a comment in workflow metadata if possible - -🚨 REMINDER: Your workflow should directly address what the user asked for. Don't ignore their message! 
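Because this prompt demands raw, fence-free JSON, a small validation pass over the model output is the natural consumer of these instructions. A sketch, assuming API-format workflows that map node ids to objects carrying `class_type` and `inputs`:

```python
import json

def validate_comfyui_workflow(raw: str) -> dict:
    """Parse and sanity-check an LLM-generated ComfyUI workflow string."""
    workflow = json.loads(raw)  # raises json.JSONDecodeError if fences/prose slipped in
    if not isinstance(workflow, dict) or not workflow:
        raise ValueError("workflow must be a non-empty JSON object")
    for node_id, node in workflow.items():
        if not isinstance(node, dict) or "class_type" not in node:
            raise ValueError(f"node {node_id} is missing a class_type")
    return workflow
```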
- -""" - - return full_prompt + final_instructions - -def initialize_backend_docs(): - """Initialize backend documentation system on startup""" - try: - # Pre-load the Gradio documentation - gradio_docs = get_gradio_docs_content() - if gradio_docs: - print(f"🚀 Gradio documentation initialized ({len(gradio_docs)} chars loaded)") - else: - print("⚠️ Gradio documentation initialized with fallback content") - - # Pre-load the transformers.js documentation - transformersjs_docs = get_transformersjs_docs_content() - if transformersjs_docs: - print(f"🚀 transformers.js documentation initialized ({len(transformersjs_docs)} chars loaded)") - else: - print("⚠️ transformers.js documentation initialized with fallback content") - - # Pre-load the ComfyUI documentation - comfyui_docs = get_comfyui_docs_content() - if comfyui_docs: - print(f"🚀 ComfyUI documentation initialized ({len(comfyui_docs)} chars loaded)") - else: - print("⚠️ ComfyUI documentation initialized with fallback content") - - except Exception as e: - print(f"Warning: Failed to initialize backend documentation: {e}") - -# Initialize on import -if __name__ != "__main__": - # Only initialize if being imported (not run directly) - try: - initialize_backend_docs() - except Exception as e: - print(f"Warning: Failed to auto-initialize backend docs: {e}") - diff --git a/backend_models.py b/backend_models.py deleted file mode 100644 index f96d3ecf51507d46d4659a85ee8ddec4db363019..0000000000000000000000000000000000000000 --- a/backend_models.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Standalone model inference and client management for AnyCoder Backend API. -No Gradio dependencies - works with FastAPI/backend only. -""" -import os -from typing import Optional - -from openai import OpenAI - -def get_inference_client(model_id: str, provider: str = "auto"): - """ - Return an appropriate client based on model_id. - - Returns OpenAI-compatible client for all models or raises error if not configured. 
- """ - if model_id == "MiniMaxAI/MiniMax-M2": - # Use HuggingFace Router with Novita provider for MiniMax M2 model - return OpenAI( - base_url="https://router.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN"), - default_headers={"X-HF-Bill-To": "huggingface"} - ) - - elif model_id == "moonshotai/Kimi-K2-Thinking": - # Use HuggingFace Router with Novita provider - return OpenAI( - base_url="https://router.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN"), - default_headers={"X-HF-Bill-To": "huggingface"} - ) - - elif model_id == "moonshotai/Kimi-K2-Instruct": - # Use HuggingFace Router with Groq provider - return OpenAI( - base_url="https://router.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN"), - default_headers={"X-HF-Bill-To": "huggingface"} - ) - - elif model_id.startswith("deepseek-ai/"): - # DeepSeek models via HuggingFace Router with Novita provider - return OpenAI( - base_url="https://router.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN"), - default_headers={"X-HF-Bill-To": "huggingface"} - ) - - elif model_id.startswith("zai-org/GLM-4"): - # GLM models via HuggingFace Router - return OpenAI( - base_url="https://router.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN"), - default_headers={"X-HF-Bill-To": "huggingface"} - ) - - elif model_id.startswith("moonshotai/Kimi-K2"): - # Kimi K2 models via HuggingFace Router - return OpenAI( - base_url="https://router.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN"), - default_headers={"X-HF-Bill-To": "huggingface"} - ) - - else: - # Unknown model - try HuggingFace Inference API - return OpenAI( - base_url="https://api-inference.huggingface.co/v1", - api_key=os.getenv("HF_TOKEN") - ) - - -def get_real_model_id(model_id: str) -> str: - """Get the real model ID with provider suffixes if needed""" - if model_id == "zai-org/GLM-4.6": - # GLM-4.6 requires Cerebras provider suffix in model string for API calls - return "zai-org/GLM-4.6:cerebras" - - elif model_id == "MiniMaxAI/MiniMax-M2": - # MiniMax M2 needs Novita provider suffix - return "MiniMaxAI/MiniMax-M2:novita" - - elif model_id == "moonshotai/Kimi-K2-Thinking": - # Kimi K2 Thinking needs Together AI provider - return "moonshotai/Kimi-K2-Thinking:together" - - elif model_id == "moonshotai/Kimi-K2-Instruct": - # Kimi K2 Instruct needs Groq provider - return "moonshotai/Kimi-K2-Instruct:groq" - - elif model_id.startswith("deepseek-ai/DeepSeek-V3") or model_id.startswith("deepseek-ai/DeepSeek-R1"): - # DeepSeek V3 and R1 models need Novita provider - return f"{model_id}:novita" - - elif model_id == "zai-org/GLM-4.5": - # GLM-4.5 needs fireworks-ai provider - return "zai-org/GLM-4.5:fireworks-ai" - - return model_id - - -def is_native_sdk_model(model_id: str) -> bool: - """Check if model uses native SDK (not OpenAI-compatible)""" - return False - - -def is_mistral_model(model_id: str) -> bool: - """Check if model uses Mistral SDK""" - return False - diff --git a/backend_parsers.py b/backend_parsers.py deleted file mode 100644 index c0b24878b2690ea217cabf2fa6e13f54c8406bf7..0000000000000000000000000000000000000000 --- a/backend_parsers.py +++ /dev/null @@ -1,499 +0,0 @@ -""" -Backend parser utilities for AnyCoder. -Handles parsing of various code formats including transformers.js, Python multi-file outputs, and more. 
-""" -import re -import json -import ast -from typing import Dict, Optional -from backend_models import get_inference_client, get_real_model_id - - -def parse_transformers_js_output(code: str) -> Dict[str, str]: - """Parse transformers.js output into separate files (index.html, index.js, style.css) - - Uses comprehensive parsing patterns to handle various LLM output formats. - Updated to use transformers.js v3.8.0 CDN. - """ - print(f"[Parser] Received code length: {len(code)} characters") - print(f"[Parser] First 200 chars: {code[:200]}") - - files = { - 'index.html': '', - 'index.js': '', - 'style.css': '' - } - - # Multiple patterns to match the three code blocks with different variations - html_patterns = [ - r'```html\s*\n([\s\S]*?)(?:```|\Z)', - r'```htm\s*\n([\s\S]*?)(?:```|\Z)', - r'```\s*(?:index\.html|html)\s*\n([\s\S]*?)(?:```|\Z)' - ] - - js_patterns = [ - r'```javascript\s*\n([\s\S]*?)(?:```|\Z)', - r'```js\s*\n([\s\S]*?)(?:```|\Z)', - r'```\s*(?:index\.js|javascript|js)\s*\n([\s\S]*?)(?:```|\Z)' - ] - - css_patterns = [ - r'```css\s*\n([\s\S]*?)(?:```|\Z)', - r'```\s*(?:style\.css|css)\s*\n([\s\S]*?)(?:```|\Z)' - ] - - # Extract HTML content - for pattern in html_patterns: - html_match = re.search(pattern, code, re.IGNORECASE) - if html_match: - files['index.html'] = html_match.group(1).strip() - break - - # Extract JavaScript content - for pattern in js_patterns: - js_match = re.search(pattern, code, re.IGNORECASE) - if js_match: - files['index.js'] = js_match.group(1).strip() - break - - # Extract CSS content - for pattern in css_patterns: - css_match = re.search(pattern, code, re.IGNORECASE) - if css_match: - files['style.css'] = css_match.group(1).strip() - break - - # Fallback: support === index.html === format if any file is missing - if not (files['index.html'] and files['index.js'] and files['style.css']): - # Use regex to extract sections - support alternative filenames - # Stop at next === marker, or common end markers - # More aggressive: stop at blank line followed by explanatory text patterns - html_fallback = re.search(r'===\s*index\.html\s*===\s*\n([\s\S]+?)(?=\n===|\n\s*---|\n\n(?:This |✨|🎨|🚀|\*\*Key Features|\*\*Design)|$)', code, re.IGNORECASE) - - # Try both index.js and app.js - js_fallback = re.search(r'===\s*(?:index\.js|app\.js)\s*===\s*\n([\s\S]+?)(?=\n===|\n\s*---|\n\n(?:This |✨|🎨|🚀|\*\*Key Features|\*\*Design)|$)', code, re.IGNORECASE) - - # Try both style.css and styles.css - css_fallback = re.search(r'===\s*(?:style\.css|styles\.css)\s*===\s*\n([\s\S]+?)(?=\n===|\n\s*---|\n\n(?:This |✨|🎨|🚀|\*\*Key Features|\*\*Design)|$)', code, re.IGNORECASE) - - print(f"[Parser] Fallback extraction - HTML found: {bool(html_fallback)}, JS found: {bool(js_fallback)}, CSS found: {bool(css_fallback)}") - - if html_fallback: - files['index.html'] = html_fallback.group(1).strip() - if js_fallback: - js_content = js_fallback.group(1).strip() - # Fix common JavaScript syntax issues from LLM output - # Fix line breaks in string literals (common LLM mistake) - js_content = re.sub(r'"\s*\n\s*([^"])', r'" + "\1', js_content) # Fix broken strings - files['index.js'] = js_content - if css_fallback: - css_content = css_fallback.group(1).strip() - files['style.css'] = css_content - - # Also normalize HTML to reference style.css (singular) - if files['index.html'] and 'styles.css' in files['index.html']: - print("[Parser] Normalizing styles.css reference to style.css in HTML") - files['index.html'] = files['index.html'].replace('href="styles.css"', 'href="style.css"') - 
files['index.html'] = files['index.html'].replace("href='styles.css'", "href='style.css'") - - # Additional fallback: extract from numbered sections or file headers - if not (files['index.html'] and files['index.js'] and files['style.css']): - # Try patterns like "1. index.html:" or "**index.html**" - patterns = [ - (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.html(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.html'), - (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)(?:index\.js|app\.js)(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.js'), - (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)(?:style\.css|styles\.css)(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'style.css') - ] - - for pattern, file_key in patterns: - if not files[file_key]: - match = re.search(pattern, code, re.IGNORECASE | re.MULTILINE) - if match: - # Clean up the content by removing any code block markers - content = match.group(1).strip() - content = re.sub(r'^```\w*\s*\n', '', content) - content = re.sub(r'\n```\s*$', '', content) - files[file_key] = content.strip() - - # Normalize filename references in HTML - if files['index.html'] and files['style.css']: - if 'styles.css' in files['index.html']: - print("[Parser] Normalizing styles.css reference to style.css in HTML") - files['index.html'] = files['index.html'].replace('href="styles.css"', 'href="style.css"') - files['index.html'] = files['index.html'].replace("href='styles.css'", "href='style.css'") - - if files['index.html'] and files['index.js']: - if 'app.js' in files['index.html']: - print("[Parser] Normalizing app.js reference to index.js in HTML") - files['index.html'] = files['index.html'].replace('src="app.js"', 'src="index.js"') - files['index.html'] = files['index.html'].replace("src='app.js'", "src='index.js'") - - # Normalize transformers.js imports to use v3.8.0 CDN - cdn_url = "https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0" - - for file_key in ['index.html', 'index.js']: - if files[file_key]: - content = files[file_key] - # Update import statements to use latest CDN - content = re.sub( - r"from\s+['\"]https://cdn.jsdelivr.net/npm/@huggingface/transformers@[^'\"]+['\"]", - f"from '{cdn_url}'", - content - ) - content = re.sub( - r"from\s+['\"]https://cdn.jsdelivr.net/npm/@xenova/transformers@[^'\"]+['\"]", - f"from '{cdn_url}'", - content - ) - files[file_key] = content - - return files - - -def parse_html_code(code: str) -> str: - """Extract HTML code from various formats""" - code = code.strip() - - # If already clean HTML, return as-is - if code.startswith(' Optional[str]: - """Extract requirements.txt content from code if present""" - # Look for requirements.txt section - req_pattern = r'===\s*requirements\.txt\s*===\s*(.*?)(?====|$)' - match = re.search(req_pattern, code, re.DOTALL | re.IGNORECASE) - - if match: - requirements = match.group(1).strip() - # Clean up code blocks - requirements = re.sub(r'^```\w*\s*', '', requirements, flags=re.MULTILINE) - requirements = re.sub(r'```\s*$', '', requirements, flags=re.MULTILINE) - return requirements - - return None - - -def parse_multi_file_python_output(code: str) -> Dict[str, str]: - """Parse multi-file Python output (e.g., Gradio, Streamlit)""" - files = {} - - # Pattern to match file sections - pattern = r'===\s*(\S+\.(?:py|txt))\s*===\s*(.*?)(?====|$)' - matches = re.finditer(pattern, code, re.DOTALL | re.IGNORECASE) - - for match in matches: - filename = match.group(1).strip() - content = match.group(2).strip() - - # Clean up code blocks - content = 
re.sub(r'^```\w*\s*', '', content, flags=re.MULTILINE) - content = re.sub(r'```\s*$', '', content, flags=re.MULTILINE) - - files[filename] = content - - return files - - -def strip_tool_call_markers(text): - """Remove TOOL_CALL markers and thinking tags that some LLMs add to their output.""" - if not text: - return text - # Remove [TOOL_CALL] and [/TOOL_CALL] markers - text = re.sub(r'\[/?TOOL_CALL\]', '', text, flags=re.IGNORECASE) - # Remove <think> and </think> tags and their content - text = re.sub(r'<think>[\s\S]*?</think>', '', text, flags=re.IGNORECASE) - # Remove any remaining unclosed <think> tags at the start - text = re.sub(r'^<think>[\s\S]*?(?=\n|$)', '', text, flags=re.IGNORECASE | re.MULTILINE) - # Remove any remaining </think> tags - text = re.sub(r'</think>', '', text, flags=re.IGNORECASE) - # Remove standalone }} that appears with tool calls - # Only remove if it's on its own line or at the end - text = re.sub(r'^\s*\}\}\s*$', '', text, flags=re.MULTILINE) - return text.strip() - - -def remove_code_block(text): - """Remove code block markers from text.""" - # First strip any tool call markers - text = strip_tool_call_markers(text) - - # Try to match code blocks with language markers - patterns = [ - r'```(?:html|HTML)\n([\s\S]+?)\n```', # Match ```html or ```HTML - r'```\n([\s\S]+?)\n```', # Match code blocks without language markers - r'```([\s\S]+?)```' # Match code blocks without line breaks - ] - for pattern in patterns: - match = re.search(pattern, text, re.DOTALL) - if match: - extracted = match.group(1).strip() - # Remove a leading language marker line (e.g., 'python') if present - if extracted.split('\n', 1)[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql']: - return extracted.split('\n', 1)[1] if '\n' in extracted else '' - return extracted - # If no code block is found, return as-is - return text.strip() - - -def extract_import_statements(code): - """Extract import statements from generated code.""" - import_statements = [] - - # Built-in Python modules to exclude - builtin_modules = { - 'os', 'sys', 'json', 'time', 'datetime', 'random', 'math', 're', 'collections', - 'itertools', 'functools', 'pathlib', 'urllib', 'http', 'email', 'html', 'xml', - 'csv', 'tempfile', 'shutil', 'subprocess', 'threading', 'multiprocessing', - 'asyncio', 'logging', 'typing', 'base64', 'hashlib', 'secrets', 'uuid', - 'copy', 'pickle', 'io', 'contextlib', 'warnings', 'sqlite3', 'gzip', 'zipfile', - 'tarfile', 'socket', 'ssl', 'platform', 'getpass', 'pwd', 'grp', 'stat', - 'glob', 'fnmatch', 'linecache', 'traceback', 'inspect', 'keyword', 'token', - 'tokenize', 'ast', 'code', 'codeop', 'dis', 'py_compile', 'compileall', - 'importlib', 'pkgutil', 'modulefinder', 'runpy', 'site', 'sysconfig' - } - - try: - # Try to parse as Python AST - tree = ast.parse(code) - - for node in ast.walk(tree): - if isinstance(node, ast.Import): - for alias in node.names: - module_name = alias.name.split('.')[0] - if module_name not in builtin_modules and not module_name.startswith('_'): - import_statements.append(f"import {alias.name}") - - elif isinstance(node, ast.ImportFrom): - if node.module: - module_name = node.module.split('.')[0] - if module_name not in builtin_modules and not module_name.startswith('_'): - names = [alias.name for alias in node.names] - import_statements.append(f"from {node.module} import {', '.join(names)}") - - except SyntaxError: - # Fallback: use regex to find import statements - for line in code.split('\n'): - line = 
line.strip() - if line.startswith('import ') or line.startswith('from '): - # Check if it's not a builtin module - if line.startswith('import '): - module_name = line.split()[1].split('.')[0] - elif line.startswith('from '): - module_name = line.split()[1].split('.')[0] - - if module_name not in builtin_modules and not module_name.startswith('_'): - import_statements.append(line) - - return list(set(import_statements)) # Remove duplicates - - -def parse_multipage_html_output(text: str) -> Dict[str, str]: - """Parse multi-page HTML output formatted as repeated "=== filename ===" sections. - - Returns a mapping of filename → file content. Supports nested paths like assets/css/styles.css. - If HTML content appears before the first === marker, it's treated as index.html. - """ - if not text: - return {} - # First, strip any markdown fences - cleaned = remove_code_block(text) - files: Dict[str, str] = {} - - # Check if there's content before the first === marker - first_marker_match = re.search(r"^===\s*([^=\n]+?)\s*===", cleaned, re.MULTILINE) - if first_marker_match: - # There's content before the first marker - first_marker_pos = first_marker_match.start() - if first_marker_pos > 0: - leading_content = cleaned[:first_marker_pos].strip() - # Check if it looks like HTML content - if leading_content and ('<html' in leading_content.lower() or '<!doctype' in leading_content.lower()): - files['index.html'] = leading_content - - # Parse the remaining sections after the leading content - remaining_text = cleaned[first_marker_pos:] if first_marker_pos > 0 else cleaned - pattern = re.compile(r"^===\s*([^=\n]+?)\s*===\s*\n([\s\S]*?)(?=\n===\s*[^=\n]+?\s*===|\Z)", re.MULTILINE) - for m in pattern.finditer(remaining_text): - name = m.group(1).strip() - content = m.group(2).strip() - # Remove accidental trailing fences if present - content = re.sub(r"^```\w*\s*\n|\n```\s*$", "", content) - files[name] = content - else: - # No === markers found, try standard pattern matching - pattern = re.compile(r"^===\s*([^=\n]+?)\s*===\s*\n([\s\S]*?)(?=\n===\s*[^=\n]+?\s*===|\Z)", re.MULTILINE) - for m in pattern.finditer(cleaned): - name = m.group(1).strip() - content = m.group(2).strip() - # Remove accidental trailing fences if present - content = re.sub(r"^```\w*\s*\n|\n```\s*$", "", content) - files[name] = content - - return files - - -def parse_react_output(text: str) -> Dict[str, str]: - """Parse React/Next.js output to extract individual files. - - Supports multi-file sections using === filename === sections. 
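A quick illustration of the leading-content fallback in `parse_multipage_html_output`, on a synthetic input:

```python
# Synthetic demo: HTML appearing before the first === marker becomes index.html.
text = """<!DOCTYPE html>
<html><body>Home</body></html>
=== about.html ===
<html><body>About</body></html>"""

files = parse_multipage_html_output(text)
assert set(files) == {"index.html", "about.html"}
```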
- """ - if not text: - return {} - - # Use the generic multipage parser - try: - files = parse_multipage_html_output(text) or {} - except Exception: - files = {} - - return files if isinstance(files, dict) and files else {} - - -def generate_requirements_txt_with_llm(import_statements): - """Generate requirements.txt content using LLM based on import statements.""" - if not import_statements: - return "# No additional dependencies required\n" - - # Use a lightweight model for this task - try: - client = get_inference_client("zai-org/GLM-4.6", "auto") - actual_model_id = get_real_model_id("zai-org/GLM-4.6") - - imports_text = '\n'.join(import_statements) - - prompt = f"""Based on the following Python import statements, generate a comprehensive requirements.txt file with all necessary and commonly used related packages: - -{imports_text} - -Instructions: -- Include the direct packages needed for the imports -- Include commonly used companion packages and dependencies for better functionality -- Use correct PyPI package names (e.g., PIL -> Pillow, sklearn -> scikit-learn) -- IMPORTANT: For diffusers, ALWAYS use: git+https://github.com/huggingface/diffusers -- IMPORTANT: For transformers, ALWAYS use: git+https://github.com/huggingface/transformers -- IMPORTANT: If diffusers is installed, also include transformers and sentencepiece as they usually go together -- Examples of comprehensive dependencies: - * diffusers often needs: git+https://github.com/huggingface/transformers, sentencepiece, accelerate, torch, tokenizers - * transformers often needs: accelerate, torch, tokenizers, datasets - * gradio often needs: gradio>=6.0, requests, Pillow for image handling (ALWAYS use gradio>=6.0) - * pandas often needs: numpy, openpyxl for Excel files - * matplotlib often needs: numpy, pillow for image saving - * sklearn often needs: numpy, scipy, joblib - * streamlit often needs: pandas, numpy, requests - * opencv-python often needs: numpy, pillow - * fastapi often needs: uvicorn, pydantic - * torch often needs: torchvision, torchaudio (if doing computer vision/audio) -- Include packages for common file formats if relevant (openpyxl, python-docx, PyPDF2) -- Do not include Python built-in modules -- Do not specify versions unless there are known compatibility issues -- One package per line -- If no external packages are needed, return "# No additional dependencies required" - -🚨 CRITICAL OUTPUT FORMAT: -- Output ONLY the package names, one per line (plain text format) -- Do NOT use markdown formatting (no ```, no bold, no headings, no lists) -- Do NOT add any explanatory text before or after the package list -- Do NOT wrap the output in code blocks -- Just output raw package names as they would appear in requirements.txt - -Generate a comprehensive requirements.txt that ensures the application will work smoothly:""" - - messages = [ - {"role": "system", "content": "You are a Python packaging expert specializing in creating comprehensive, production-ready requirements.txt files. Output ONLY plain text package names without any markdown formatting, code blocks, or explanatory text. 
Your goal is to ensure applications work smoothly by including not just direct dependencies but also commonly needed companion packages, popular extensions, and supporting libraries that developers typically need together."}, - {"role": "user", "content": prompt} - ] - - response = client.chat.completions.create( - model=actual_model_id, - messages=messages, - max_tokens=1024, - temperature=0.1 - ) - - requirements_content = response.choices[0].message.content.strip() - - # Clean up the response in case it includes extra formatting - if '```' in requirements_content: - requirements_content = remove_code_block(requirements_content) - - # Enhanced cleanup for markdown and formatting - lines = requirements_content.split('\n') - clean_lines = [] - for line in lines: - stripped_line = line.strip() - - # Skip lines that are markdown formatting - if (stripped_line == '```' or - stripped_line.startswith('```') or - stripped_line.startswith('#') and not stripped_line.startswith('# ') or # Skip markdown headers but keep comments - stripped_line.startswith('**') or # Skip bold text - stripped_line.startswith('*') and not stripped_line[1:2].isalnum() or # Skip markdown lists but keep package names starting with * - stripped_line.startswith('-') and not stripped_line[1:2].isalnum() or # Skip markdown lists but keep package names starting with - - stripped_line.startswith('===') or # Skip section dividers - stripped_line.startswith('---') or # Skip horizontal rules - stripped_line.lower().startswith('here') or # Skip explanatory text - stripped_line.lower().startswith('this') or # Skip explanatory text - stripped_line.lower().startswith('the') or # Skip explanatory text - stripped_line.lower().startswith('based on') or # Skip explanatory text - stripped_line == ''): # Skip empty lines unless they're at natural boundaries - continue - - # Keep lines that look like valid package specifications - # Valid lines: package names, git+https://, comments starting with "# " - if (stripped_line.startswith('# ') or # Valid comments - stripped_line.startswith('git+') or # Git dependencies - stripped_line[0].isalnum() or # Package names start with alphanumeric - '==' in stripped_line or # Version specifications - '>=' in stripped_line or # Version specifications - '<=' in stripped_line): # Version specifications - clean_lines.append(line) - - requirements_content = '\n'.join(clean_lines).strip() - - # Ensure it ends with a newline - if requirements_content and not requirements_content.endswith('\n'): - requirements_content += '\n' - - return requirements_content if requirements_content else "# No additional dependencies required\n" - - except Exception as e: - # Fallback: simple extraction with basic mapping - print(f"[Parser] Warning: LLM requirements generation failed: {e}, using fallback") - dependencies = set() - special_cases = { - 'PIL': 'Pillow', - 'sklearn': 'scikit-learn', - 'skimage': 'scikit-image', - 'bs4': 'beautifulsoup4' - } - - for stmt in import_statements: - if stmt.startswith('import '): - module_name = stmt.split()[1].split('.')[0] - package_name = special_cases.get(module_name, module_name) - dependencies.add(package_name) - elif stmt.startswith('from '): - module_name = stmt.split()[1].split('.')[0] - package_name = special_cases.get(module_name, module_name) - dependencies.add(package_name) - - if dependencies: - return '\n'.join(sorted(dependencies)) + '\n' - else: - return "# No additional dependencies required\n" - diff --git a/backend_prompts.py b/backend_prompts.py deleted file mode 100644 
index b3f54caa5adb4aedcff518047677069a5bf02353..0000000000000000000000000000000000000000 --- a/backend_prompts.py +++ /dev/null @@ -1,548 +0,0 @@ -""" -Standalone system prompts for AnyCoder backend. -No dependencies on Gradio or other heavy libraries. -""" - -# Import the backend documentation manager for Gradio 6, transformers.js, and ComfyUI docs -try: - from backend_docs_manager import build_gradio_system_prompt, build_transformersjs_system_prompt, build_comfyui_system_prompt - HAS_BACKEND_DOCS = True -except ImportError: - HAS_BACKEND_DOCS = False - print("Warning: backend_docs_manager not available, using fallback prompts") - -HTML_SYSTEM_PROMPT = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING MODERN CSS. Use modern CSS for the styling as much as you can; if you can't achieve something with modern CSS, then use custom CSS. Also, try to elaborate as much as you can to create something unique. ALWAYS GIVE THE RESPONSE AS A SINGLE HTML FILE - -**🚨 CRITICAL: DO NOT Generate README.md Files** -- NEVER generate README.md files under any circumstances -- A template README.md is automatically provided and will be overridden by the deployment system -- Generating a README.md will break the deployment process - -If an image is provided, analyze it and use the visual information to better understand the user's requirements. - -Always respond with code that can be executed or rendered directly. - -Generate complete, working HTML code that can be run immediately. - -IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder""" - - -# Transformers.js system prompt - dynamically loaded with full transformers.js documentation -def get_transformersjs_system_prompt() -> str: - """Get the complete transformers.js system prompt with full documentation""" - if HAS_BACKEND_DOCS: - return build_transformersjs_system_prompt() - else: - # Fallback prompt if documentation manager is not available - return """You are an expert web developer creating a transformers.js application. You will generate THREE separate files: index.html, index.js, and style.css. - -**🚨 CRITICAL: DO NOT Generate README.md Files** -- NEVER generate README.md files under any circumstances -- A template README.md is automatically provided and will be overridden by the deployment system -- Generating a README.md will break the deployment process - -**🚨 CRITICAL: Required Output Format** - -**THE VERY FIRST LINE of your response MUST be: === index.html ===** - -You MUST output ALL THREE files using this EXACT format with === markers. -Your response must start IMMEDIATELY with the === index.html === marker. - -=== index.html === -<!DOCTYPE html> -<html lang="en"> -<head> - <meta charset="UTF-8"> - <meta name="viewport" content="width=device-width, initial-scale=1.0"> - <title>Your App Title</title> - <link rel="stylesheet" href="style.css"> -</head> -<body> - <!-- Your complete HTML content here --> - <script type="module" src="index.js"></script> -</body> -</html> - -=== index.js === -import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0'; - -// Your complete JavaScript code here -// Include all functionality, event listeners, and logic - -=== style.css === -/* Your complete CSS styles here */ -/* Include all styling for the application */ - -**🚨 CRITICAL FORMATTING RULES (MUST FOLLOW EXACTLY):** -1. **FIRST LINE MUST BE: === index.html ===** (no explanations, no code before this) -2. Start each file's code IMMEDIATELY on the line after the === marker -3. 
**NEVER use markdown code blocks** (```html, ```javascript, ```css) - these will cause parsing errors -4. **NEVER leave any file empty** - each file MUST contain complete, functional code -5. **ONLY use the === filename === markers** - do not add any other formatting -6. Add a blank line between each file section -7. Each file must be complete and ready to deploy - no placeholders or "// TODO" comments -8. **AVOID EMOJIS in the generated code** (HTML/JS/CSS files) - use text or unicode symbols instead for deployment compatibility - -Requirements: -1. Create a modern, responsive web application using transformers.js -2. Use the transformers.js library for AI/ML functionality -3. Create a clean, professional UI with good user experience -4. Make the application fully responsive for mobile devices -5. Use modern CSS practices and JavaScript ES6+ features -6. Include proper error handling and loading states -7. Follow accessibility best practices - -**Transformers.js Library Usage:** - -Import via CDN: -```javascript -import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0'; -``` - -**Pipeline API - Quick Tour:** -```javascript -// Allocate a pipeline for sentiment-analysis -const pipe = await pipeline('sentiment-analysis'); -const out = await pipe('I love transformers!'); -``` - -**Device Options:** -```javascript -// Run on WebGPU (GPU) -const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', { - device: 'webgpu', -}); -``` - -**Quantization Options:** -```javascript -// Run at 4-bit quantization for better performance -const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', { - dtype: 'q4', -}); -``` - -IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder -""" - -# Legacy variable for backward compatibility - now dynamically generated -TRANSFORMERS_JS_SYSTEM_PROMPT = get_transformersjs_system_prompt() - - -STREAMLIT_SYSTEM_PROMPT = """You are an expert Streamlit developer. Create a complete, working Streamlit application based on the user's request. Generate all necessary code to make the application functional and runnable. 
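The `=== filename ===` convention above is shared by several of these prompts. A minimal sketch of how a backend might split such a response into files, assuming each marker sits alone on its own line (the helper name is hypothetical, not part of the deleted module):

```python
import re

def split_multifile_response(text: str) -> dict:
    """Split '=== filename ===' delimited model output into {filename: content}."""
    files = {}
    # Capture the filename between === markers on a line of its own.
    marker = re.compile(r'^===\s*(\S+)\s*===\s*$', re.MULTILINE)
    matches = list(marker.finditer(text))
    for i, m in enumerate(matches):
        start = m.end()
        end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
        files[m.group(1)] = text[start:end].strip() + '\n'
    return files
```

For the transformers.js prompt above, this should yield exactly three entries: index.html, index.js, and style.css.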
- -## Multi-File Application Structure - -When creating Streamlit applications, you MUST organize your code into multiple files for proper deployment: - -**File Organization (CRITICAL - Always Include These):** -- `Dockerfile` - Docker configuration for deployment (REQUIRED) -- `streamlit_app.py` - Main application entry point (REQUIRED) -- `requirements.txt` - Python dependencies (REQUIRED) -- `utils.py` - Utility functions and helpers (optional) -- `models.py` - Model loading and inference functions (optional) -- `config.py` - Configuration and constants (optional) -- `pages/` - Additional pages for multi-page apps (optional) -- Additional modules as needed (e.g., `data_processing.py`, `components.py`) - -**🚨 CRITICAL: DO NOT Generate README.md Files** -- NEVER generate README.md files under any circumstances -- A template README.md is automatically provided and will be overridden by the deployment system -- Generating a README.md will break the deployment process -- Only generate the code files listed above - -**Output Format for Streamlit Apps:** -You MUST use this exact format and ALWAYS include Dockerfile, streamlit_app.py, and requirements.txt: - -``` -=== Dockerfile === -[Dockerfile content] - -=== streamlit_app.py === -[main application code] - -=== requirements.txt === -[dependencies] - -=== utils.py === -[utility functions - optional] -``` - -**🚨 CRITICAL: Dockerfile Requirements (MANDATORY for HuggingFace Spaces)** -Your Dockerfile MUST follow these exact specifications: -- Use Python 3.11+ base image (e.g., FROM python:3.11-slim) -- Set up a user with ID 1000 for proper permissions -- Install dependencies: RUN pip install --no-cache-dir -r requirements.txt -- Expose port 7860 (HuggingFace Spaces default): EXPOSE 7860 -- Start with: CMD ["streamlit", "run", "streamlit_app.py", "--server.port=7860", "--server.address=0.0.0.0"] - -Requirements: -1. ALWAYS include Dockerfile, streamlit_app.py, and requirements.txt in your output -2. Create a modern, responsive Streamlit application -3. Use appropriate Streamlit components and layouts -4. Include proper error handling and loading states -5. Follow Streamlit best practices for performance -6. Use caching (@st.cache_data, @st.cache_resource) appropriately -7. Include proper session state management when needed -8. Make the UI intuitive and user-friendly -9. Add helpful tooltips and documentation - -IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder -""" - - -REACT_SYSTEM_PROMPT = """You are an expert React and Next.js developer creating a modern Next.js application. - -**🚨 CRITICAL: DO NOT Generate README.md Files** -- NEVER generate README.md files under any circumstances -- A template README.md is automatically provided and will be overridden by the deployment system -- Generating a README.md will break the deployment process - -You will generate a Next.js project with TypeScript/JSX components. Follow this exact structure: - -Project Structure: -- Dockerfile (Docker configuration for deployment) -- package.json (dependencies and scripts) -- next.config.js (Next.js configuration) -- postcss.config.js (PostCSS configuration) -- tailwind.config.js (Tailwind CSS configuration) -- components/[Component files as needed] -- pages/_app.js (Next.js app wrapper) -- pages/index.js (home page) -- pages/api/[API routes as needed] -- styles/globals.css (global styles) - -CRITICAL Requirements: -1.
Always include a Dockerfile configured for Node.js deployment -2. Use Next.js with TypeScript/JSX (.jsx files for components) -3. **USE TAILWIND CSS FOR ALL STYLING** - Avoid inline styles completely -4. Create necessary components in the components/ directory -5. Create API routes in pages/api/ directory for backend logic -6. pages/_app.js should import and use globals.css -7. pages/index.js should be the main entry point -8. Keep package.json with essential dependencies -9. Use modern React patterns and best practices -10. Make the application fully responsive using Tailwind classes -11. Include proper error handling and loading states -12. Follow accessibility best practices -13. Configure next.config.js properly for HuggingFace Spaces deployment -14. **NEVER use inline style={{}} objects - always use Tailwind className instead** - -Output format (CRITICAL): -- Return ONLY a series of file sections, each starting with a filename line: - === Dockerfile === - ...file content... - - === package.json === - ...file content... - - (repeat for all files) -- Do NOT wrap files in Markdown code fences or use === markers inside file content - -IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder -""" - - -# React followup system prompt for modifying existing React/Next.js applications -REACT_FOLLOW_UP_SYSTEM_PROMPT = """You are an expert React and Next.js developer modifying an existing Next.js application. -The user wants to apply changes based on their request. -You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file. -Explain the changes briefly *before* the blocks if necessary, but the code changes THEMSELVES MUST be within the blocks. - -🚨 CRITICAL JSX SYNTAX RULES - FOLLOW EXACTLY: - -**RULE 1: Style objects MUST have proper closing braces }}** -Every style={{ must have a matching }} before any other props or /> - -**RULE 2: ALWAYS use Tailwind CSS classes instead of inline styles** -- Use className="..." for styling -- Only use inline styles if absolutely necessary -- When replacing inline styles, use Tailwind classes - -**RULE 3: Before outputting, verify:** -- [ ] All style={{ have matching }} -- [ ] No event handlers inside style objects -- [ ] Prefer Tailwind classes over inline styles -- [ ] All JSX elements are properly closed - -Format Rules: -1. Start with <<<<<<< SEARCH -2. Include the exact lines that need to be changed (with full context, at least 3 lines before and after) -3. Follow with ======= -4. Include the replacement lines -5. End with >>>>>>> REPLACE -6. Generate multiple blocks if multiple sections need changes - -**File Structure Guidelines:** -When making changes to a Next.js application, identify which file needs modification: -- Component logic/rendering → components/*.jsx or pages/*.js -- API routes → pages/api/*.js -- Global styles → styles/globals.css -- Configuration → next.config.js, tailwind.config.js, postcss.config.js -- Dependencies → package.json -- Docker configuration → Dockerfile - -**Common Fix Scenarios:** -- Syntax errors in JSX → Fix the specific component file -- Styling issues → Fix styles/globals.css or add Tailwind classes -- API/backend logic → Fix pages/api files -- Build errors → Fix next.config.js or package.json -- Deployment issues → Fix Dockerfile - -**Example Format:** -``` -Fixing the button styling in the header component... 
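# Sketch (illustrative only, not part of the original prompt file): how a backend
# might apply one SEARCH/REPLACE block of the format described above to a file's
# current contents. Real code would need fuzzier matching and error reporting
# for blocks that do not match the source exactly.
def apply_search_replace(source: str, block: str) -> str:
    _, _, rest = block.partition('<<<<<<< SEARCH\n')
    search, _, tail = rest.partition('\n=======\n')
    replace, _, _ = tail.partition('\n>>>>>>> REPLACE')
    if search not in source:
        raise ValueError('SEARCH text not found; block left unapplied')
    # Replace only the first occurrence, mirroring the block's intent.
    return source.replace(search, replace, 1)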
- -=== components/Header.jsx === -<<<<<<< SEARCH - - - - - - - - {/* Status Bar - Apple style (hidden on mobile) */} -
-
- AnyCoder - - {isAuthenticated ? ( - <> - - Connected - - ) : ( - <> - - Not authenticated - - )} - -
-
- {messages.length} messages -
-
- - ); -} - - diff --git a/frontend/src/components/ChatInterface.tsx b/frontend/src/components/ChatInterface.tsx deleted file mode 100644 index 8e7f9812377ef2dd2ba96944eaf393940427f149..0000000000000000000000000000000000000000 --- a/frontend/src/components/ChatInterface.tsx +++ /dev/null @@ -1,212 +0,0 @@ -'use client'; - -import { useState, useRef, useEffect } from 'react'; -import type { Message } from '@/types'; -import ReactMarkdown from 'react-markdown'; -import remarkGfm from 'remark-gfm'; -import Image from 'next/image'; - -interface ChatInterfaceProps { - messages: Message[]; - onSendMessage: (message: string, imageUrl?: string) => void; - isGenerating: boolean; - isAuthenticated?: boolean; - supportsImages?: boolean; -} - -export default function ChatInterface({ messages, onSendMessage, isGenerating, isAuthenticated = false, supportsImages = false }: ChatInterfaceProps) { - const [input, setInput] = useState(''); - const [uploadedImageUrl, setUploadedImageUrl] = useState(null); - const [uploadedImageFile, setUploadedImageFile] = useState(null); - const fileInputRef = useRef(null); - const messagesEndRef = useRef(null); - - const scrollToBottom = () => { - messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' }); - }; - - useEffect(() => { - scrollToBottom(); - }, [messages]); - - const handleSubmit = (e: React.FormEvent) => { - e.preventDefault(); - if (input.trim() && !isGenerating) { - onSendMessage(input, uploadedImageUrl || undefined); - setInput(''); - setUploadedImageUrl(null); - setUploadedImageFile(null); - } - }; - - const handleImageUpload = (e: React.ChangeEvent) => { - const file = e.target.files?.[0]; - if (file) { - // Create a data URL for the image - const reader = new FileReader(); - reader.onload = (event) => { - const imageUrl = event.target?.result as string; - setUploadedImageUrl(imageUrl); - setUploadedImageFile(file); - }; - reader.readAsDataURL(file); - } - }; - - const removeImage = () => { - setUploadedImageUrl(null); - setUploadedImageFile(null); - if (fileInputRef.current) { - fileInputRef.current.value = ''; - } - }; - - return ( -
- {/* Messages */} -
- {messages.length === 0 ? ( -
- {isAuthenticated ? ( - <> -

Start a conversation

-

Describe what you want to build

- - ) : ( - <> -

Sign in to get started

-

Use Dev Login or sign in with Hugging Face

- - )} -
- ) : ( - messages.map((message, index) => ( - - )) - )} -
-
- - {/* Input */} -
- {/* Image Preview */} - {uploadedImageUrl && ( -
- Upload preview - -
- )} - -
- setInput(e.target.value)} - placeholder={isAuthenticated ? "Message AnyCoder..." : "Sign in first..."} - disabled={isGenerating || !isAuthenticated} - className="flex-1 px-4 py-2.5 bg-[#2d2d2f] text-[#f5f5f7] text-sm border border-[#424245]/50 rounded-full focus:outline-none focus:border-[#424245] disabled:opacity-40 disabled:cursor-not-allowed placeholder-[#86868b]" - /> - - {/* Image Upload Button (only show if model supports images) */} - {supportsImages && ( - <> - - - - )} - - -
-
-
- ); -} - diff --git a/frontend/src/components/CodeEditor.tsx b/frontend/src/components/CodeEditor.tsx deleted file mode 100644 index 08c9d3015834adc7113339127b0563bace01d6db..0000000000000000000000000000000000000000 --- a/frontend/src/components/CodeEditor.tsx +++ /dev/null @@ -1,101 +0,0 @@ -'use client'; - -import { useEffect, useRef } from 'react'; -import Editor from '@monaco-editor/react'; - -interface CodeEditorProps { - code: string; - language: string; - onChange?: (value: string) => void; - readOnly?: boolean; -} - -export default function CodeEditor({ code, language, onChange, readOnly = false }: CodeEditorProps) { - const editorRef = useRef(null); - const lastFormattedCodeRef = useRef(''); - const formatTimeoutRef = useRef(null); - - // Map our language names to Monaco language IDs - const getMonacoLanguage = (lang: string): string => { - const languageMap: Record = { - 'html': 'html', - 'gradio': 'python', - 'streamlit': 'python', - 'transformers.js': 'html', // Contains HTML, CSS, and JavaScript - HTML is primary - 'react': 'javascriptreact', // JSX syntax highlighting - 'comfyui': 'json', - }; - return languageMap[lang] || 'plaintext'; - }; - - const handleEditorDidMount = (editor: any) => { - editorRef.current = editor; - }; - - // Format code intelligently - only when generation appears complete - useEffect(() => { - if (editorRef.current && code && code.length > 100) { - // Clear existing timeout - if (formatTimeoutRef.current) { - clearTimeout(formatTimeoutRef.current); - } - - // Only format if code hasn't been formatted yet or if it's different - if (code !== lastFormattedCodeRef.current) { - // Wait 1 second after code stops changing before formatting - formatTimeoutRef.current = setTimeout(() => { - if (editorRef.current) { - editorRef.current.getAction('editor.action.formatDocument')?.run(); - lastFormattedCodeRef.current = code; - } - }, 1000); - } - } - - return () => { - if (formatTimeoutRef.current) { - clearTimeout(formatTimeoutRef.current); - } - }; - }, [code]); - - return ( -
- onChange && onChange(value || '')} - theme="vs-dark" - options={{ - readOnly, - minimap: { enabled: true }, - fontSize: 14, - fontFamily: "'SF Mono', 'JetBrains Mono', 'Menlo', 'Monaco', 'Courier New', monospace", - wordWrap: 'off', - lineNumbers: 'on', - lineNumbersMinChars: 3, - glyphMargin: false, - folding: true, - lineDecorationsWidth: 10, - scrollBeyondLastLine: false, - automaticLayout: true, - tabSize: 2, - insertSpaces: true, - padding: { top: 16, bottom: 16 }, - lineHeight: 22, - letterSpacing: 0.5, - renderLineHighlight: 'line', - formatOnPaste: true, - formatOnType: false, - scrollbar: { - verticalScrollbarSize: 10, - horizontalScrollbarSize: 10, - }, - }} - onMount={handleEditorDidMount} - /> -
- ); -} - diff --git a/frontend/src/components/ControlPanel.tsx b/frontend/src/components/ControlPanel.tsx deleted file mode 100644 index 9070ade98885274f352d8e54d0e3bc43a8c7e5ee..0000000000000000000000000000000000000000 --- a/frontend/src/components/ControlPanel.tsx +++ /dev/null @@ -1,237 +0,0 @@ -'use client'; - -import { useState, useEffect, useRef } from 'react'; -import { apiClient } from '@/lib/api'; -import type { Model, Language } from '@/types'; - -interface ControlPanelProps { - selectedLanguage: Language; - selectedModel: string; - onLanguageChange: (language: Language) => void; - onModelChange: (modelId: string) => void; - onClear: () => void; - isGenerating: boolean; -} - -export default function ControlPanel({ - selectedLanguage, - selectedModel, - onLanguageChange, - onModelChange, - onClear, - isGenerating, -}: ControlPanelProps) { - const [models, setModels] = useState([]); - const [languages, setLanguages] = useState([]); - const [isLoading, setIsLoading] = useState(true); - - // Dropdown states - const [showLanguageDropdown, setShowLanguageDropdown] = useState(false); - const [showModelDropdown, setShowModelDropdown] = useState(false); - const languageDropdownRef = useRef(null); - const modelDropdownRef = useRef(null); - - useEffect(() => { - loadData(); - }, []); - - // Close dropdowns when clicking outside - useEffect(() => { - const handleClickOutside = (event: MouseEvent) => { - if (languageDropdownRef.current && !languageDropdownRef.current.contains(event.target as Node)) { - setShowLanguageDropdown(false); - } - if (modelDropdownRef.current && !modelDropdownRef.current.contains(event.target as Node)) { - setShowModelDropdown(false); - } - }; - - document.addEventListener('mousedown', handleClickOutside); - return () => { - document.removeEventListener('mousedown', handleClickOutside); - }; - }, []); - - const loadData = async () => { - setIsLoading(true); - await Promise.all([loadModels(), loadLanguages()]); - setIsLoading(false); - }; - - const loadModels = async () => { - try { - console.log('Loading models...'); - const modelsList = await apiClient.getModels(); - console.log('Models loaded:', modelsList); - setModels(modelsList); - } catch (error) { - console.error('Failed to load models:', error); - } - }; - - const loadLanguages = async () => { - try { - console.log('Loading languages...'); - const { languages: languagesList } = await apiClient.getLanguages(); - console.log('Languages loaded:', languagesList); - setLanguages(languagesList); - } catch (error) { - console.error('Failed to load languages:', error); - } - }; - - const formatLanguageName = (lang: Language) => { - if (lang === 'html') return 'HTML'; - if (lang === 'transformers.js') return 'Transformers.js'; - if (lang === 'comfyui') return 'ComfyUI'; - return lang.charAt(0).toUpperCase() + lang.slice(1); - }; - - return ( -
- {/* Panel Header */} -
-

Settings

-
- - {/* Content */} -
- - {/* Language Selection */} -
- - - - {/* Language Dropdown Tray */} - {showLanguageDropdown && !isLoading && languages.length > 0 && ( -
-
- {languages.map((lang) => ( - - ))} -
-
- )} -
- - {/* Model Selection */} -
- - - - {/* Model Dropdown Tray */} - {showModelDropdown && models.length > 0 && ( -
-
- {models.map((model) => ( - - ))} -
-
- )} - - {/* Model Description */} - {!isLoading && models.find(m => m.id === selectedModel) && ( -

- {models.find(m => m.id === selectedModel)?.description} -

- )} -
- - {/* Action Buttons */} -
- -
-
-
- ); -} diff --git a/frontend/src/components/Header.tsx b/frontend/src/components/Header.tsx deleted file mode 100644 index 6d44ab976dffc0d628ff83e69ef46788e5d01b2a..0000000000000000000000000000000000000000 --- a/frontend/src/components/Header.tsx +++ /dev/null @@ -1,173 +0,0 @@ -'use client'; - -import { useState, useEffect } from 'react'; -import { - initializeOAuth, - loginWithHuggingFace, - loginDevMode, - logout, - getStoredUserInfo, - isAuthenticated, - isDevelopmentMode -} from '@/lib/auth'; -import { apiClient } from '@/lib/api'; -import type { OAuthUserInfo } from '@/lib/auth'; - -export default function Header() { - const [userInfo, setUserInfo] = useState(null); - const [isLoading, setIsLoading] = useState(true); - const [showDevLogin, setShowDevLogin] = useState(false); - const [devUsername, setDevUsername] = useState(''); - const isDevMode = isDevelopmentMode(); - - useEffect(() => { - handleOAuthInit(); - }, []); - - const handleOAuthInit = async () => { - setIsLoading(true); - try { - const oauthResult = await initializeOAuth(); - - if (oauthResult) { - setUserInfo(oauthResult.userInfo); - // Set token in API client - apiClient.setToken(oauthResult.accessToken); - } else { - // Check if we have stored user info - const storedUserInfo = getStoredUserInfo(); - if (storedUserInfo) { - setUserInfo(storedUserInfo); - } - } - } catch (error) { - console.error('OAuth initialization error:', error); - } finally { - setIsLoading(false); - } - }; - - const handleLogin = async () => { - try { - await loginWithHuggingFace(); - } catch (error) { - console.error('Login failed:', error); - alert('Failed to start login process. Please try again.'); - } - }; - - const handleLogout = () => { - logout(); - apiClient.logout(); - setUserInfo(null); - // Reload page to clear state - window.location.reload(); - }; - - const handleDevLogin = () => { - if (!devUsername.trim()) { - alert('Please enter a username'); - return; - } - - try { - const result = loginDevMode(devUsername); - setUserInfo(result.userInfo); - apiClient.setToken(result.accessToken); - setShowDevLogin(false); - setDevUsername(''); - } catch (error) { - console.error('Dev login failed:', error); - alert('Failed to login in dev mode'); - } - }; - - return ( -
-
-
-

AnyCoder

-
- -
- {isLoading ? ( - Loading... - ) : userInfo ? ( -
- {userInfo.avatarUrl && ( - {userInfo.name} - )} - - {userInfo.preferredUsername || userInfo.name} - - -
- ) : ( -
- {/* Dev Mode Login (only on localhost) */} - {isDevMode && ( - <> - {showDevLogin ? ( -
- setDevUsername(e.target.value)} - onKeyPress={(e) => e.key === 'Enter' && handleDevLogin()} - placeholder="username" - className="px-3 py-1.5 rounded-lg text-sm bg-[#1d1d1f] text-[#f5f5f7] border border-[#424245] focus:outline-none focus:border-white/50 w-32 font-medium" - autoFocus - /> - - -
- ) : ( - - )} - or - - )} - - {/* OAuth Login */} - -
- )} -
-
-
- ); -} - diff --git a/frontend/src/components/LandingPage.tsx b/frontend/src/components/LandingPage.tsx deleted file mode 100644 index 8d7744276c05988bcab4070ae5a9b7e30229a4ad..0000000000000000000000000000000000000000 --- a/frontend/src/components/LandingPage.tsx +++ /dev/null @@ -1,1197 +0,0 @@ -'use client'; - -import { useState, useEffect, useRef } from 'react'; -import Image from 'next/image'; -import { apiClient } from '@/lib/api'; -import { - initializeOAuth, - loginWithHuggingFace, - loginDevMode, - logout, - getStoredUserInfo, - isAuthenticated as checkIsAuthenticated, - isDevelopmentMode -} from '@/lib/auth'; -import type { Model, Language } from '@/types'; -import type { OAuthUserInfo } from '@/lib/auth'; - -interface LandingPageProps { - onStart: (prompt: string, language: Language, modelId: string, imageUrl?: string, repoId?: string, shouldCreatePR?: boolean) => void; - onImport?: (code: string, language: Language, importUrl?: string) => void; - isAuthenticated: boolean; - initialLanguage?: Language; - initialModel?: string; - onAuthChange?: () => void; - setPendingPR?: (pr: { repoId: string; language: Language } | null) => void; - pendingPRRef?: React.MutableRefObject<{ repoId: string; language: Language } | null>; -} - -export default function LandingPage({ - onStart, - onImport, - isAuthenticated, - initialLanguage = 'html', - initialModel = 'zai-org/GLM-4.6', - onAuthChange, - setPendingPR, - pendingPRRef -}: LandingPageProps) { - const [prompt, setPrompt] = useState(''); - const [selectedLanguage, setSelectedLanguage] = useState(initialLanguage); - const [selectedModel, setSelectedModel] = useState(initialModel); - const [models, setModels] = useState([]); - const [languages, setLanguages] = useState([]); - const [isLoading, setIsLoading] = useState(true); - - // Auth states - const [userInfo, setUserInfo] = useState(null); - const [isAuthLoading, setIsAuthLoading] = useState(true); - const [showDevLogin, setShowDevLogin] = useState(false); - const [devUsername, setDevUsername] = useState(''); - const isDevMode = isDevelopmentMode(); - - // Dropdown states - const [showLanguageDropdown, setShowLanguageDropdown] = useState(false); - const [showModelDropdown, setShowModelDropdown] = useState(false); - const [showImportDialog, setShowImportDialog] = useState(false); - const [showRedesignDialog, setShowRedesignDialog] = useState(false); - const languageDropdownRef = useRef(null); - const modelDropdownRef = useRef(null); - const importDialogRef = useRef(null); - const redesignDialogRef = useRef(null); - - // Trending apps state - const [trendingApps, setTrendingApps] = useState([]); - - // Import project state - const [importUrl, setImportUrl] = useState(''); - const [isImporting, setIsImporting] = useState(false); - const [importError, setImportError] = useState(''); - const [importAction, setImportAction] = useState<'duplicate' | 'update' | 'pr'>('duplicate'); // Default to duplicate - const [isSpaceOwner, setIsSpaceOwner] = useState(false); // Track if user owns the space - - // Redesign project state - const [redesignUrl, setRedesignUrl] = useState(''); - const [isRedesigning, setIsRedesigning] = useState(false); - const [redesignError, setRedesignError] = useState(''); - const [createPR, setCreatePR] = useState(false); // Default to normal redesign (not PR) - - // Image upload state - const [uploadedImageUrl, setUploadedImageUrl] = useState(null); - const fileInputRef = useRef(null); - - // Debug effect for dropdown state - useEffect(() => { - 
console.log('showModelDropdown state changed to:', showModelDropdown); - }, [showModelDropdown]); - - // Debug effect for models state - useEffect(() => { - console.log('models state changed, length:', models.length, 'models:', models); - }, [models]); - - useEffect(() => { - console.log('Component mounted, initial load starting...'); - loadData(); - handleOAuthInit(); - loadTrendingApps(); - }, []); - - // Check auth status periodically to catch OAuth redirects. - // Depend on userInfo so the interval's closure sees the latest value; with an - // empty dependency array the captured userInfo would stay null forever and - // handleOAuthInit would keep firing every second even after a successful login. - useEffect(() => { - const interval = setInterval(() => { - const authenticated = checkIsAuthenticated(); - if (authenticated && !userInfo) { - handleOAuthInit(); - } - }, 1000); - return () => clearInterval(interval); - }, [userInfo]); - - const handleOAuthInit = async () => { - setIsAuthLoading(true); - try { - const oauthResult = await initializeOAuth(); - - if (oauthResult) { - setUserInfo(oauthResult.userInfo); - apiClient.setToken(oauthResult.accessToken); - if (onAuthChange) onAuthChange(); - } else { - const storedUserInfo = getStoredUserInfo(); - if (storedUserInfo) { - setUserInfo(storedUserInfo); - } - } - } catch (error) { - console.error('OAuth initialization error:', error); - } finally { - setIsAuthLoading(false); - } - }; - - const handleLogin = async () => { - try { - await loginWithHuggingFace(); - } catch (error) { - console.error('Login failed:', error); - alert('Failed to start login process. Please try again.'); - } - }; - - const handleLogout = () => { - logout(); - apiClient.logout(); - setUserInfo(null); - if (onAuthChange) onAuthChange(); - window.location.reload(); - }; - - const handleDevLogin = () => { - if (!devUsername.trim()) { - alert('Please enter a username'); - return; - } - - try { - const result = loginDevMode(devUsername); - setUserInfo(result.userInfo); - apiClient.setToken(result.accessToken); - setShowDevLogin(false); - setDevUsername(''); - if (onAuthChange) onAuthChange(); - } catch (error) { - console.error('Dev login failed:', error); - alert('Failed to login in dev mode'); - } - }; - - // Close dropdowns when clicking outside - useEffect(() => { - const handleClickOutside = (event: MouseEvent) => { - if (languageDropdownRef.current && !languageDropdownRef.current.contains(event.target as Node)) { - setShowLanguageDropdown(false); - } - if (modelDropdownRef.current && !modelDropdownRef.current.contains(event.target as Node)) { - setShowModelDropdown(false); - } - if (importDialogRef.current && !importDialogRef.current.contains(event.target as Node)) { - setShowImportDialog(false); - } - if (redesignDialogRef.current && !redesignDialogRef.current.contains(event.target as Node)) { - setShowRedesignDialog(false); - } - }; - - document.addEventListener('mousedown', handleClickOutside); - return () => { - document.removeEventListener('mousedown', handleClickOutside); - }; - }, []); - - const loadData = async () => { - console.log('loadData called'); - setIsLoading(true); - await Promise.all([loadModels(), loadLanguages()]); - setIsLoading(false); - console.log('loadData completed'); - }; - - const loadModels = async () => { - try { - console.log('Loading models...'); - const modelsList = await apiClient.getModels(); - console.log('Models loaded successfully:', modelsList); - console.log('Number of models:', modelsList.length); - setModels(modelsList); - console.log('Models state updated'); - } catch (error) { - console.error('Failed to load models:', error); - setModels([]); // Set empty array on error - } - }; - - const loadLanguages = async () => { - try { - const { languages: languagesList } = await apiClient.getLanguages(); -
setLanguages(languagesList); - } catch (error) { - console.error('Failed to load languages:', error); - } - }; - - // Check if current model supports images - // Show immediately for GLM-4.6V even before models load - const currentModelSupportsImages = - selectedModel === 'zai-org/GLM-4.6V:zai-org' || - models.find(m => m.id === selectedModel)?.supports_images || - false; - - // Debug logging - useEffect(() => { - console.log('[LandingPage] Selected model:', selectedModel); - console.log('[LandingPage] Models loaded:', models.length); - console.log('[LandingPage] Supports images:', currentModelSupportsImages); - }, [selectedModel, models, currentModelSupportsImages]); - - const loadTrendingApps = async () => { - try { - const apps = await apiClient.getTrendingAnycoderApps(); - setTrendingApps(apps); - } catch (error) { - console.error('Failed to load trending apps:', error); - } - }; - - const handleSubmit = (e: React.FormEvent) => { - e.preventDefault(); - if (prompt.trim() && isAuthenticated) { - console.log('[LandingPage Submit] Sending with image:', uploadedImageUrl ? 'Yes' : 'No'); - console.log('[LandingPage Submit] Image URL length:', uploadedImageUrl?.length || 0); - onStart(prompt.trim(), selectedLanguage, selectedModel, uploadedImageUrl || undefined); - // Clear prompt and image after sending - setPrompt(''); - setUploadedImageUrl(null); - } else if (!isAuthenticated) { - alert('Please sign in with HuggingFace first!'); - } - }; - - const handleImageUpload = (e: React.ChangeEvent) => { - const file = e.target.files?.[0]; - if (file) { - const reader = new FileReader(); - reader.onload = (event) => { - const imageUrl = event.target?.result as string; - setUploadedImageUrl(imageUrl); - }; - reader.readAsDataURL(file); - } - }; - - const removeImage = () => { - setUploadedImageUrl(null); - if (fileInputRef.current) { - fileInputRef.current.value = ''; - } - }; - - const formatLanguageName = (lang: Language) => { - if (lang === 'html') return 'HTML'; - if (lang === 'transformers.js') return 'Transformers.js'; - if (lang === 'comfyui') return 'ComfyUI'; - return lang.charAt(0).toUpperCase() + lang.slice(1); - }; - - // Check if user owns the imported space - const checkSpaceOwnership = (url: string) => { - if (!url || !userInfo?.preferredUsername) { - setIsSpaceOwner(false); - return; - } - - const spaceMatch = url.match(/huggingface\.co\/spaces\/([^\/\s\)]+)\/[^\/\s\)]+/); - if (spaceMatch) { - const spaceOwner = spaceMatch[1]; - const isOwner = spaceOwner === userInfo.preferredUsername; - setIsSpaceOwner(isOwner); - console.log('[Import] Space owner:', spaceOwner, '| Current user:', userInfo.preferredUsername, '| Is owner:', isOwner); - - // Auto-select update mode if owner, otherwise duplicate - if (isOwner) { - setImportAction('update'); - } else { - setImportAction('duplicate'); - } - } else { - setIsSpaceOwner(false); - } - }; - - const handleImportProject = async () => { - if (!importUrl.trim()) { - setImportError('Please enter a valid URL'); - return; - } - - if (!isAuthenticated) { - alert('Please sign in with HuggingFace first!'); - return; - } - - setIsImporting(true); - setImportError(''); - - try { - console.log('[Import] ========== STARTING IMPORT =========='); - console.log('[Import] Import URL:', importUrl); - console.log('[Import] Action:', importAction); - - // Extract space ID from URL - const spaceMatch = importUrl.match(/huggingface\.co\/spaces\/([^\/\s\)]+\/[^\/\s\)]+)/); - console.log('[Import] Space regex match result:', spaceMatch); - - if (spaceMatch) { -
const fromSpaceId = spaceMatch[1]; - console.log('[Import] ✅ Detected HF Space:', fromSpaceId); - - // Import the code first (always needed to load in editor) - const importResult = await apiClient.importProject(importUrl); - - if (importResult.status !== 'success') { - setImportError(importResult.message || 'Failed to import project'); - setIsImporting(false); - return; - } - - // Handle different import actions - if (importAction === 'update' && isSpaceOwner) { - // Option 1: Update existing space directly (for owners) - console.log('[Import] Owner update - loading code for direct update to:', fromSpaceId); - - if (onImport && importResult.code) { - // Pass the original space URL so future deployments update it - onImport(importResult.code, importResult.language || 'html', importUrl); - - alert(`✅ Code loaded!\n\nYou can now make changes and deploy them directly to: ${importUrl}\n\nThe code has been loaded in the editor.`); - } - - setShowImportDialog(false); - setImportUrl(''); - - } else if (importAction === 'pr') { - // Option 2: Create Pull Request - console.log('[Import] PR mode - loading code to create PR to:', fromSpaceId); - - if (onImport && importResult.code) { - // Load code in editor with the original space for PR tracking - onImport(importResult.code, importResult.language || 'html', importUrl); - - // Set pending PR state so any future code generation creates a PR - if (setPendingPR && pendingPRRef) { - const prInfo = { repoId: fromSpaceId, language: (importResult.language || 'html') as Language }; - setPendingPR(prInfo); - pendingPRRef.current = prInfo; - console.log('[Import PR] Set pending PR:', prInfo); - } - - // Show success message - alert(`✅ Code loaded in PR mode!\n\nYou can now:\n• Make manual edits in the editor\n• Generate new features with AI\n\nWhen you deploy, a Pull Request will be created to: ${fromSpaceId}`); - } - - setShowImportDialog(false); - setImportUrl(''); - - } else { - // Option 3: Duplicate space (default) - console.log('[Import] Duplicate mode - will duplicate:', fromSpaceId); - - const duplicateResult = await apiClient.duplicateSpace(fromSpaceId); - console.log('[Import] Duplicate API response:', duplicateResult); - - if (duplicateResult.success) { - console.log('[Import] ========== DUPLICATE SUCCESS =========='); - console.log('[Import] Duplicated space URL:', duplicateResult.space_url); - console.log('[Import] Duplicated space ID:', duplicateResult.space_id); - console.log('[Import] =========================================='); - - if (onImport && importResult.code) { - console.log('[Import] Calling onImport with duplicated space URL:', duplicateResult.space_url); - // Pass the duplicated space URL so it's tracked for future deployments - onImport(importResult.code, importResult.language || 'html', duplicateResult.space_url); - - // Show success message with link to duplicated space - alert(`✅ Space duplicated successfully!\n\nYour space: ${duplicateResult.space_url}\n\nThe code has been loaded in the editor. 
Any changes you deploy will update this duplicated space.`); - } - - setShowImportDialog(false); - setImportUrl(''); - } else { - setImportError(duplicateResult.message || 'Failed to duplicate space'); - } - } - } else { - // Not a Space URL - fall back to regular import - console.log('[Import] ❌ Not a HF Space URL - using regular import'); - const result = await apiClient.importProject(importUrl); - - if (result.status === 'success') { - if (onImport && result.code) { - onImport(result.code, result.language || 'html', importUrl); - } else { - const importMessage = `Imported from ${importUrl}`; - onStart(importMessage, result.language || 'html', selectedModel, undefined); - } - - setShowImportDialog(false); - setImportUrl(''); - } else { - setImportError(result.message || 'Failed to import project'); - } - } - } catch (error: any) { - console.error('Import error:', error); - setImportError(error.response?.data?.message || error.message || 'Failed to import project'); - } finally { - setIsImporting(false); - } - }; - - const handleRedesignProject = async () => { - if (!redesignUrl.trim()) { - setRedesignError('Please enter a valid URL'); - return; - } - - if (!isAuthenticated) { - alert('Please sign in with HuggingFace first!'); - return; - } - - setIsRedesigning(true); - setRedesignError(''); - - try { - // Extract space ID from URL - const spaceMatch = redesignUrl.match(/huggingface\.co\/spaces\/([^\/\s\)]+\/[^\/\s\)]+)/); - const repoId = spaceMatch ? spaceMatch[1] : null; - - if (!repoId) { - setRedesignError('Please enter a valid HuggingFace Space URL'); - setIsRedesigning(false); - return; - } - - // Import the code first - const result = await apiClient.importProject(redesignUrl); - - if (result.status !== 'success') { - setRedesignError(result.message || 'Failed to import project for redesign'); - setIsRedesigning(false); - return; - } - - if (!createPR) { - // Option 1: Redesign WITHOUT PR - Duplicate space first, then generate redesign - console.log('[Redesign] Duplicating space first:', repoId); - - try { - const duplicateResult = await apiClient.duplicateSpace(repoId); - console.log('[Redesign] Duplicate result:', duplicateResult); - - if (!duplicateResult.success) { - setRedesignError(duplicateResult.message || 'Failed to duplicate space'); - setIsRedesigning(false); - return; - } - - // Load code and trigger redesign - if (onImport && onStart) { - // Pass duplicated space URL - onImport(result.code, result.language || 'html', duplicateResult.space_url); - - // Extract duplicated space ID to pass to generation - const dupSpaceMatch = duplicateResult.space_url?.match(/huggingface\.co\/spaces\/([^\/\s\)]+\/[^\/\s\)]+)/); - const duplicatedRepoId = dupSpaceMatch ? dupSpaceMatch[1] : undefined; - - console.log('[Redesign] Duplicated space ID:', duplicatedRepoId); - - setTimeout(() => { - const isGradio = (result.language || 'html') === 'gradio'; - const redesignPrompt = `I have existing code in the editor from a duplicated space. Please redesign it to make it look better with minimal components needed, mobile friendly, and modern design. - -Current code: -\`\`\`${result.language || 'html'} -${result.code} -\`\`\` - -Please redesign this with: -- Minimal, clean components -- Mobile-first responsive design -- Modern UI/UX best practices -- Better visual hierarchy and spacing - -${isGradio ? '\n\nIMPORTANT: Only output app.py with the redesigned UI (themes, layout, styling). Do NOT modify or output any other .py files (utils.py, models.py, etc.). 
Do NOT include requirements.txt or README.md.' : ''}`; - - if (onStart) { - // Pass duplicated space ID so auto-deploy updates it - console.log('[Redesign] Calling onStart with duplicated repo ID:', duplicatedRepoId); - console.log('[Redesign] Using GLM-4.6 for redesign'); - onStart(redesignPrompt, result.language || 'html', 'zai-org/GLM-4.6', undefined, duplicatedRepoId); - } - }, 100); - - // Show success message - alert(`✅ Space duplicated!\n\nYour space: ${duplicateResult.space_url}\n\nGenerating redesign now...`); - } - - setShowRedesignDialog(false); - setRedesignUrl(''); - - } catch (dupError: any) { - console.error('[Redesign] Duplication error:', dupError); - setRedesignError(dupError.response?.data?.message || dupError.message || 'Failed to duplicate space'); - setIsRedesigning(false); - return; - } - - } else { - // Option 2: Redesign WITH PR - Import code and generate, then create PR - if (onImport && onStart) { - onImport(result.code, result.language || 'html', redesignUrl); - - setTimeout(() => { - const isGradio = (result.language || 'html') === 'gradio'; - const redesignPrompt = `I have existing code in the editor that I imported from ${redesignUrl}. Please redesign it to make it look better with minimal components needed, mobile friendly, and modern design. - -Current code: -\`\`\`${result.language || 'html'} -${result.code} -\`\`\` - -Please redesign this with: -- Minimal, clean components -- Mobile-first responsive design -- Modern UI/UX best practices -- Better visual hierarchy and spacing - -${isGradio ? '\n\nIMPORTANT: Only output app.py with the redesigned UI (themes, layout, styling). Do NOT modify or output any other .py files (utils.py, models.py, etc.). Do NOT include requirements.txt or README.md.' : ''} - -Note: After generating the redesign, I will create a Pull Request on the original space.`; - - if (onStart) { - console.log('[Redesign] Will create PR - not passing repo ID'); - console.log('[Redesign] Using GLM-4.6 for redesign'); - onStart(redesignPrompt, result.language || 'html', 'zai-org/GLM-4.6', undefined, repoId, true); // Pass true for shouldCreatePR - } - - console.log('[Redesign] Will create PR after code generation completes'); - }, 100); - - setShowRedesignDialog(false); - setRedesignUrl(''); - } else { - setRedesignError('Missing required callbacks. Please try again.'); - } - } - } catch (error: any) { - console.error('Redesign error:', error); - setRedesignError(error.response?.data?.message || error.message || 'Failed to process redesign request'); - } finally { - setIsRedesigning(false); - } - }; - - return ( -
- {/* Header - Apple style */} -
- - AnyCoder - - - {/* Auth Section */} -
- {isAuthLoading ? ( - Loading... - ) : userInfo ? ( -
- {userInfo.avatarUrl && ( - {userInfo.name} - )} - - {userInfo.preferredUsername || userInfo.name} - - -
- ) : ( -
- {/* Dev Mode Login (only on localhost) */} - {isDevMode && ( - <> - {showDevLogin ? ( -
- setDevUsername(e.target.value)} - onKeyPress={(e) => e.key === 'Enter' && handleDevLogin()} - placeholder="username" - className="px-3 py-1.5 rounded-lg text-sm bg-[#1d1d1f] text-[#f5f5f7] border border-[#424245] focus:outline-none focus:border-white/50 w-32 font-medium" - autoFocus - /> - - -
- ) : ( - - )} - or - - )} - - {/* OAuth Login */} - -
- )} -
-
- - {/* Main Content - Apple-style centered layout */} -
-
- {/* Apple-style Headline */} -
-

- Build with AnyCoder -

-

- Create apps with AI -

-
- - {/* Simple prompt form */} -
-
- {/* Image Preview */} - {uploadedImageUrl && ( -
-
- Upload preview - -
-
- )} - - {/* Textarea */} -