diff --git a/.cursorrules b/.cursorrules deleted file mode 100644 index 8fbe6def025d95d15c47f657eafbbbf0643a5ca5..0000000000000000000000000000000000000000 --- a/.cursorrules +++ /dev/null @@ -1,240 +0,0 @@ -# DeepCritical Project - Cursor Rules - -## Project-Wide Rules - -**Architecture**: Multi-agent research system using Pydantic AI for agent orchestration, supporting iterative and deep research patterns. Uses middleware for state management, budget tracking, and workflow coordination. - -**Type Safety**: ALWAYS use complete type hints. All functions must have parameter and return type annotations. Use `mypy --strict` compliance. Use `TYPE_CHECKING` imports for circular dependencies: `from typing import TYPE_CHECKING; if TYPE_CHECKING: from src.services.embeddings import EmbeddingService` - -**Async Patterns**: ALL I/O operations must be async (`async def`, `await`). Use `asyncio.gather()` for parallel operations. CPU-bound work must use `run_in_executor()`: `loop = asyncio.get_running_loop(); result = await loop.run_in_executor(None, cpu_bound_function, args)`. Never block the event loop. - -**Error Handling**: Use custom exceptions from `src/utils/exceptions.py`: `DeepCriticalError`, `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions: `raise SearchError(...) from e`. Log with structlog: `logger.error("Operation failed", error=str(e), context=value)`. - -**Logging**: Use `structlog` for ALL logging (NOT `print` or `logging`). Import: `import structlog; logger = structlog.get_logger()`. Log with structured data: `logger.info("event", key=value)`. Use appropriate levels: DEBUG, INFO, WARNING, ERROR. - -**Pydantic Models**: All data exchange uses Pydantic models from `src/utils/models.py`. Models are frozen (`model_config = {"frozen": True}`) for immutability. Use `Field()` with descriptions. Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints. - -**Code Style**: Ruff with 100-char line length. Ignore rules: `PLR0913` (too many arguments), `PLR0912` (too many branches), `PLR0911` (too many returns), `PLR2004` (magic values), `PLW0603` (global statement), `PLC0415` (lazy imports). - -**Docstrings**: Google-style docstrings for all public functions. Include Args, Returns, Raises sections. Use type hints in docstrings only if needed for clarity. - -**Testing**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). Use `respx` for httpx mocking, `pytest-mock` for general mocking. - -**State Management**: Use `ContextVar` in middleware for thread-safe isolation. Never use global mutable state (except singletons via `@lru_cache`). Use `WorkflowState` from `src/middleware/state_machine.py` for workflow state. - -**Citation Validation**: ALWAYS validate references before returning reports. Use `validate_references()` from `src/utils/citation_validator.py`. Remove hallucinated citations. Log warnings for removed citations. - ---- - -## src/agents/ - Agent Implementation Rules - -**Pattern**: All agents use Pydantic AI `Agent` class. Agents have structured output types (Pydantic models) or return strings. Use factory functions in `src/agent_factory/agents.py` for creation. 
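To make this agent pattern concrete, a minimal sketch follows (illustrative only, not the actual module contents): the output model, fallback values, and factory naming follow the rules in this document, while the placeholder model string and prompt wording are assumptions; `output_type` and `result.output` may be named `result_type` and `result.data` in older pydantic-ai releases.

```python
"""Minimal sketch of the agent pattern described above (illustrative, not the real module)."""
from datetime import datetime
from typing import Any

import structlog
from pydantic import BaseModel, Field
from pydantic_ai import Agent

logger = structlog.get_logger()

SYSTEM_PROMPT = (
    "You evaluate whether research is complete and list outstanding gaps. "
    f"Today's date is {datetime.now().strftime('%Y-%m-%d')}."
)


class KnowledgeGapOutput(BaseModel):
    """Structured output returned by the agent (frozen, as per the model rules)."""

    model_config = {"frozen": True}

    research_complete: bool = Field(description="Whether research can stop")
    outstanding_gaps: list[str] = Field(default_factory=list, description="Gaps still open")


class KnowledgeGapAgent:
    def __init__(self, model: Any | None = None) -> None:
        # The real code calls get_model() from src/agent_factory/judges.py when no model
        # is supplied; a plain model-name string is used here as a placeholder assumption.
        self._agent = Agent(
            model or "openai:gpt-4o-mini",
            output_type=KnowledgeGapOutput,  # `result_type` in older pydantic-ai versions
            system_prompt=SYSTEM_PROMPT,
            retries=3,
        )

    async def evaluate(self, query: str) -> KnowledgeGapOutput:
        if not query.strip():
            logger.warning("Empty query passed to KnowledgeGapAgent")
            return KnowledgeGapOutput(research_complete=False, outstanding_gaps=["No query given"])
        try:
            result = await self._agent.run(query)
            return result.output  # `.data` in older pydantic-ai versions
        except Exception as e:
            # Fall back rather than crash the workflow, per the error-handling rules.
            logger.error("Knowledge gap evaluation failed", error=str(e))
            return KnowledgeGapOutput(research_complete=False, outstanding_gaps=[query])


def create_knowledge_gap_agent(model: Any | None = None) -> KnowledgeGapAgent:
    return KnowledgeGapAgent(model=model)
```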
- -**Agent Structure**: -- System prompt as module-level constant (with date injection: `datetime.now().strftime("%Y-%m-%d")`) -- Agent class with `__init__(model: Any | None = None)` -- Main method (e.g., `async def evaluate()`, `async def write_report()`) -- Factory function: `def create_agent_name(model: Any | None = None) -> AgentName` - -**Model Initialization**: Use `get_model()` from `src/agent_factory/judges.py` if no model provided. Support OpenAI/Anthropic/HF Inference via settings. - -**Error Handling**: Return fallback values (e.g., `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])`) on failure. Log errors with context. Use retry logic (3 retries) in Pydantic AI Agent initialization. - -**Input Validation**: Validate query/inputs are not empty. Truncate very long inputs with warnings. Handle None values gracefully. - -**Output Types**: Use structured output types from `src/utils/models.py` (e.g., `KnowledgeGapOutput`, `AgentSelectionPlan`, `ReportDraft`). For text output (writer agents), return `str` directly. - -**Agent-Specific Rules**: -- `knowledge_gap.py`: Outputs `KnowledgeGapOutput`. Evaluates research completeness. -- `tool_selector.py`: Outputs `AgentSelectionPlan`. Selects tools (RAG/web/database). -- `writer.py`: Returns markdown string. Includes citations in numbered format. -- `long_writer.py`: Uses `ReportDraft` input/output. Handles section-by-section writing. -- `proofreader.py`: Takes `ReportDraft`, returns polished markdown. -- `thinking.py`: Returns observation string from conversation history. -- `input_parser.py`: Outputs `ParsedQuery` with research mode detection. - ---- - -## src/tools/ - Search Tool Rules - -**Protocol**: All tools implement `SearchTool` protocol from `src/tools/base.py`: `name` property and `async def search(query, max_results) -> list[Evidence]`. - -**Rate Limiting**: Use `@retry` decorator from tenacity: `@retry(stop=stop_after_attempt(3), wait=wait_exponential(...))`. Implement `_rate_limit()` method for APIs with limits. Use shared rate limiters from `src/tools/rate_limiter.py`. - -**Error Handling**: Raise `SearchError` or `RateLimitError` on failures. Handle HTTP errors (429, 500, timeout). Return empty list on non-critical errors (log warning). - -**Query Preprocessing**: Use `preprocess_query()` from `src/tools/query_utils.py` to remove noise and expand synonyms. - -**Evidence Conversion**: Convert API responses to `Evidence` objects with `Citation`. Extract metadata (title, url, date, authors). Set relevance scores (0.0-1.0). Handle missing fields gracefully. - -**Tool-Specific Rules**: -- `pubmed.py`: Use NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Parse XML with `xmltodict`. Handle single vs. multiple articles. -- `clinicaltrials.py`: Use `requests` library (NOT httpx - WAF blocks httpx). Run in thread pool: `await asyncio.to_thread(requests.get, ...)`. Filter: Only interventional studies, active/completed. -- `europepmc.py`: Handle preprint markers: `[PREPRINT - Not peer-reviewed]`. Build URLs from DOI or PMID. -- `rag_tool.py`: Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. Handles ingestion. -- `search_handler.py`: Orchestrates parallel searches across multiple tools. Uses `asyncio.gather()` with `return_exceptions=True`. Aggregates results into `SearchResult`. - ---- - -## src/middleware/ - Middleware Rules - -**State Management**: Use `ContextVar` for thread-safe isolation. `WorkflowState` uses `ContextVar[WorkflowState | None]`. 
Initialize with `init_workflow_state(embedding_service)`. Access with `get_workflow_state()` (auto-initializes if missing). - -**WorkflowState**: Tracks `evidence: list[Evidence]`, `conversation: Conversation`, `embedding_service: Any`. Methods: `add_evidence()` (deduplicates by URL), `async search_related()` (semantic search). - -**WorkflowManager**: Manages parallel research loops. Methods: `add_loop()`, `run_loops_parallel()`, `update_loop_status()`, `sync_loop_evidence_to_state()`. Uses `asyncio.gather()` for parallel execution. Handles errors per loop (don't fail all if one fails). - -**BudgetTracker**: Tracks tokens, time, iterations per loop and globally. Methods: `create_budget()`, `add_tokens()`, `start_timer()`, `update_timer()`, `increment_iteration()`, `check_budget()`, `can_continue()`. Token estimation: `estimate_tokens(text)` (~4 chars per token), `estimate_llm_call_tokens(prompt, response)`. - -**Models**: All middleware models in `src/utils/models.py`. `IterationData`, `Conversation`, `ResearchLoop`, `BudgetStatus` are used by middleware. - ---- - -## src/orchestrator/ - Orchestration Rules - -**Research Flows**: Two patterns: `IterativeResearchFlow` (single loop) and `DeepResearchFlow` (plan → parallel loops → synthesis). Both support agent chains (`use_graph=False`) and graph execution (`use_graph=True`). - -**IterativeResearchFlow**: Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete. Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent`, `JudgeHandler`. Tracks iterations, time, budget. - -**DeepResearchFlow**: Pattern: Planner → Parallel iterative loops per section → Synthesizer. Uses `PlannerAgent`, `IterativeResearchFlow` (per section), `LongWriterAgent` or `ProofreaderAgent`. Uses `WorkflowManager` for parallel execution. - -**Graph Orchestrator**: Uses Pydantic AI Graphs (when available) or agent chains (fallback). Routes based on research mode (iterative/deep/auto). Streams `AgentEvent` objects for UI. - -**State Initialization**: Always call `init_workflow_state()` before running flows. Initialize `BudgetTracker` per loop. Use `WorkflowManager` for parallel coordination. - -**Event Streaming**: Yield `AgentEvent` objects during execution. Event types: "started", "search_complete", "judge_complete", "hypothesizing", "synthesizing", "complete", "error". Include iteration numbers and data payloads. - ---- - -## src/services/ - Service Rules - -**EmbeddingService**: Local sentence-transformers (NO API key required). All operations async-safe via `run_in_executor()`. ChromaDB for vector storage. Deduplication threshold: 0.85 (85% similarity = duplicate). - -**LlamaIndexRAGService**: Uses OpenAI embeddings (requires `OPENAI_API_KEY`). Methods: `ingest_evidence()`, `retrieve()`, `query()`. Returns documents with metadata (source, title, url, date, authors). Lazy initialization with graceful fallback. - -**StatisticalAnalyzer**: Generates Python code via LLM. Executes in Modal sandbox (secure, isolated). Library versions pinned in `SANDBOX_LIBRARIES` dict. Returns `AnalysisResult` with verdict (SUPPORTED/REFUTED/INCONCLUSIVE). - -**Singleton Pattern**: Use `@lru_cache(maxsize=1)` for singletons: `@lru_cache(maxsize=1); def get_service() -> Service: return Service()`. Lazy initialization to avoid requiring dependencies at import time. - ---- - -## src/utils/ - Utility Rules - -**Models**: All Pydantic models in `src/utils/models.py`. 
Use frozen models (`model_config = {"frozen": True}`) except where mutation needed. Use `Field()` with descriptions. Validate with constraints. - -**Config**: Settings via Pydantic Settings (`src/utils/config.py`). Load from `.env` automatically. Use `settings` singleton: `from src.utils.config import settings`. Validate API keys with properties: `has_openai_key`, `has_anthropic_key`. - -**Exceptions**: Custom exception hierarchy in `src/utils/exceptions.py`. Base: `DeepCriticalError`. Specific: `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions. - -**LLM Factory**: Centralized LLM model creation in `src/utils/llm_factory.py`. Supports OpenAI, Anthropic, HF Inference. Use `get_model()` or factory functions. Check requirements before initialization. - -**Citation Validator**: Use `validate_references()` from `src/utils/citation_validator.py`. Removes hallucinated citations (URLs not in evidence). Logs warnings. Returns validated report string. - ---- - -## src/orchestrator_factory.py Rules - -**Purpose**: Factory for creating orchestrators. Supports "simple" (legacy) and "advanced" (magentic) modes. Auto-detects mode based on API key availability. - -**Pattern**: Lazy import for optional dependencies (`_get_magentic_orchestrator_class()`). Handles `ImportError` gracefully with clear error messages. - -**Mode Detection**: `_determine_mode()` checks explicit mode or auto-detects: "advanced" if `settings.has_openai_key`, else "simple". Maps "magentic" → "advanced". - -**Function Signature**: `create_orchestrator(search_handler, judge_handler, config, mode) -> Any`. Simple mode requires handlers. Advanced mode uses MagenticOrchestrator. - -**Error Handling**: Raise `ValueError` with clear messages if requirements not met. Log mode selection with structlog. - ---- - -## src/orchestrator_hierarchical.py Rules - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams. Adapts Magentic ChatAgent to SubIterationTeam protocol. - -**Pattern**: Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge`. Event-driven via callback queue. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated, but kept for compatibility). - -**Event Streaming**: Uses `asyncio.Queue` for event coordination. Yields `AgentEvent` objects. Handles event callback pattern with `asyncio.wait()`. - -**Error Handling**: Log errors with context. Yield error events. Process remaining events after task completion. - ---- - -## src/orchestrator_magentic.py Rules - -**Purpose**: Magentic-based orchestrator using ChatAgent pattern. Each agent has internal LLM. Manager orchestrates agents. - -**Pattern**: Uses `MagenticBuilder` with participants (searcher, hypothesizer, judge, reporter). Manager uses `OpenAIChatClient`. Workflow built in `_build_workflow()`. - -**Event Processing**: `_process_event()` converts Magentic events to `AgentEvent`. Handles: `MagenticOrchestratorMessageEvent`, `MagenticAgentMessageEvent`, `MagenticFinalResultEvent`, `MagenticAgentDeltaEvent`, `WorkflowOutputEvent`. - -**Text Extraction**: `_extract_text()` defensively extracts text from messages. Priority: `.content` → `.text` → `str(message)`. Handles buggy message objects. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated). - -**Requirements**: Must call `check_magentic_requirements()` in `__init__`. Requires `agent-framework-core` and OpenAI API key. 
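A rough sketch of the orchestrator factory pattern described here, assuming the documented `settings.has_openai_key` property and module paths; the constructor arguments and the `build_simple_orchestrator` helper are hypothetical placeholders, not the real implementation.

```python
"""Sketch of orchestrator mode detection and lazy import (illustrative only)."""
from typing import Any

import structlog

from src.utils.config import settings

logger = structlog.get_logger()


def _determine_mode(mode: str | None) -> str:
    """Resolve an explicit mode ('magentic' maps to 'advanced'), else auto-detect."""
    if mode:
        return "advanced" if mode == "magentic" else mode
    return "advanced" if settings.has_openai_key else "simple"


def _get_magentic_orchestrator_class() -> type[Any]:
    """Import MagenticOrchestrator lazily so agent-framework-core stays optional."""
    try:
        from src.orchestrator_magentic import MagenticOrchestrator
    except ImportError as e:
        raise ValueError(
            "Advanced mode requires agent-framework-core; install it or use mode='simple'"
        ) from e
    return MagenticOrchestrator


def build_simple_orchestrator(search_handler: Any, judge_handler: Any, config: Any) -> Any:
    """Hypothetical stand-in for constructing the legacy simple orchestrator."""
    ...


def create_orchestrator(
    search_handler: Any,
    judge_handler: Any,
    config: Any,
    mode: str | None = None,
) -> Any:
    resolved = _determine_mode(mode)
    logger.info("Selected orchestrator mode", mode=resolved)
    if resolved == "advanced":
        orchestrator_cls = _get_magentic_orchestrator_class()
        return orchestrator_cls(config=config)  # constructor signature is an assumption
    if search_handler is None or judge_handler is None:
        raise ValueError("Simple mode requires search_handler and judge_handler")
    return build_simple_orchestrator(search_handler, judge_handler, config)
```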
- -**Event Types**: Maps agent names to event types: "search" → "search_complete", "judge" → "judge_complete", "hypothes" → "hypothesizing", "report" → "synthesizing". - ---- - -## src/agent_factory/ - Factory Rules - -**Pattern**: Factory functions for creating agents and handlers. Lazy initialization for optional dependencies. Support OpenAI/Anthropic/HF Inference. - -**Judges**: `create_judge_handler()` creates `JudgeHandler` with structured output (`JudgeAssessment`). Supports `MockJudgeHandler`, `HFInferenceJudgeHandler` as fallbacks. - -**Agents**: Factory functions in `agents.py` for all Pydantic AI agents. Pattern: `create_agent_name(model: Any | None = None) -> AgentName`. Use `get_model()` if model not provided. - -**Graph Builder**: `graph_builder.py` contains utilities for building research graphs. Supports iterative and deep research graph construction. - -**Error Handling**: Raise `ConfigurationError` if required API keys missing. Log agent creation. Handle import errors gracefully. - ---- - -## src/prompts/ - Prompt Rules - -**Pattern**: System prompts stored as module-level constants. Include date injection: `datetime.now().strftime("%Y-%m-%d")`. Format evidence with truncation (1500 chars per item). - -**Judge Prompts**: In `judge.py`. Handle empty evidence case separately. Always request structured JSON output. - -**Hypothesis Prompts**: In `hypothesis.py`. Use diverse evidence selection (MMR algorithm). Sentence-aware truncation. - -**Report Prompts**: In `report.py`. Include full citation details. Use diverse evidence selection (n=20). Emphasize citation validation rules. - ---- - -## Testing Rules - -**Structure**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). - -**Mocking**: Use `respx` for httpx mocking. Use `pytest-mock` for general mocking. Mock LLM calls in unit tests (use `MockJudgeHandler`). - -**Fixtures**: Common fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response`. - -**Coverage**: Aim for >80% coverage. Test error handling, edge cases, and integration paths. - ---- - -## File-Specific Agent Rules - -**knowledge_gap.py**: Outputs `KnowledgeGapOutput`. System prompt evaluates research completeness. Handles conversation history. Returns fallback on error. - -**writer.py**: Returns markdown string. System prompt includes citation format examples. Validates inputs. Truncates long findings. Retry logic for transient failures. - -**long_writer.py**: Uses `ReportDraft` input/output. Writes sections iteratively. Reformats references (deduplicates, renumbers). Reformats section headings. - -**proofreader.py**: Takes `ReportDraft`, returns polished markdown. Removes duplicates. Adds summary. Preserves references. - -**tool_selector.py**: Outputs `AgentSelectionPlan`. System prompt lists available agents (WebSearchAgent, SiteCrawlerAgent, RAGAgent). Guidelines for when to use each. - -**thinking.py**: Returns observation string. Generates observations from conversation history. Uses query and background context. - -**input_parser.py**: Outputs `ParsedQuery`. Detects research mode (iterative/deep). Extracts entities and research questions. Improves/refines query. 
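Tying the testing rules above together, a hedged example of a mocked unit test is sketched below. The tool class, response payload shape, and `Evidence.citation.title` attribute path are assumptions for illustration, and async test support (e.g. pytest-asyncio) is assumed to be configured.

```python
"""Sketch of a fast, offline unit test following the mocking conventions above."""
import httpx
import pytest
import respx

from src.tools.europepmc import EuropePMCTool  # hypothetical import path


@pytest.mark.asyncio
@respx.mock
async def test_europepmc_search_returns_evidence() -> None:
    # Mock the upstream HTTP call so the unit test stays fast and offline.
    respx.get("https://www.ebi.ac.uk/europepmc/webservices/rest/search").mock(
        return_value=httpx.Response(
            200,
            json={"resultList": {"result": [{"title": "Example paper", "doi": "10.1000/xyz"}]}},
        )
    )

    tool = EuropePMCTool()
    evidence = await tool.search("metformin ageing", max_results=5)

    assert evidence  # at least one Evidence object converted from the mocked payload
    assert evidence[0].citation.title == "Example paper"  # attribute path is an assumption
```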
- - - - - - - diff --git a/.env.example b/.env.example deleted file mode 100644 index 442ff75d33f92422e78850b3c9d6d49af6f1d6e3..0000000000000000000000000000000000000000 --- a/.env.example +++ /dev/null @@ -1,107 +0,0 @@ -# HuggingFace -HF_TOKEN=your_huggingface_token_here - -# OpenAI (optional) -OPENAI_API_KEY=your_openai_key_here - -# Anthropic (optional) -ANTHROPIC_API_KEY=your_anthropic_key_here - -# Model names (optional - sensible defaults set in config.py) -# ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 -# OPENAI_MODEL=gpt-5.1 - - -# ============================================ -# Audio Processing Configuration (TTS) -# ============================================ -# Kokoro TTS Model Configuration -TTS_MODEL=hexgrad/Kokoro-82M -TTS_VOICE=af_heart -TTS_SPEED=1.0 -TTS_GPU=T4 -TTS_TIMEOUT=60 - -# Available TTS Voices: -# American English Female: af_heart, af_bella, af_nicole, af_aoede, af_kore, af_sarah, af_nova, af_sky, af_alloy, af_jessica, af_river -# American English Male: am_michael, am_fenrir, am_puck, am_echo, am_eric, am_liam, am_onyx, am_santa, am_adam - -# Available GPU Types (Modal): -# T4 - Cheapest, good for testing (default) -# A10 - Good balance of cost/performance -# A100 - Fastest, most expensive -# L4 - NVIDIA L4 GPU -# L40S - NVIDIA L40S GPU -# Note: GPU type is set at function definition time. Changes require app restart. - -# ============================================ -# Audio Processing Configuration (STT) -# ============================================ -# Speech-to-Text API Configuration -STT_API_URL=nvidia/canary-1b-v2 -STT_SOURCE_LANG=English -STT_TARGET_LANG=English - -# Available STT Languages: -# English, Bulgarian, Croatian, Czech, Danish, Dutch, Estonian, Finnish, French, German, Greek, Hungarian, Italian, Latvian, Lithuanian, Maltese, Polish, Portuguese, Romanian, Slovak, Slovenian, Spanish, Swedish, Russian, Ukrainian - -# ============================================ -# Audio Feature Flags -# ============================================ -ENABLE_AUDIO_INPUT=true -ENABLE_AUDIO_OUTPUT=true - -# ============================================ -# Image OCR Configuration -# ============================================ -OCR_API_URL=prithivMLmods/Multimodal-OCR3 -ENABLE_IMAGE_INPUT=true - -# ============== EMBEDDINGS ============== - -# OpenAI Embedding Model (used if LLM_PROVIDER is openai and performing RAG/Embeddings) -OPENAI_EMBEDDING_MODEL=text-embedding-3-small - -# Local Embedding Model (used for local/offline embeddings) -LOCAL_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 - -# ============== HUGGINGFACE (FREE TIER) ============== - -# HuggingFace Token - enables Llama 3.1 (best quality free model) -# Get yours at: https://huggingface.co/settings/tokens -# -# WITHOUT HF_TOKEN: Falls back to ungated models (zephyr-7b-beta) -# WITH HF_TOKEN: Uses Llama 3.1 8B Instruct (requires accepting license) -# -# For HuggingFace Spaces deployment: -# Set this as a "Secret" in Space Settings -> Variables and secrets -# Users/judges don't need their own token - the Space secret is used -# -HF_TOKEN=hf_your-token-here - -# ============== AGENT CONFIGURATION ============== - -MAX_ITERATIONS=10 -SEARCH_TIMEOUT=30 -LOG_LEVEL=INFO - -# ============================================ -# Modal Configuration (Required for TTS) -# ============================================ -# Modal credentials are required for TTS (Text-to-Speech) functionality -# Get your credentials from: https://modal.com/ -MODAL_TOKEN_ID=your_modal_token_id_here 
-MODAL_TOKEN_SECRET=your_modal_token_secret_here - -# ============== EXTERNAL SERVICES ============== - -# PubMed (optional - higher rate limits) -NCBI_API_KEY=your-ncbi-key-here - -# Vector Database (optional - for LlamaIndex RAG) -CHROMA_DB_PATH=./chroma_db -# Neo4j Knowledge Graph -NEO4J_URI=bolt://localhost:7687 -NEO4J_USER=neo4j -NEO4J_PASSWORD=your_neo4j_password_here -NEO4J_DATABASE=your_database_name diff --git a/.github/README.md b/.github/README.md deleted file mode 100644 index 8f3727f7e12fb16c26e4cc7bd30f99d7ffcf36b2..0000000000000000000000000000000000000000 --- a/.github/README.md +++ /dev/null @@ -1,56 +0,0 @@ - -> [!IMPORTANT] -> **You are reading the GitHub README!** -> -> - 📚 **Documentation**: See our [technical documentation](https://deepcritical.github.io/GradioDemo/) for detailed information -> - 📖 **Demo README**: Check out the [Demo README](../README.md) for more information -> - 🏆 **Demo**: Kindly consider using our [Free Demo](https://hf.co/DataQuests/GradioDemo) - -
- -[![GitHub](https://img.shields.io/github/stars/DeepCritical/GradioDemo?style=for-the-badge&logo=github&logoColor=white&label=🐙%20GitHub&labelColor=181717&color=181717)](https://github.com/DeepCritical/GradioDemo) -[![Documentation](https://img.shields.io/badge/Docs-0080FF?style=for-the-badge&logo=readthedocs&logoColor=white&labelColor=0080FF&color=0080FF)](https://deepcritical.github.io/GradioDemo/) -[![Demo](https://img.shields.io/badge/🚀%20Demo-FFD21E?style=for-the-badge&logo=huggingface&logoColor=white&labelColor=FFD21E&color=FFD21E)](https://huggingface.co/spaces/DataQuests/DeepCritical) -[![codecov](https://codecov.io/gh/DeepCritical/GradioDemo/graph/badge.svg?token=B1f05RCGpz)](https://codecov.io/gh/DeepCritical/GradioDemo) -[![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) -
- -## Quick Start - -### 1. Environment Setup - -```bash -# Install uv if you haven't already -pip install uv - -# Sync dependencies -uv sync --all-extras -``` - -### 2. Run the UI - -```bash -# Start the Gradio app -gradio run "src/app.py" -``` - -Open your browser to `http://localhost:7860`. - -### 3. Connect via MCP - -This application exposes a Model Context Protocol (MCP) server, allowing you to use its search tools directly from Claude Desktop or other MCP clients. - -**MCP Server URL**: `http://localhost:7860/gradio_api/mcp/` - -**Claude Desktop Configuration**: -Add this to your `claude_desktop_config.json`: -```json -{ - "mcpServers": { - "deepcritical": { - "url": "http://localhost:7860/gradio_api/mcp/" - } - } -} -``` diff --git a/.github/scripts/deploy_to_hf_space.py b/.github/scripts/deploy_to_hf_space.py deleted file mode 100644 index 0c5782714054ef8a63628ddf4bc84ffacd8e80ff..0000000000000000000000000000000000000000 --- a/.github/scripts/deploy_to_hf_space.py +++ /dev/null @@ -1,391 +0,0 @@ -"""Deploy repository to Hugging Face Space, excluding unnecessary files.""" - -import os -import shutil -import subprocess -import tempfile -from pathlib import Path - -from huggingface_hub import HfApi - - -def get_excluded_dirs() -> set[str]: - """Get set of directory names to exclude from deployment.""" - return { - "docs", - "dev", - "folder", - "site", - "tests", # Optional - can be included if desired - "examples", # Optional - can be included if desired - ".git", - ".github", - "__pycache__", - ".pytest_cache", - ".mypy_cache", - ".ruff_cache", - ".venv", - "venv", - "env", - "ENV", - "node_modules", - ".cursor", - "reference_repos", - "burner_docs", - "chroma_db", - "logs", - "build", - "dist", - ".eggs", - "htmlcov", - "hf_space", # Exclude the cloned HF Space directory itself - } - - -def get_excluded_files() -> set[str]: - """Get set of file names to exclude from deployment.""" - return { - ".pre-commit-config.yaml", - "mkdocs.yml", - "uv.lock", - "AGENTS.txt", - ".env", - ".env.local", - "*.local", - ".DS_Store", - "Thumbs.db", - "*.log", - ".coverage", - "coverage.xml", - } - - -def should_exclude(path: Path, excluded_dirs: set[str], excluded_files: set[str]) -> bool: - """Check if a path should be excluded from deployment.""" - # Check if any parent directory is excluded - for parent in path.parents: - if parent.name in excluded_dirs: - return True - - # Check if the path itself is a directory that should be excluded - if path.is_dir() and path.name in excluded_dirs: - return True - - # Check if the file name matches excluded patterns - if path.is_file(): - # Check exact match - if path.name in excluded_files: - return True - # Check pattern matches (simple wildcard support) - for pattern in excluded_files: - if "*" in pattern: - # Simple pattern matching (e.g., "*.log") - suffix = pattern.replace("*", "") - if path.name.endswith(suffix): - return True - - return False - - -def deploy_to_hf_space() -> None: - """Deploy repository to Hugging Face Space. - - Supports both user and organization Spaces: - - User Space: username/space-name - - Organization Space: organization-name/space-name - - Works with both classic tokens and fine-grained tokens. 
- """ - # Get configuration from environment variables - hf_token = os.getenv("HF_TOKEN") - hf_username = os.getenv("HF_USERNAME") # Can be username or organization name - space_name = os.getenv("HF_SPACE_NAME") - - # Check which variables are missing and provide helpful error message - missing = [] - if not hf_token: - missing.append("HF_TOKEN (should be in repository secrets)") - if not hf_username: - missing.append("HF_USERNAME (should be in repository variables)") - if not space_name: - missing.append("HF_SPACE_NAME (should be in repository variables)") - - if missing: - raise ValueError( - f"Missing required environment variables: {', '.join(missing)}\n" - f"Please configure:\n" - f" - HF_TOKEN in Settings > Secrets and variables > Actions > Secrets\n" - f" - HF_USERNAME in Settings > Secrets and variables > Actions > Variables\n" - f" - HF_SPACE_NAME in Settings > Secrets and variables > Actions > Variables" - ) - - # HF_USERNAME can be either a username or organization name - # Format: {username|organization}/{space_name} - repo_id = f"{hf_username}/{space_name}" - local_dir = "hf_space" - - print(f"🚀 Deploying to Hugging Face Space: {repo_id}") - - # Initialize HF API - api = HfApi(token=hf_token) - - # Create Space if it doesn't exist - try: - api.repo_info(repo_id=repo_id, repo_type="space", token=hf_token) - print(f"✅ Space exists: {repo_id}") - except Exception: - print(f"⚠️ Space does not exist, creating: {repo_id}") - # Create new repository - # Note: For organizations, repo_id should be "org/space-name" - # For users, repo_id should be "username/space-name" - api.create_repo( - repo_id=repo_id, # Full repo_id including owner - repo_type="space", - space_sdk="gradio", - token=hf_token, - exist_ok=True, - ) - print(f"✅ Created new Space: {repo_id}") - - # Configure Git credential helper for authentication - # This is needed for Git LFS to work properly with fine-grained tokens - print("🔐 Configuring Git credentials...") - - # Use Git credential store to store the token - # This allows Git LFS to authenticate properly - temp_dir = Path(tempfile.gettempdir()) - credential_store = temp_dir / ".git-credentials-hf" - - # Write credentials in the format: https://username:token@huggingface.co - credential_store.write_text( - f"https://{hf_username}:{hf_token}@huggingface.co\n", encoding="utf-8" - ) - try: - credential_store.chmod(0o600) # Secure permissions (Unix only) - except OSError: - # Windows doesn't support chmod, skip - pass - - # Configure Git to use the credential store - subprocess.run( - ["git", "config", "--global", "credential.helper", f"store --file={credential_store}"], - check=True, - capture_output=True, - ) - - # Also set environment variable for Git LFS - os.environ["GIT_CREDENTIAL_HELPER"] = f"store --file={credential_store}" - - # Clone repository using git - # Use the token in the URL for initial clone, but LFS will use credential store - space_url = f"https://{hf_username}:{hf_token}@huggingface.co/spaces/{repo_id}" - - if Path(local_dir).exists(): - print(f"🧹 Removing existing {local_dir} directory...") - shutil.rmtree(local_dir) - - print("📥 Cloning Space repository...") - try: - result = subprocess.run( - ["git", "clone", space_url, local_dir], - check=True, - capture_output=True, - text=True, - ) - print("✅ Cloned Space repository") - - # After clone, configure the remote to use credential helper - # This ensures future operations (like push) use the credential store - os.chdir(local_dir) - subprocess.run( - ["git", "remote", "set-url", "origin", 
f"https://huggingface.co/spaces/{repo_id}"], - check=True, - capture_output=True, - ) - os.chdir("..") - - except subprocess.CalledProcessError as e: - error_msg = e.stderr if e.stderr else e.stdout if e.stdout else "Unknown error" - print(f"❌ Failed to clone Space repository: {error_msg}") - - # Try alternative: clone with LFS skip, then fetch LFS files separately - print("🔄 Trying alternative clone method (skip LFS during clone)...") - try: - env = os.environ.copy() - env["GIT_LFS_SKIP_SMUDGE"] = "1" # Skip LFS during clone - - subprocess.run( - ["git", "clone", space_url, local_dir], - check=True, - capture_output=True, - text=True, - env=env, - ) - print("✅ Cloned Space repository (LFS skipped)") - - # Configure remote - os.chdir(local_dir) - subprocess.run( - ["git", "remote", "set-url", "origin", f"https://huggingface.co/spaces/{repo_id}"], - check=True, - capture_output=True, - ) - - # Try to fetch LFS files with proper authentication - print("📥 Fetching LFS files...") - subprocess.run( - ["git", "lfs", "pull"], - check=False, # Don't fail if LFS pull fails - we'll continue without LFS files - capture_output=True, - text=True, - ) - os.chdir("..") - print("✅ Repository cloned (LFS files may be incomplete, but deployment can continue)") - except subprocess.CalledProcessError as e2: - error_msg2 = e2.stderr if e2.stderr else e2.stdout if e2.stdout else "Unknown error" - print(f"❌ Alternative clone method also failed: {error_msg2}") - raise RuntimeError(f"Git clone failed: {error_msg}") from e - - # Get exclusion sets - excluded_dirs = get_excluded_dirs() - excluded_files = get_excluded_files() - - # Remove all existing files in HF Space (except .git) - print("🧹 Cleaning existing files...") - for item in Path(local_dir).iterdir(): - if item.name == ".git": - continue - if item.is_dir(): - shutil.rmtree(item) - else: - item.unlink() - - # Copy files from repository root - print("📦 Copying files...") - repo_root = Path(".") - files_copied = 0 - dirs_copied = 0 - - for item in repo_root.rglob("*"): - # Skip if in .git directory - if ".git" in item.parts: - continue - - # Skip if in hf_space directory (the cloned Space directory) - if "hf_space" in item.parts: - continue - - # Skip if should be excluded - if should_exclude(item, excluded_dirs, excluded_files): - continue - - # Calculate relative path - try: - rel_path = item.relative_to(repo_root) - except ValueError: - # Item is outside repo root, skip - continue - - # Skip if in excluded directory - if any(part in excluded_dirs for part in rel_path.parts): - continue - - # Destination path - dest_path = Path(local_dir) / rel_path - - # Create parent directories - dest_path.parent.mkdir(parents=True, exist_ok=True) - - # Copy file or directory - if item.is_file(): - shutil.copy2(item, dest_path) - files_copied += 1 - elif item.is_dir(): - # Directory will be created by parent mkdir, but we track it - dirs_copied += 1 - - print(f"✅ Copied {files_copied} files and {dirs_copied} directories") - - # Commit and push changes using git - print("💾 Committing changes...") - - # Change to the Space directory - original_cwd = os.getcwd() - os.chdir(local_dir) - - try: - # Configure git user (required for commit) - subprocess.run( - ["git", "config", "user.name", "github-actions[bot]"], - check=True, - capture_output=True, - ) - subprocess.run( - ["git", "config", "user.email", "github-actions[bot]@users.noreply.github.com"], - check=True, - capture_output=True, - ) - - # Add all files - subprocess.run( - ["git", "add", "."], - check=True, - 
capture_output=True, - ) - - # Check if there are changes to commit - result = subprocess.run( - ["git", "status", "--porcelain"], - check=False, - capture_output=True, - text=True, - ) - - if result.stdout.strip(): - # There are changes, commit and push - subprocess.run( - ["git", "commit", "-m", "Deploy to Hugging Face Space [skip ci]"], - check=True, - capture_output=True, - ) - print("📤 Pushing to Hugging Face Space...") - # Ensure remote URL uses credential helper (not token in URL) - subprocess.run( - ["git", "remote", "set-url", "origin", f"https://huggingface.co/spaces/{repo_id}"], - check=True, - capture_output=True, - ) - subprocess.run( - ["git", "push"], - check=True, - capture_output=True, - ) - print("✅ Deployment complete!") - else: - print("ℹ️ No changes to commit (repository is up to date)") - except subprocess.CalledProcessError as e: - error_msg = e.stderr if e.stderr else (e.stdout if e.stdout else str(e)) - if isinstance(error_msg, bytes): - error_msg = error_msg.decode("utf-8", errors="replace") - if "nothing to commit" in error_msg.lower(): - print("ℹ️ No changes to commit (repository is up to date)") - else: - print(f"⚠️ Error during git operations: {error_msg}") - raise RuntimeError(f"Git operation failed: {error_msg}") from e - finally: - # Return to original directory - os.chdir(original_cwd) - - # Clean up credential store for security - try: - if credential_store.exists(): - credential_store.unlink() - except Exception: - # Ignore cleanup errors - pass - - print(f"🎉 Successfully deployed to: https://huggingface.co/spaces/{repo_id}") - - -if __name__ == "__main__": - deploy_to_hf_space() diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 3379c544288b6a62f9b004de196aacfce4d9159f..0000000000000000000000000000000000000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,127 +0,0 @@ -name: CI - -on: - push: - branches: [main, dev, develop] - pull_request: - branches: [main, dev, develop] - -jobs: - test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.11"] - - steps: - - uses: actions/checkout@v4 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -e ".[dev]" - - - name: Lint with ruff - run: | - ruff check . --exclude tests - ruff format --check . 
--exclude tests - continue-on-error: true - - - name: Type check with mypy - run: | - mypy src - continue-on-error: true - - - name: Install embedding dependencies - run: | - pip install -e ".[embeddings]" - - - name: Run unit tests (excluding OpenAI and embedding providers) - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - run: | - pytest tests/unit/ -v -m "not openai and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term - - - name: Run local embeddings tests - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - run: | - pytest tests/ -v -m "local_embeddings" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term --cov-append || true - continue-on-error: true # Allow failures if dependencies not available - - - name: Run HuggingFace integration tests - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - run: | - pytest tests/integration/ -v -m "huggingface and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term --cov-append || true - continue-on-error: true # Allow failures if HF_TOKEN not set - - - name: Run non-OpenAI integration tests (excluding embedding providers) - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - run: | - pytest tests/integration/ -v -m "integration and not openai and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term --cov-append || true - continue-on-error: true # Allow failures if dependencies not available - - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v5 - with: - token: ${{ secrets.CODECOV_TOKEN }} - slug: DeepCritical/GradioDemo - files: ./coverage.xml - fail_ci_if_error: false - continue-on-error: true - - docs: - runs-on: ubuntu-latest - permissions: - contents: write - if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev' || github.ref == 'refs/heads/develop') - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install uv - uses: astral-sh/setup-uv@v5 - with: - version: "latest" - - - name: Install dependencies - run: | - uv sync --extra dev - - - name: Configure Git - run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git - - - name: Deploy to GitHub Pages - run: | - # mkdocs gh-deploy automatically creates .nojekyll, but let's verify - uv run mkdocs gh-deploy --force --message "Deploy docs [skip ci]" --strict - # Verify .nojekyll was created in gh-pages branch - git fetch origin gh-pages:gh-pages || true - git checkout gh-pages || true - if [ -f .nojekyll ]; then - echo "✓ .nojekyll file exists" - else - echo "⚠ .nojekyll file missing, creating it..." 
- touch .nojekyll - git add .nojekyll - git commit -m "Add .nojekyll to disable Jekyll [skip ci]" || true - git push origin gh-pages || true - fi - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/deploy-hf-space.yml b/.github/workflows/deploy-hf-space.yml deleted file mode 100644 index e22f89ab05f4f47184e769ff11426d8338285d81..0000000000000000000000000000000000000000 --- a/.github/workflows/deploy-hf-space.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Deploy to Hugging Face Space - -on: - push: - branches: [main] - workflow_dispatch: # Allow manual triggering - -jobs: - deploy: - runs-on: ubuntu-latest - permissions: - contents: read - # No write permissions needed for GitHub repo (we're pushing to HF Space) - - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install dependencies - run: | - pip install --upgrade pip - pip install huggingface-hub - - - name: Deploy to Hugging Face Space - env: - # Token from secrets (sensitive data) - HF_TOKEN: ${{ secrets.HF_TOKEN }} - # Username/Organization from repository variables (non-sensitive) - HF_USERNAME: ${{ vars.HF_USERNAME }} - # Space name from repository variables (non-sensitive) - HF_SPACE_NAME: ${{ vars.HF_SPACE_NAME }} - run: | - python .github/scripts/deploy_to_hf_space.py - - - name: Verify deployment - if: success() - run: | - echo "✅ Deployment completed successfully!" - echo "Space URL: https://huggingface.co/spaces/${{ vars.HF_USERNAME }}/${{ vars.HF_SPACE_NAME }}" - diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 9a982ecba57a76ced6b098ad431436049f052514..0000000000000000000000000000000000000000 --- a/.gitignore +++ /dev/null @@ -1,84 +0,0 @@ -folder/ -site/ -.cursor/ -.ruff_cache/ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Virtual environments -.venv/ -venv/ -ENV/ -env/ - -# IDE -.vscode/ -.idea/ -*.swp -*.swo - -# Environment -.env -.env.local -*.local - -# Claude -.claude/ - -# Burner docs (working drafts, not for commit) -burner_docs/ - -# Reference repos (clone locally, don't commit) -reference_repos/autogen-microsoft/ -reference_repos/claude-agent-sdk/ -reference_repos/pydanticai-research-agent/ -reference_repos/pubmed-mcp-server/ -reference_repos/DeepCritical/ - -# Keep the README in reference_repos -!reference_repos/README.md - -# Development directory -dev/ - -# OS -.DS_Store -Thumbs.db - -# Logs -*.log -logs/ - -# Testing -.pytest_cache/ -.mypy_cache/ -.coverage -htmlcov/ -test_output*.txt - -# Database files -chroma_db/ -*.sqlite3 - - -# Trigger rebuild Wed Nov 26 17:51:41 EST 2025 -.env diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 8b1184f5dec51a8c5abbab8bcc450b9b431a45e5..0000000000000000000000000000000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,64 +0,0 @@ -repos: - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.4 - hooks: - - id: ruff - args: [--fix, --exclude, tests] - exclude: ^reference_repos/ - - id: ruff-format - args: [--exclude, tests] - exclude: ^reference_repos/ - - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.0 - hooks: - - id: mypy - files: ^src/ - exclude: ^folder|^src/app.py - additional_dependencies: - - pydantic>=2.7 - - 
pydantic-settings>=2.2 - - tenacity>=8.2 - - pydantic-ai>=0.0.16 - args: [--ignore-missing-imports] - - - repo: local - hooks: - - id: pytest-unit - name: pytest unit tests (no OpenAI) - entry: uv - language: system - types: [python] - args: [ - "run", - "pytest", - "tests/unit/", - "-v", - "-m", - "not openai and not embedding_provider", - "--tb=short", - "-p", - "no:logfire", - ] - pass_filenames: false - always_run: true - require_serial: false - - id: pytest-local-embeddings - name: pytest local embeddings tests - entry: uv - language: system - types: [python] - args: [ - "run", - "pytest", - "tests/", - "-v", - "-m", - "local_embeddings", - "--tb=short", - "-p", - "no:logfire", - ] - pass_filenames: false - always_run: true - require_serial: false diff --git a/.pre-commit-hooks/run_pytest.ps1 b/.pre-commit-hooks/run_pytest.ps1 deleted file mode 100644 index 3df4f371b845a48ce3a1ea32e307218abbd5a033..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest.ps1 +++ /dev/null @@ -1,19 +0,0 @@ -# PowerShell pytest runner for pre-commit (Windows) -# Uses uv if available, otherwise falls back to pytest - -if (Get-Command uv -ErrorAction SilentlyContinue) { - # Sync dependencies before running tests - uv sync - uv run pytest $args -} else { - Write-Warning "uv not found, using system pytest (may have missing dependencies)" - pytest $args -} - - - - - - - - diff --git a/.pre-commit-hooks/run_pytest.sh b/.pre-commit-hooks/run_pytest.sh deleted file mode 100644 index b2a4be920113fd340631f64602c24042e8c81086..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Cross-platform pytest runner for pre-commit -# Uses uv if available, otherwise falls back to pytest - -if command -v uv >/dev/null 2>&1; then - # Sync dependencies before running tests - uv sync - uv run pytest "$@" -else - echo "Warning: uv not found, using system pytest (may have missing dependencies)" - pytest "$@" -fi - - - - - - - - diff --git a/.pre-commit-hooks/run_pytest_embeddings.ps1 b/.pre-commit-hooks/run_pytest_embeddings.ps1 deleted file mode 100644 index 47a3e32a202240c42e5a205d2afd778a23292db7..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest_embeddings.ps1 +++ /dev/null @@ -1,14 +0,0 @@ -# PowerShell wrapper to sync embeddings dependencies and run embeddings tests - -$ErrorActionPreference = "Stop" - -if (Get-Command uv -ErrorAction SilentlyContinue) { - Write-Host "Syncing embeddings dependencies..." - uv sync --extra embeddings - Write-Host "Running embeddings tests..." - uv run pytest tests/ -v -m local_embeddings --tb=short -p no:logfire -} else { - Write-Error "uv not found" - exit 1 -} - diff --git a/.pre-commit-hooks/run_pytest_embeddings.sh b/.pre-commit-hooks/run_pytest_embeddings.sh deleted file mode 100644 index 6f1b80746217244367ee86fcd7d69837df648b40..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest_embeddings.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# Wrapper script to sync embeddings dependencies and run embeddings tests - -set -e - -if command -v uv >/dev/null 2>&1; then - echo "Syncing embeddings dependencies..." - uv sync --extra embeddings - echo "Running embeddings tests..." 
- uv run pytest tests/ -v -m local_embeddings --tb=short -p no:logfire -else - echo "Error: uv not found" - exit 1 -fi - diff --git a/.pre-commit-hooks/run_pytest_unit.ps1 b/.pre-commit-hooks/run_pytest_unit.ps1 deleted file mode 100644 index c1196d22e86fe66a56d12f673c003ac88aa6b09f..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest_unit.ps1 +++ /dev/null @@ -1,14 +0,0 @@ -# PowerShell wrapper to sync dependencies and run unit tests - -$ErrorActionPreference = "Stop" - -if (Get-Command uv -ErrorAction SilentlyContinue) { - Write-Host "Syncing dependencies..." - uv sync - Write-Host "Running unit tests..." - uv run pytest tests/unit/ -v -m "not openai and not embedding_provider" --tb=short -p no:logfire -} else { - Write-Error "uv not found" - exit 1 -} - diff --git a/.pre-commit-hooks/run_pytest_unit.sh b/.pre-commit-hooks/run_pytest_unit.sh deleted file mode 100644 index 173ab1b607647ecf4b4a1de6b75abd47fc0130ec..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest_unit.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# Wrapper script to sync dependencies and run unit tests - -set -e - -if command -v uv >/dev/null 2>&1; then - echo "Syncing dependencies..." - uv sync - echo "Running unit tests..." - uv run pytest tests/unit/ -v -m "not openai and not embedding_provider" --tb=short -p no:logfire -else - echo "Error: uv not found" - exit 1 -fi - diff --git a/.pre-commit-hooks/run_pytest_with_sync.ps1 b/.pre-commit-hooks/run_pytest_with_sync.ps1 deleted file mode 100644 index 546a5096bc6e4b9a46d039f5761234022b8658dd..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest_with_sync.ps1 +++ /dev/null @@ -1,25 +0,0 @@ -# PowerShell wrapper for pytest runner -# Ensures uv is available and runs the Python script - -param( - [Parameter(Position=0)] - [string]$TestType = "unit" -) - -$ErrorActionPreference = "Stop" - -# Check if uv is available -if (-not (Get-Command uv -ErrorAction SilentlyContinue)) { - Write-Error "uv not found. Please install uv: https://github.com/astral-sh/uv" - exit 1 -} - -# Get the script directory -$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path -$PythonScript = Join-Path $ScriptDir "run_pytest_with_sync.py" - -# Run the Python script using uv -uv run python $PythonScript $TestType - -exit $LASTEXITCODE - diff --git a/.pre-commit-hooks/run_pytest_with_sync.py b/.pre-commit-hooks/run_pytest_with_sync.py deleted file mode 100644 index 70c4eb52f8239d167868ba47b2d4bb80d9ac3173..0000000000000000000000000000000000000000 --- a/.pre-commit-hooks/run_pytest_with_sync.py +++ /dev/null @@ -1,235 +0,0 @@ -#!/usr/bin/env python3 -"""Cross-platform pytest runner that syncs dependencies before running tests.""" - -import shutil -import subprocess -import sys -from pathlib import Path - - -def clean_caches(project_root: Path) -> None: - """Remove pytest and Python cache directories and files. - - Comprehensively removes all cache files and directories to ensure - clean test runs. Only scans specific directories to avoid resource - exhaustion from scanning large directories like .venv on Windows. 
- """ - # Directories to scan for caches (only project code, not dependencies) - scan_dirs = ["src", "tests", ".pre-commit-hooks"] - - # Directories to exclude (to avoid resource issues) - exclude_dirs = { - ".venv", - "venv", - "ENV", - "env", - ".git", - "node_modules", - "dist", - "build", - ".eggs", - "reference_repos", - "folder", - } - - # Comprehensive list of cache patterns to remove - cache_patterns = [ - ".pytest_cache", - "__pycache__", - "*.pyc", - "*.pyo", - "*.pyd", - ".mypy_cache", - ".ruff_cache", - ".coverage", - "coverage.xml", - "htmlcov", - ".hypothesis", # Hypothesis testing framework cache - ".tox", # Tox cache (if used) - ".cache", # General Python cache - ] - - def should_exclude(path: Path) -> bool: - """Check if a path should be excluded from cache cleanup.""" - # Check if any parent directory is in exclude list - for parent in path.parents: - if parent.name in exclude_dirs: - return True - # Check if the path itself is excluded - if path.name in exclude_dirs: - return True - return False - - cleaned = [] - - # Only scan specific directories to avoid resource exhaustion - for scan_dir in scan_dirs: - scan_path = project_root / scan_dir - if not scan_path.exists(): - continue - - for pattern in cache_patterns: - if "*" in pattern: - # Handle glob patterns for files - try: - for cache_file in scan_path.rglob(pattern): - if should_exclude(cache_file): - continue - try: - if cache_file.is_file(): - cache_file.unlink() - cleaned.append(str(cache_file.relative_to(project_root))) - except OSError: - pass # Ignore errors (file might be locked or already deleted) - except OSError: - pass # Ignore errors during directory traversal - else: - # Handle directory patterns - try: - for cache_dir in scan_path.rglob(pattern): - if should_exclude(cache_dir): - continue - try: - if cache_dir.is_dir(): - shutil.rmtree(cache_dir, ignore_errors=True) - cleaned.append(str(cache_dir.relative_to(project_root))) - except OSError: - pass # Ignore errors (directory might be locked) - except OSError: - pass # Ignore errors during directory traversal - - # Also clean root-level caches (like .pytest_cache in project root) - root_cache_patterns = [ - ".pytest_cache", - ".mypy_cache", - ".ruff_cache", - ".coverage", - "coverage.xml", - "htmlcov", - ".hypothesis", - ".tox", - ".cache", - ".pytest", - ] - for pattern in root_cache_patterns: - cache_path = project_root / pattern - if cache_path.exists(): - try: - if cache_path.is_dir(): - shutil.rmtree(cache_path, ignore_errors=True) - elif cache_path.is_file(): - cache_path.unlink() - cleaned.append(pattern) - except OSError: - pass - - # Also remove any .pyc files in root directory - try: - for pyc_file in project_root.glob("*.pyc"): - try: - pyc_file.unlink() - cleaned.append(pyc_file.name) - except OSError: - pass - except OSError: - pass - - if cleaned: - print( - f"Cleaned {len(cleaned)} cache items: {', '.join(cleaned[:10])}{'...' 
if len(cleaned) > 10 else ''}" - ) - else: - print("No cache files found to clean") - - -def run_command( - cmd: list[str], check: bool = True, shell: bool = False, cwd: str | None = None -) -> int: - """Run a command and return exit code.""" - try: - result = subprocess.run( - cmd, - check=check, - shell=shell, - cwd=cwd, - env=None, # Use current environment, uv will handle venv - ) - return result.returncode - except subprocess.CalledProcessError as e: - return e.returncode - except FileNotFoundError: - print(f"Error: Command not found: {cmd[0]}") - return 1 - - -def main() -> int: - """Main entry point.""" - import os - - # Get the project root (where pyproject.toml is) - script_dir = Path(__file__).parent - project_root = script_dir.parent - - # Change to project root to ensure uv works correctly - os.chdir(project_root) - - # Clean caches before running tests - print("Cleaning pytest and Python caches...") - clean_caches(project_root) - - # Check if uv is available - if run_command(["uv", "--version"], check=False) != 0: - print("Error: uv not found. Please install uv: https://github.com/astral-sh/uv") - return 1 - - # Parse arguments - test_type = sys.argv[1] if len(sys.argv) > 1 else "unit" - extra_args = sys.argv[2:] if len(sys.argv) > 2 else [] - - # Sync dependencies - always include dev - # Note: embeddings dependencies are now in main dependencies, not optional - # Use --extra dev for [project.optional-dependencies].dev (not --dev which is for [dependency-groups]) - sync_cmd = ["uv", "sync", "--extra", "dev"] - - print(f"Syncing dependencies for {test_type} tests...") - if run_command(sync_cmd, cwd=project_root) != 0: - return 1 - - # Build pytest command - use uv run to ensure correct environment - if test_type == "unit": - pytest_args = [ - "tests/unit/", - "-v", - "-m", - "not openai and not embedding_provider", - "--tb=short", - "-p", - "no:logfire", - "--cache-clear", # Clear pytest cache before running - ] - elif test_type == "embeddings": - pytest_args = [ - "tests/", - "-v", - "-m", - "local_embeddings", - "--tb=short", - "-p", - "no:logfire", - "--cache-clear", # Clear pytest cache before running - ] - else: - pytest_args = [] - - pytest_args.extend(extra_args) - - # Use uv run python -m pytest to ensure we use the venv's pytest - # This is more reliable than uv run pytest which might find system pytest - pytest_cmd = ["uv", "run", "python", "-m", "pytest", *pytest_args] - - print(f"Running {test_type} tests...") - return run_command(pytest_cmd, cwd=project_root) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/.python-version b/.python-version deleted file mode 100644 index 2c0733315e415bfb5e5b353f9996ecd964d395b2..0000000000000000000000000000000000000000 --- a/.python-version +++ /dev/null @@ -1 +0,0 @@ -3.11 diff --git a/AGENTS.txt b/AGENTS.txt deleted file mode 100644 index 24cb3ed4b8ac8cd3c519cbe24b640faaac1217bd..0000000000000000000000000000000000000000 --- a/AGENTS.txt +++ /dev/null @@ -1,236 +0,0 @@ -# DeepCritical Project - Rules - -## Project-Wide Rules - -**Architecture**: Multi-agent research system using Pydantic AI for agent orchestration, supporting iterative and deep research patterns. Uses middleware for state management, budget tracking, and workflow coordination. - -**Type Safety**: ALWAYS use complete type hints. All functions must have parameter and return type annotations. Use `mypy --strict` compliance. 
Use `TYPE_CHECKING` imports for circular dependencies: `from typing import TYPE_CHECKING; if TYPE_CHECKING: from src.services.embeddings import EmbeddingService` - -**Async Patterns**: ALL I/O operations must be async (`async def`, `await`). Use `asyncio.gather()` for parallel operations. CPU-bound work must use `run_in_executor()`: `loop = asyncio.get_running_loop(); result = await loop.run_in_executor(None, cpu_bound_function, args)`. Never block the event loop. - -**Error Handling**: Use custom exceptions from `src/utils/exceptions.py`: `DeepCriticalError`, `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions: `raise SearchError(...) from e`. Log with structlog: `logger.error("Operation failed", error=str(e), context=value)`. - -**Logging**: Use `structlog` for ALL logging (NOT `print` or `logging`). Import: `import structlog; logger = structlog.get_logger()`. Log with structured data: `logger.info("event", key=value)`. Use appropriate levels: DEBUG, INFO, WARNING, ERROR. - -**Pydantic Models**: All data exchange uses Pydantic models from `src/utils/models.py`. Models are frozen (`model_config = {"frozen": True}`) for immutability. Use `Field()` with descriptions. Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints. - -**Code Style**: Ruff with 100-char line length. Ignore rules: `PLR0913` (too many arguments), `PLR0912` (too many branches), `PLR0911` (too many returns), `PLR2004` (magic values), `PLW0603` (global statement), `PLC0415` (lazy imports). - -**Docstrings**: Google-style docstrings for all public functions. Include Args, Returns, Raises sections. Use type hints in docstrings only if needed for clarity. - -**Testing**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). Use `respx` for httpx mocking, `pytest-mock` for general mocking. - -**State Management**: Use `ContextVar` in middleware for thread-safe isolation. Never use global mutable state (except singletons via `@lru_cache`). Use `WorkflowState` from `src/middleware/state_machine.py` for workflow state. - -**Citation Validation**: ALWAYS validate references before returning reports. Use `validate_references()` from `src/utils/citation_validator.py`. Remove hallucinated citations. Log warnings for removed citations. - ---- - -## src/agents/ - Agent Implementation Rules - -**Pattern**: All agents use Pydantic AI `Agent` class. Agents have structured output types (Pydantic models) or return strings. Use factory functions in `src/agent_factory/agents.py` for creation. - -**Agent Structure**: -- System prompt as module-level constant (with date injection: `datetime.now().strftime("%Y-%m-%d")`) -- Agent class with `__init__(model: Any | None = None)` -- Main method (e.g., `async def evaluate()`, `async def write_report()`) -- Factory function: `def create_agent_name(model: Any | None = None) -> AgentName` - -**Model Initialization**: Use `get_model()` from `src/agent_factory/judges.py` if no model provided. Support OpenAI/Anthropic/HF Inference via settings. - -**Error Handling**: Return fallback values (e.g., `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])`) on failure. Log errors with context. Use retry logic (3 retries) in Pydantic AI Agent initialization. - -**Input Validation**: Validate query/inputs are not empty. Truncate very long inputs with warnings. Handle None values gracefully. 
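A minimal sketch of the async, exception-chaining, and structured-logging conventions above (the endpoint URL and function name are illustrative, not project code):

```python
"""Sketch of the error-handling and logging conventions (illustrative)."""
from typing import Any

import httpx
import structlog

from src.utils.exceptions import SearchError  # custom exception hierarchy per the rules

logger = structlog.get_logger()


async def fetch_records(query: str) -> dict[str, Any]:
    """Async I/O with chained exceptions and structured logging."""
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get("https://example.org/api", params={"q": query})
            response.raise_for_status()
            return response.json()
    except httpx.HTTPError as e:
        logger.error("Search request failed", error=str(e), query=query)
        # Always chain the original exception for debuggability.
        raise SearchError(f"Search failed for query: {query!r}") from e
```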
- -**Output Types**: Use structured output types from `src/utils/models.py` (e.g., `KnowledgeGapOutput`, `AgentSelectionPlan`, `ReportDraft`). For text output (writer agents), return `str` directly. - -**Agent-Specific Rules**: -- `knowledge_gap.py`: Outputs `KnowledgeGapOutput`. Evaluates research completeness. -- `tool_selector.py`: Outputs `AgentSelectionPlan`. Selects tools (RAG/web/database). -- `writer.py`: Returns markdown string. Includes citations in numbered format. -- `long_writer.py`: Uses `ReportDraft` input/output. Handles section-by-section writing. -- `proofreader.py`: Takes `ReportDraft`, returns polished markdown. -- `thinking.py`: Returns observation string from conversation history. -- `input_parser.py`: Outputs `ParsedQuery` with research mode detection. - ---- - -## src/tools/ - Search Tool Rules - -**Protocol**: All tools implement `SearchTool` protocol from `src/tools/base.py`: `name` property and `async def search(query, max_results) -> list[Evidence]`. - -**Rate Limiting**: Use `@retry` decorator from tenacity: `@retry(stop=stop_after_attempt(3), wait=wait_exponential(...))`. Implement `_rate_limit()` method for APIs with limits. Use shared rate limiters from `src/tools/rate_limiter.py`. - -**Error Handling**: Raise `SearchError` or `RateLimitError` on failures. Handle HTTP errors (429, 500, timeout). Return empty list on non-critical errors (log warning). - -**Query Preprocessing**: Use `preprocess_query()` from `src/tools/query_utils.py` to remove noise and expand synonyms. - -**Evidence Conversion**: Convert API responses to `Evidence` objects with `Citation`. Extract metadata (title, url, date, authors). Set relevance scores (0.0-1.0). Handle missing fields gracefully. - -**Tool-Specific Rules**: -- `pubmed.py`: Use NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Parse XML with `xmltodict`. Handle single vs. multiple articles. -- `clinicaltrials.py`: Use `requests` library (NOT httpx - WAF blocks httpx). Run in thread pool: `await asyncio.to_thread(requests.get, ...)`. Filter: Only interventional studies, active/completed. -- `europepmc.py`: Handle preprint markers: `[PREPRINT - Not peer-reviewed]`. Build URLs from DOI or PMID. -- `rag_tool.py`: Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. Handles ingestion. -- `search_handler.py`: Orchestrates parallel searches across multiple tools. Uses `asyncio.gather()` with `return_exceptions=True`. Aggregates results into `SearchResult`. - ---- - -## src/middleware/ - Middleware Rules - -**State Management**: Use `ContextVar` for thread-safe isolation. `WorkflowState` uses `ContextVar[WorkflowState | None]`. Initialize with `init_workflow_state(embedding_service)`. Access with `get_workflow_state()` (auto-initializes if missing). - -**WorkflowState**: Tracks `evidence: list[Evidence]`, `conversation: Conversation`, `embedding_service: Any`. Methods: `add_evidence()` (deduplicates by URL), `async search_related()` (semantic search). - -**WorkflowManager**: Manages parallel research loops. Methods: `add_loop()`, `run_loops_parallel()`, `update_loop_status()`, `sync_loop_evidence_to_state()`. Uses `asyncio.gather()` for parallel execution. Handles errors per loop (don't fail all if one fails). - -**BudgetTracker**: Tracks tokens, time, iterations per loop and globally. Methods: `create_budget()`, `add_tokens()`, `start_timer()`, `update_timer()`, `increment_iteration()`, `check_budget()`, `can_continue()`. 
Token estimation: `estimate_tokens(text)` (~4 chars per token), `estimate_llm_call_tokens(prompt, response)`. - -**Models**: All middleware models in `src/utils/models.py`. `IterationData`, `Conversation`, `ResearchLoop`, `BudgetStatus` are used by middleware. - ---- - -## src/orchestrator/ - Orchestration Rules - -**Research Flows**: Two patterns: `IterativeResearchFlow` (single loop) and `DeepResearchFlow` (plan → parallel loops → synthesis). Both support agent chains (`use_graph=False`) and graph execution (`use_graph=True`). - -**IterativeResearchFlow**: Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete. Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent`, `JudgeHandler`. Tracks iterations, time, budget. - -**DeepResearchFlow**: Pattern: Planner → Parallel iterative loops per section → Synthesizer. Uses `PlannerAgent`, `IterativeResearchFlow` (per section), `LongWriterAgent` or `ProofreaderAgent`. Uses `WorkflowManager` for parallel execution. - -**Graph Orchestrator**: Uses Pydantic AI Graphs (when available) or agent chains (fallback). Routes based on research mode (iterative/deep/auto). Streams `AgentEvent` objects for UI. - -**State Initialization**: Always call `init_workflow_state()` before running flows. Initialize `BudgetTracker` per loop. Use `WorkflowManager` for parallel coordination. - -**Event Streaming**: Yield `AgentEvent` objects during execution. Event types: "started", "search_complete", "judge_complete", "hypothesizing", "synthesizing", "complete", "error". Include iteration numbers and data payloads. - ---- - -## src/services/ - Service Rules - -**EmbeddingService**: Local sentence-transformers (NO API key required). All operations async-safe via `run_in_executor()`. ChromaDB for vector storage. Deduplication threshold: 0.85 (85% similarity = duplicate). - -**LlamaIndexRAGService**: Uses OpenAI embeddings (requires `OPENAI_API_KEY`). Methods: `ingest_evidence()`, `retrieve()`, `query()`. Returns documents with metadata (source, title, url, date, authors). Lazy initialization with graceful fallback. - -**StatisticalAnalyzer**: Generates Python code via LLM. Executes in Modal sandbox (secure, isolated). Library versions pinned in `SANDBOX_LIBRARIES` dict. Returns `AnalysisResult` with verdict (SUPPORTED/REFUTED/INCONCLUSIVE). - -**Singleton Pattern**: Use `@lru_cache(maxsize=1)` for singletons: `@lru_cache(maxsize=1); def get_service() -> Service: return Service()`. Lazy initialization to avoid requiring dependencies at import time. - ---- - -## src/utils/ - Utility Rules - -**Models**: All Pydantic models in `src/utils/models.py`. Use frozen models (`model_config = {"frozen": True}`) except where mutation needed. Use `Field()` with descriptions. Validate with constraints. - -**Config**: Settings via Pydantic Settings (`src/utils/config.py`). Load from `.env` automatically. Use `settings` singleton: `from src.utils.config import settings`. Validate API keys with properties: `has_openai_key`, `has_anthropic_key`. - -**Exceptions**: Custom exception hierarchy in `src/utils/exceptions.py`. Base: `DeepCriticalError`. Specific: `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions. - -**LLM Factory**: Centralized LLM model creation in `src/utils/llm_factory.py`. Supports OpenAI, Anthropic, HF Inference. Use `get_model()` or factory functions. Check requirements before initialization. 
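A minimal sketch of the `@lru_cache` singleton and lazy-initialization discipline referenced above (the service name and error handling are illustrative; the real factory raises `ConfigurationError` from the project's exception hierarchy):

```python
from functools import lru_cache
from typing import Any

import structlog

logger = structlog.get_logger()


@lru_cache(maxsize=1)
def get_embedding_service() -> Any:
    """Process-wide singleton, created on first call rather than at import time."""
    try:
        # Lazy import keeps the optional dependency out of module import.
        from sentence_transformers import SentenceTransformer
    except ImportError as e:
        # The real code would raise ConfigurationError from src/utils/exceptions.py.
        raise RuntimeError("Embeddings dependencies are not installed") from e

    logger.info("Initializing embedding service", model="all-MiniLM-L6-v2")
    return SentenceTransformer("all-MiniLM-L6-v2")
```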
- -**Citation Validator**: Use `validate_references()` from `src/utils/citation_validator.py`. Removes hallucinated citations (URLs not in evidence). Logs warnings. Returns validated report string. - ---- - -## src/orchestrator_factory.py Rules - -**Purpose**: Factory for creating orchestrators. Supports "simple" (legacy) and "advanced" (magentic) modes. Auto-detects mode based on API key availability. - -**Pattern**: Lazy import for optional dependencies (`_get_magentic_orchestrator_class()`). Handles `ImportError` gracefully with clear error messages. - -**Mode Detection**: `_determine_mode()` checks explicit mode or auto-detects: "advanced" if `settings.has_openai_key`, else "simple". Maps "magentic" → "advanced". - -**Function Signature**: `create_orchestrator(search_handler, judge_handler, config, mode) -> Any`. Simple mode requires handlers. Advanced mode uses MagenticOrchestrator. - -**Error Handling**: Raise `ValueError` with clear messages if requirements not met. Log mode selection with structlog. - ---- - -## src/orchestrator_hierarchical.py Rules - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams. Adapts Magentic ChatAgent to SubIterationTeam protocol. - -**Pattern**: Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge`. Event-driven via callback queue. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated, but kept for compatibility). - -**Event Streaming**: Uses `asyncio.Queue` for event coordination. Yields `AgentEvent` objects. Handles event callback pattern with `asyncio.wait()`. - -**Error Handling**: Log errors with context. Yield error events. Process remaining events after task completion. - ---- - -## src/orchestrator_magentic.py Rules - -**Purpose**: Magentic-based orchestrator using ChatAgent pattern. Each agent has internal LLM. Manager orchestrates agents. - -**Pattern**: Uses `MagenticBuilder` with participants (searcher, hypothesizer, judge, reporter). Manager uses `OpenAIChatClient`. Workflow built in `_build_workflow()`. - -**Event Processing**: `_process_event()` converts Magentic events to `AgentEvent`. Handles: `MagenticOrchestratorMessageEvent`, `MagenticAgentMessageEvent`, `MagenticFinalResultEvent`, `MagenticAgentDeltaEvent`, `WorkflowOutputEvent`. - -**Text Extraction**: `_extract_text()` defensively extracts text from messages. Priority: `.content` → `.text` → `str(message)`. Handles buggy message objects. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated). - -**Requirements**: Must call `check_magentic_requirements()` in `__init__`. Requires `agent-framework-core` and OpenAI API key. - -**Event Types**: Maps agent names to event types: "search" → "search_complete", "judge" → "judge_complete", "hypothes" → "hypothesizing", "report" → "synthesizing". - ---- - -## src/agent_factory/ - Factory Rules - -**Pattern**: Factory functions for creating agents and handlers. Lazy initialization for optional dependencies. Support OpenAI/Anthropic/HF Inference. - -**Judges**: `create_judge_handler()` creates `JudgeHandler` with structured output (`JudgeAssessment`). Supports `MockJudgeHandler`, `HFInferenceJudgeHandler` as fallbacks. - -**Agents**: Factory functions in `agents.py` for all Pydantic AI agents. Pattern: `create_agent_name(model: Any | None = None) -> AgentName`. Use `get_model()` if model not provided. 
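The mode auto-detection described for `src/orchestrator_factory.py` boils down to a small pure function; the sketch below is illustrative and takes the key-availability flag as a parameter instead of reading `settings`:

```python
import structlog

logger = structlog.get_logger()


def _determine_mode(mode: str | None, has_openai_key: bool) -> str:
    """Resolve an explicit or auto-detected orchestrator mode."""
    if mode == "magentic":  # legacy alias maps to the advanced orchestrator
        mode = "advanced"
    if mode in ("simple", "advanced"):
        logger.info("Orchestrator mode selected", mode=mode, source="explicit")
        return mode
    resolved = "advanced" if has_openai_key else "simple"
    logger.info("Orchestrator mode selected", mode=resolved, source="auto")
    return resolved


# Auto-detection falls back to the handler-based simple mode without an OpenAI key.
assert _determine_mode(None, has_openai_key=False) == "simple"
assert _determine_mode("magentic", has_openai_key=True) == "advanced"
```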
- -**Graph Builder**: `graph_builder.py` contains utilities for building research graphs. Supports iterative and deep research graph construction. - -**Error Handling**: Raise `ConfigurationError` if required API keys missing. Log agent creation. Handle import errors gracefully. - ---- - -## src/prompts/ - Prompt Rules - -**Pattern**: System prompts stored as module-level constants. Include date injection: `datetime.now().strftime("%Y-%m-%d")`. Format evidence with truncation (1500 chars per item). - -**Judge Prompts**: In `judge.py`. Handle empty evidence case separately. Always request structured JSON output. - -**Hypothesis Prompts**: In `hypothesis.py`. Use diverse evidence selection (MMR algorithm). Sentence-aware truncation. - -**Report Prompts**: In `report.py`. Include full citation details. Use diverse evidence selection (n=20). Emphasize citation validation rules. - ---- - -## Testing Rules - -**Structure**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). - -**Mocking**: Use `respx` for httpx mocking. Use `pytest-mock` for general mocking. Mock LLM calls in unit tests (use `MockJudgeHandler`). - -**Fixtures**: Common fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response`. - -**Coverage**: Aim for >80% coverage. Test error handling, edge cases, and integration paths. - ---- - -## File-Specific Agent Rules - -**knowledge_gap.py**: Outputs `KnowledgeGapOutput`. System prompt evaluates research completeness. Handles conversation history. Returns fallback on error. - -**writer.py**: Returns markdown string. System prompt includes citation format examples. Validates inputs. Truncates long findings. Retry logic for transient failures. - -**long_writer.py**: Uses `ReportDraft` input/output. Writes sections iteratively. Reformats references (deduplicates, renumbers). Reformats section headings. - -**proofreader.py**: Takes `ReportDraft`, returns polished markdown. Removes duplicates. Adds summary. Preserves references. - -**tool_selector.py**: Outputs `AgentSelectionPlan`. System prompt lists available agents (WebSearchAgent, SiteCrawlerAgent, RAGAgent). Guidelines for when to use each. - -**thinking.py**: Returns observation string. Generates observations from conversation history. Uses query and background context. - -**input_parser.py**: Outputs `ParsedQuery`. Detects research mode (iterative/deep). Extracts entities and research questions. Improves/refines query. - - - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 9a93fd1f812752141d49e4e27efee17405ed9563..0000000000000000000000000000000000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,494 +0,0 @@ -# Contributing to The DETERMINATOR - -Thank you for your interest in contributing to The DETERMINATOR! This guide will help you get started. - -## Table of Contents - -- [Git Workflow](#git-workflow) -- [Getting Started](#getting-started) -- [Development Commands](#development-commands) -- [MCP Integration](#mcp-integration) -- [Common Pitfalls](#common-pitfalls) -- [Key Principles](#key-principles) -- [Pull Request Process](#pull-request-process) - -> **Note**: Additional sections (Code Style, Error Handling, Testing, Implementation Patterns, Code Quality, and Prompt Engineering) are available as separate pages in the [documentation](https://deepcritical.github.io/GradioDemo/contributing/). 
-> **Note on Project Names**: "The DETERMINATOR" is the product name, "DeepCritical" is the organization/project name, and "determinator" is the Python package name. - -## Repository Information - -- **GitHub Repository**: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) (source of truth, PRs, code review) -- **HuggingFace Space**: [`DataQuests/DeepCritical`](https://huggingface.co/spaces/DataQuests/DeepCritical) (deployment/demo) -- **Package Name**: `determinator` (Python package name in `pyproject.toml`) - -## Git Workflow - -- `main`: Production-ready (GitHub) -- `dev`: Development integration (GitHub) -- Use feature branches: `yourname-dev` -- **NEVER** push directly to `main` or `dev` on HuggingFace -- GitHub is source of truth; HuggingFace is for deployment - -### Dual Repository Setup - -This project uses a dual repository setup: - -- **GitHub (`DeepCritical/GradioDemo`)**: Source of truth for code, PRs, and code review -- **HuggingFace (`DataQuests/DeepCritical`)**: Deployment target for the Gradio demo - -#### Remote Configuration - -When cloning, set up remotes as follows: - -```bash -# Clone from GitHub -git clone https://github.com/DeepCritical/GradioDemo.git -cd GradioDemo - -# Add HuggingFace remote (optional, for deployment) -git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/DeepCritical -``` - -**Important**: Never push directly to `main` or `dev` on HuggingFace. Always work through GitHub PRs. GitHub is the source of truth; HuggingFace is for deployment/demo only. - -## Getting Started - -1. **Fork the repository** on GitHub: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) -2. **Clone your fork**: - - ```bash - git clone https://github.com/yourusername/GradioDemo.git - cd GradioDemo - ``` - -3. **Install dependencies**: - - ```bash - uv sync --all-extras - uv run pre-commit install - ``` - -4. **Create a feature branch**: - - ```bash - git checkout -b yourname-feature-name - ``` - -5. **Make your changes** following the guidelines below -6. **Run checks**: - - ```bash - uv run ruff check src tests - uv run mypy src - uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire - ``` - -7. **Commit and push**: - - ```bash - git commit -m "Description of changes" - git push origin yourname-feature-name - ``` - -8. **Create a pull request** on GitHub - -## Package Manager - -This project uses [`uv`](https://github.com/astral-sh/uv) as the package manager. All commands should be prefixed with `uv run` to ensure they run in the correct environment. 
- -### Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync all dependencies including dev extras -uv sync --all-extras - -# Install pre-commit hooks -uv run pre-commit install -``` - -## Development Commands - -```bash -# Installation -uv sync --all-extras # Install all dependencies including dev -uv run pre-commit install # Install pre-commit hooks - -# Code Quality Checks (run all before committing) -uv run ruff check src tests # Lint with ruff -uv run ruff format src tests # Format with ruff -uv run mypy src # Type checking -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with coverage - -# Testing Commands -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire # Run unit tests (excludes OpenAI tests) -uv run pytest tests/ -v -m "huggingface" -p no:logfire # Run HuggingFace tests -uv run pytest tests/ -v -p no:logfire # Run all tests -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with terminal coverage -uv run pytest --cov=src --cov-report=html -p no:logfire # Generate HTML coverage report (opens htmlcov/index.html) - -# Documentation Commands -uv run mkdocs build # Build documentation -uv run mkdocs serve # Serve documentation locally (http://127.0.0.1:8000) -``` - -### Test Markers - -The project uses pytest markers to categorize tests. See [Testing Guidelines](docs/contributing/testing.md) for details: - -- `unit`: Unit tests (mocked, fast) -- `integration`: Integration tests (real APIs) -- `slow`: Slow tests -- `openai`: Tests requiring OpenAI API key -- `huggingface`: Tests requiring HuggingFace API key -- `embedding_provider`: Tests requiring API-based embedding providers -- `local_embeddings`: Tests using local embeddings - -**Note**: The `-p no:logfire` flag disables the logfire plugin to avoid conflicts during testing. 
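For orientation, a hypothetical test module showing how these markers combine (the markers are assumed to be registered in `pyproject.toml`; the test bodies are placeholders):

```python
import pytest


@pytest.mark.unit
def test_parsing_is_fast_and_mocked() -> None:
    # Selected by the default `-m "not openai"` run; must not touch the network.
    assert "metformin".upper() == "METFORMIN"


@pytest.mark.integration
@pytest.mark.slow
@pytest.mark.openai
def test_real_llm_roundtrip() -> None:
    # Deselected by `-m "not openai"`; only meaningful with an OPENAI_API_KEY set.
    pytest.skip("placeholder: requires a real API key")
```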
- -## Code Style & Conventions - -### Type Safety - -- **ALWAYS** use type hints for all function parameters and return types -- Use `mypy --strict` compliance (no `Any` unless absolutely necessary) -- Use `TYPE_CHECKING` imports for circular dependencies: - - -[TYPE_CHECKING Import Pattern](../src/utils/citation_validator.py) start_line:8 end_line:11 - - -### Pydantic Models - -- All data exchange uses Pydantic models (`src/utils/models.py`) -- Models are frozen (`model_config = {"frozen": True}`) for immutability -- Use `Field()` with descriptions for all model fields -- Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints - -### Async Patterns - -- **ALL** I/O operations must be async (`async def`, `await`) -- Use `asyncio.gather()` for parallel operations -- CPU-bound work (embeddings, parsing) must use `run_in_executor()`: - -```python -loop = asyncio.get_running_loop() -result = await loop.run_in_executor(None, cpu_bound_function, args) -``` - -- Never block the event loop with synchronous I/O - -### Linting - -- Ruff with 100-char line length -- Ignore rules documented in `pyproject.toml`: - - `PLR0913`: Too many arguments (agents need many params) - - `PLR0912`: Too many branches (complex orchestrator logic) - - `PLR0911`: Too many return statements (complex agent logic) - - `PLR2004`: Magic values (statistical constants) - - `PLW0603`: Global statement (singleton pattern) - - `PLC0415`: Lazy imports for optional dependencies - -### Pre-commit - -- Pre-commit hooks run automatically on commit -- Must pass: lint + typecheck + test-cov -- Install hooks with: `uv run pre-commit install` -- Note: `uv sync --all-extras` installs the pre-commit package, but you must run `uv run pre-commit install` separately to set up the git hooks - -## Error Handling & Logging - -### Exception Hierarchy - -Use custom exception hierarchy (`src/utils/exceptions.py`): - - -[Exception Hierarchy](../src/utils/exceptions.py) start_line:4 end_line:31 - - -### Error Handling Rules - -- Always chain exceptions: `raise SearchError(...) from e` -- Log errors with context using `structlog`: - -```python -logger.error("Operation failed", error=str(e), context=value) -``` - -- Never silently swallow exceptions -- Provide actionable error messages - -### Logging - -- Use `structlog` for all logging (NOT `print` or `logging`) -- Import: `import structlog; logger = structlog.get_logger()` -- Log with structured data: `logger.info("event", key=value)` -- Use appropriate levels: DEBUG, INFO, WARNING, ERROR - -### Logging Examples - -```python -logger.info("Starting search", query=query, tools=[t.name for t in tools]) -logger.warning("Search tool failed", tool=tool.name, error=str(result)) -logger.error("Assessment failed", error=str(e)) -``` - -### Error Chaining - -Always preserve exception context: - -```python -try: - result = await api_call() -except httpx.HTTPError as e: - raise SearchError(f"API call failed: {e}") from e -``` - -## Testing Requirements - -### Test Structure - -- Unit tests in `tests/unit/` (mocked, fast) -- Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`) -- Use markers: `unit`, `integration`, `slow` - -### Mocking - -- Use `respx` for httpx mocking -- Use `pytest-mock` for general mocking -- Mock LLM calls in unit tests (use `MockJudgeHandler`) -- Fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response` - -### TDD Workflow - -1. Write failing test in `tests/unit/` -2. Implement in `src/` -3. Ensure test passes -4. 
Run checks: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` - -### Test Examples - -```python -@pytest.mark.unit -async def test_pubmed_search(mock_httpx_client): - tool = PubMedTool() - results = await tool.search("metformin", max_results=5) - assert len(results) > 0 - assert all(isinstance(r, Evidence) for r in results) - -@pytest.mark.integration -async def test_real_pubmed_search(): - tool = PubMedTool() - results = await tool.search("metformin", max_results=3) - assert len(results) <= 3 -``` - -### Test Coverage - -- Run `uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` for coverage report -- Run `uv run pytest --cov=src --cov-report=html -p no:logfire` for HTML coverage report (opens `htmlcov/index.html`) -- Aim for >80% coverage on critical paths -- Exclude: `__init__.py`, `TYPE_CHECKING` blocks - -## Implementation Patterns - -### Search Tools - -All tools implement `SearchTool` protocol (`src/tools/base.py`): - -- Must have `name` property -- Must implement `async def search(query, max_results) -> list[Evidence]` -- Use `@retry` decorator from tenacity for resilience -- Rate limiting: Implement `_rate_limit()` for APIs with limits (e.g., PubMed) -- Error handling: Raise `SearchError` or `RateLimitError` on failures - -Example pattern: - -```python -class MySearchTool: - @property - def name(self) -> str: - return "mytool" - - @retry(stop=stop_after_attempt(3), wait=wait_exponential(...)) - async def search(self, query: str, max_results: int = 10) -> list[Evidence]: - # Implementation - return evidence_list -``` - -### Judge Handlers - -- Implement `JudgeHandlerProtocol` (`async def assess(question, evidence) -> JudgeAssessment`) -- Use pydantic-ai `Agent` with `output_type=JudgeAssessment` -- System prompts in `src/prompts/judge.py` -- Support fallback handlers: `MockJudgeHandler`, `HFInferenceJudgeHandler` -- Always return valid `JudgeAssessment` (never raise exceptions) - -### Agent Factory Pattern - -- Use factory functions for creating agents (`src/agent_factory/`) -- Lazy initialization for optional dependencies (e.g., embeddings, Modal) -- Check requirements before initialization: - - -[Check Magentic Requirements](../src/utils/llm_factory.py) start_line:152 end_line:170 - - -### State Management - -- **Magentic Mode**: Use `ContextVar` for thread-safe state (`src/agents/state.py`) -- **Simple Mode**: Pass state via function parameters -- Never use global mutable state (except singletons via `@lru_cache`) - -### Singleton Pattern - -Use `@lru_cache(maxsize=1)` for singletons: - - -[Singleton Pattern Example](../src/services/statistical_analyzer.py) start_line:252 end_line:255 - - -- Lazy initialization to avoid requiring dependencies at import time - -## Code Quality & Documentation - -### Docstrings - -- Google-style docstrings for all public functions -- Include Args, Returns, Raises sections -- Use type hints in docstrings only if needed for clarity - -Example: - - -[Search Method Docstring Example](../src/tools/pubmed.py) start_line:51 end_line:58 - - -### Code Comments - -- Explain WHY, not WHAT -- Document non-obvious patterns (e.g., why `requests` not `httpx` for ClinicalTrials) -- Mark critical sections: `# CRITICAL: ...` -- Document rate limiting rationale -- Explain async patterns when non-obvious - -## Prompt Engineering & Citation Validation - -### Judge Prompts - -- System prompt in `src/prompts/judge.py` -- Format 
evidence with truncation (1500 chars per item) -- Handle empty evidence case separately -- Always request structured JSON output -- Use `format_user_prompt()` and `format_empty_evidence_prompt()` helpers - -### Hypothesis Prompts - -- Use diverse evidence selection (MMR algorithm) -- Sentence-aware truncation (`truncate_at_sentence()`) -- Format: Drug → Target → Pathway → Effect -- System prompt emphasizes mechanistic reasoning -- Use `format_hypothesis_prompt()` with embeddings for diversity - -### Report Prompts - -- Include full citation details for validation -- Use diverse evidence selection (n=20) -- **CRITICAL**: Emphasize citation validation rules -- Format hypotheses with support/contradiction counts -- System prompt includes explicit JSON structure requirements - -### Citation Validation - -- **ALWAYS** validate references before returning reports -- Use `validate_references()` from `src/utils/citation_validator.py` -- Remove hallucinated citations (URLs not in evidence) -- Log warnings for removed citations -- Never trust LLM-generated citations without validation - -### Citation Validation Rules - -1. Every reference URL must EXACTLY match a provided evidence URL -2. Do NOT invent, fabricate, or hallucinate any references -3. Do NOT modify paper titles, authors, dates, or URLs -4. If unsure about a citation, OMIT it rather than guess -5. Copy URLs exactly as provided - do not create similar-looking URLs - -### Evidence Selection - -- Use `select_diverse_evidence()` for MMR-based selection -- Balance relevance vs diversity (lambda=0.7 default) -- Sentence-aware truncation preserves meaning -- Limit evidence per prompt to avoid context overflow - -## MCP Integration - -### MCP Tools - -- Functions in `src/mcp_tools.py` for Claude Desktop -- Full type hints required -- Google-style docstrings with Args/Returns sections -- Formatted string returns (markdown) - -### Gradio MCP Server - -- Enable with `mcp_server=True` in `demo.launch()` -- Endpoint: `/gradio_api/mcp/` -- Use `ssr_mode=False` to fix hydration issues in HF Spaces - -## Common Pitfalls - -1. **Blocking the event loop**: Never use sync I/O in async functions -2. **Missing type hints**: All functions must have complete type annotations -3. **Hallucinated citations**: Always validate references -4. **Global mutable state**: Use ContextVar or pass via parameters -5. **Import errors**: Lazy-load optional dependencies (magentic, modal, embeddings) -6. **Rate limiting**: Always implement for external APIs -7. **Error chaining**: Always use `from e` when raising exceptions - -## Key Principles - -1. **Type Safety First**: All code must pass `mypy --strict` -2. **Async Everything**: All I/O must be async -3. **Test-Driven**: Write tests before implementation -4. **No Hallucinations**: Validate all citations -5. **Graceful Degradation**: Support free tier (HF Inference) when no API keys -6. **Lazy Loading**: Don't require optional dependencies at import time -7. **Structured Logging**: Use structlog, never print() -8. **Error Chaining**: Always preserve exception context - -## Pull Request Process - -1. Ensure all checks pass: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` -2. Update documentation if needed -3. Add tests for new features -4. Update CHANGELOG if applicable -5. Request review from maintainers -6. Address review feedback -7. 
Wait for approval before merging - -## Project Structure - -- `src/`: Main source code -- `tests/`: Test files (`unit/` and `integration/`) -- `docs/`: Documentation source files (MkDocs) -- `examples/`: Example usage scripts -- `pyproject.toml`: Project configuration and dependencies -- `.pre-commit-config.yaml`: Pre-commit hook configuration - -## Questions? - -- Open an issue on [GitHub](https://github.com/DeepCritical/GradioDemo) -- Check existing [documentation](https://deepcritical.github.io/GradioDemo/) -- Review code examples in the codebase - -Thank you for contributing to The DETERMINATOR! diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9d6fc14dce9d1bdbc102a1479304490324313167..0000000000000000000000000000000000000000 --- a/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -# Dockerfile for DeepCritical -FROM python:3.11-slim - -# Set working directory -WORKDIR /app - -# Install system dependencies (curl needed for HEALTHCHECK) -RUN apt-get update && apt-get install -y \ - git \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Install uv -RUN pip install uv==0.5.4 - -# Copy project files -COPY pyproject.toml . -COPY uv.lock . -COPY src/ src/ -COPY README.md . - -# Install runtime dependencies only (no dev/test tools) -RUN uv sync --frozen --no-dev --extra embeddings --extra magentic - -# Create non-root user BEFORE downloading models -RUN useradd --create-home --shell /bin/bash appuser - -# Set cache directory for HuggingFace models (must be writable by appuser) -ENV HF_HOME=/app/.cache -ENV TRANSFORMERS_CACHE=/app/.cache - -# Create cache dir with correct ownership -RUN mkdir -p /app/.cache && chown -R appuser:appuser /app/.cache - -# Pre-download the embedding model during build (as appuser to set correct ownership) -USER appuser -RUN uv run python -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')" - -# Expose port -EXPOSE 7860 - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:7860/ || exit 1 - -# Set environment variables -ENV GRADIO_SERVER_NAME=0.0.0.0 -ENV GRADIO_SERVER_PORT=7860 -ENV PYTHONPATH=/app - -# Run the app -CMD ["uv", "run", "python", "-m", "src.app"] diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index a1f9be9c2733fb22fc43dfc4e5f23c62dbfb02ad..0000000000000000000000000000000000000000 --- a/LICENSE.md +++ /dev/null @@ -1,25 +0,0 @@ -# License - -DeepCritical is licensed under the MIT License. - -## MIT License - -Copyright (c) 2024 DeepCritical Team - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/README.md b/README.md index a4c0e90eae994942af1b1ea0f5cdfe967a29b8f5..e4c9a9698223d89be48e808d1f511cc4a2182141 100644 --- a/README.md +++ b/README.md @@ -1,63 +1,15 @@ --- -title: The DETERMINATOR -emoji: 🐉 -colorFrom: red -colorTo: yellow +title: DeepCritical +emoji: 📈 +colorFrom: blue +colorTo: purple sdk: gradio -sdk_version: "6.0.1" -python_version: "3.11" +sdk_version: 6.0.0 app_file: src/app.py -hf_oauth: true -hf_oauth_expiration_minutes: 480 -hf_oauth_scopes: - # Required for HuggingFace Inference API (includes all third-party providers) - # This scope grants access to: - # - HuggingFace's own Inference API - # - Third-party inference providers (nebius, together, scaleway, hyperbolic, novita, nscale, sambanova, ovh, fireworks, etc.) - # - All models available through the Inference Providers API - - inference-api - # Optional: Uncomment if you need to access user's billing information - # - read-billing -pinned: true +pinned: false license: mit -tags: - - mcp-in-action-track-enterprise - - mcp-hackathon - - deep-research - - biomedical-ai - - pydantic-ai - - llamaindex - - modal - - building-mcp-track-enterprise - - building-mcp-track-consumer - - mcp-in-action-track-enterprise - - mcp-in-action-track-consumer - - building-mcp-track-modal - - building-mcp-track-blaxel - - building-mcp-track-llama-index - - building-mcp-track-HUGGINGFACE +short_description: Deep Search for Critical Research [BigData] -> [Actionable] --- -> [!IMPORTANT] -> **You are reading the Gradio Demo README!** -> -> - 📚 **Documentation**: See our [technical documentation](https://deepcritical.github.io/GradioDemo/) for detailed information -> - 📖 **Complete README**: Check out the [Github README](.github/README.md) for setup, configuration, and contribution guidelines -> - ⚠️**This README is for our Gradio Demo Only !** +### DeepCritical -
- -[![GitHub](https://img.shields.io/github/stars/DeepCritical/GradioDemo?style=for-the-badge&logo=github&logoColor=white&label=GitHub&labelColor=181717&color=181717)](https://github.com/DeepCritical/GradioDemo) -[![Documentation](https://img.shields.io/badge/Docs-0080FF?style=for-the-badge&logo=readthedocs&logoColor=white&labelColor=0080FF&color=0080FF)](deepcritical.github.io/GradioDemo/) -[![Demo](https://img.shields.io/badge/Demo-FFD21E?style=for-the-badge&logo=huggingface&logoColor=white&labelColor=FFD21E&color=FFD21E)](https://huggingface.co/spaces/DataQuests/DeepCritical) -[![codecov](https://codecov.io/gh/DeepCritical/GradioDemo/graph/badge.svg?token=B1f05RCGpz)](https://codecov.io/gh/DeepCritical/GradioDemo) -[![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) - - -
- -# The DETERMINATOR - -## About - -The DETERMINATOR is a powerful generalist deep research agent system that stops at nothing until finding precise answers to complex questions. It uses iterative search-and-judge loops to comprehensively investigate any research question from any domain. diff --git a/deployments/README.md b/deployments/README.md deleted file mode 100644 index 3a4f4a4d8d7a3cccf6beacd56ce135117f3ad07a..0000000000000000000000000000000000000000 --- a/deployments/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Deployments - -This directory contains infrastructure deployment scripts for DeepCritical services. - -## Modal Deployments - -### TTS Service (`modal_tts.py`) - -Deploys the Kokoro TTS (Text-to-Speech) function to Modal's GPU infrastructure. - -**Deploy:** -```bash -modal deploy deployments/modal_tts.py -``` - -**Features:** -- Kokoro 82M TTS model -- GPU-accelerated (T4) -- Voice options: af_heart, af_bella, am_michael, etc. -- Configurable speech speed - -**Requirements:** -- Modal account and credentials (`MODAL_TOKEN_ID`, `MODAL_TOKEN_SECRET` in `.env`) -- GPU quota on Modal - -**After Deployment:** -The function will be available at: -- App: `deepcritical-tts` -- Function: `kokoro_tts_function` - -The main application (`src/services/tts_modal.py`) will call this deployed function. - ---- - -## Adding New Deployments - -When adding new deployment scripts: - -1. Create a new file: `deployments/.py` -2. Use Modal's app pattern: - ```python - import modal - app = modal.App("deepcritical-") - ``` -3. Document in this README -4. Test deployment: `modal deploy deployments/.py` diff --git a/deployments/modal_tts.py b/deployments/modal_tts.py deleted file mode 100644 index 9987a339f6b89eb63cd512eb594dd6a6d488f42a..0000000000000000000000000000000000000000 --- a/deployments/modal_tts.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Deploy Kokoro TTS function to Modal. - -This script deploys the TTS function to Modal so it can be called -from the main DeepCritical application. - -Usage: - modal deploy deploy_modal_tts.py - -After deployment, the function will be available at: - App: deepcritical-tts - Function: kokoro_tts_function -""" - -import modal -import numpy as np - -# Create Modal app -app = modal.App("deepcritical-tts") - -# Define Kokoro TTS dependencies -KOKORO_DEPENDENCIES = [ - "torch>=2.0.0", - "transformers>=4.30.0", - "numpy<2.0", -] - -# Create Modal image with Kokoro -tts_image = ( - modal.Image.debian_slim(python_version="3.11") - .apt_install("git") # Install git first for pip install from github - .pip_install(*KOKORO_DEPENDENCIES) - .pip_install("git+https://github.com/hexgrad/kokoro.git") -) - - -@app.function( - image=tts_image, - gpu="T4", - timeout=60, -) -def kokoro_tts_function(text: str, voice: str, speed: float) -> tuple[int, np.ndarray]: - """Modal GPU function for Kokoro TTS. - - This function runs on Modal's GPU infrastructure. 
- Based on: https://huggingface.co/spaces/hexgrad/Kokoro-TTS - - Args: - text: Text to synthesize - voice: Voice ID (e.g., af_heart, af_bella, am_michael) - speed: Speech speed multiplier (0.5-2.0) - - Returns: - Tuple of (sample_rate, audio_array) - """ - import numpy as np - - try: - import torch - from kokoro import KModel, KPipeline - - # Initialize model (cached on GPU) - model = KModel().to("cuda").eval() - pipeline = KPipeline(lang_code=voice[0]) - pack = pipeline.load_voice(voice) - - # Generate audio - accumulate all chunks - audio_chunks = [] - for _, ps, _ in pipeline(text, voice, speed): - ref_s = pack[len(ps) - 1] - audio = model(ps, ref_s, speed) - audio_chunks.append(audio.numpy()) - - # Concatenate all audio chunks - if audio_chunks: - full_audio = np.concatenate(audio_chunks) - return (24000, full_audio) - - # If no audio generated, return empty - return (24000, np.zeros(1, dtype=np.float32)) - - except ImportError as e: - raise RuntimeError( - f"Kokoro not installed: {e}. " - "Install with: pip install git+https://github.com/hexgrad/kokoro.git" - ) from e - except Exception as e: - raise RuntimeError(f"TTS synthesis failed: {e}") from e - - -# Optional: Add a test entrypoint -@app.local_entrypoint() -def test(): - """Test the TTS function.""" - print("Testing Modal TTS function...") - sample_rate, audio = kokoro_tts_function.remote("Hello, this is a test.", "af_heart", 1.0) - print(f"Generated audio: {sample_rate}Hz, shape={audio.shape}") - print("✓ TTS function works!") diff --git a/dev/.cursorrules b/dev/.cursorrules deleted file mode 100644 index 1f295e800902da3888e751d3b615b39c75aa2f19..0000000000000000000000000000000000000000 --- a/dev/.cursorrules +++ /dev/null @@ -1,241 +0,0 @@ -# DeepCritical Project - Cursor Rules - -## Project-Wide Rules - -**Architecture**: Multi-agent research system using Pydantic AI for agent orchestration, supporting iterative and deep research patterns. Uses middleware for state management, budget tracking, and workflow coordination. - -**Type Safety**: ALWAYS use complete type hints. All functions must have parameter and return type annotations. Use `mypy --strict` compliance. Use `TYPE_CHECKING` imports for circular dependencies: `from typing import TYPE_CHECKING; if TYPE_CHECKING: from src.services.embeddings import EmbeddingService` - -**Async Patterns**: ALL I/O operations must be async (`async def`, `await`). Use `asyncio.gather()` for parallel operations. CPU-bound work must use `run_in_executor()`: `loop = asyncio.get_running_loop(); result = await loop.run_in_executor(None, cpu_bound_function, args)`. Never block the event loop. - -**Error Handling**: Use custom exceptions from `src/utils/exceptions.py`: `DeepCriticalError`, `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions: `raise SearchError(...) from e`. Log with structlog: `logger.error("Operation failed", error=str(e), context=value)`. - -**Logging**: Use `structlog` for ALL logging (NOT `print` or `logging`). Import: `import structlog; logger = structlog.get_logger()`. Log with structured data: `logger.info("event", key=value)`. Use appropriate levels: DEBUG, INFO, WARNING, ERROR. - -**Pydantic Models**: All data exchange uses Pydantic models from `src/utils/models.py`. Models are frozen (`model_config = {"frozen": True}`) for immutability. Use `Field()` with descriptions. Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints. - -**Code Style**: Ruff with 100-char line length. 
Ignore rules: `PLR0913` (too many arguments), `PLR0912` (too many branches), `PLR0911` (too many returns), `PLR2004` (magic values), `PLW0603` (global statement), `PLC0415` (lazy imports). - -**Docstrings**: Google-style docstrings for all public functions. Include Args, Returns, Raises sections. Use type hints in docstrings only if needed for clarity. - -**Testing**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). Use `respx` for httpx mocking, `pytest-mock` for general mocking. - -**State Management**: Use `ContextVar` in middleware for thread-safe isolation. Never use global mutable state (except singletons via `@lru_cache`). Use `WorkflowState` from `src/middleware/state_machine.py` for workflow state. - -**Citation Validation**: ALWAYS validate references before returning reports. Use `validate_references()` from `src/utils/citation_validator.py`. Remove hallucinated citations. Log warnings for removed citations. - ---- - -## src/agents/ - Agent Implementation Rules - -**Pattern**: All agents use Pydantic AI `Agent` class. Agents have structured output types (Pydantic models) or return strings. Use factory functions in `src/agent_factory/agents.py` for creation. - -**Agent Structure**: -- System prompt as module-level constant (with date injection: `datetime.now().strftime("%Y-%m-%d")`) -- Agent class with `__init__(model: Any | None = None)` -- Main method (e.g., `async def evaluate()`, `async def write_report()`) -- Factory function: `def create_agent_name(model: Any | None = None) -> AgentName` - -**Model Initialization**: Use `get_model()` from `src/agent_factory/judges.py` if no model provided. Support OpenAI/Anthropic/HF Inference via settings. - -**Error Handling**: Return fallback values (e.g., `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])`) on failure. Log errors with context. Use retry logic (3 retries) in Pydantic AI Agent initialization. - -**Input Validation**: Validate query/inputs are not empty. Truncate very long inputs with warnings. Handle None values gracefully. - -**Output Types**: Use structured output types from `src/utils/models.py` (e.g., `KnowledgeGapOutput`, `AgentSelectionPlan`, `ReportDraft`). For text output (writer agents), return `str` directly. - -**Agent-Specific Rules**: -- `knowledge_gap.py`: Outputs `KnowledgeGapOutput`. Evaluates research completeness. -- `tool_selector.py`: Outputs `AgentSelectionPlan`. Selects tools (RAG/web/database). -- `writer.py`: Returns markdown string. Includes citations in numbered format. -- `long_writer.py`: Uses `ReportDraft` input/output. Handles section-by-section writing. -- `proofreader.py`: Takes `ReportDraft`, returns polished markdown. -- `thinking.py`: Returns observation string from conversation history. -- `input_parser.py`: Outputs `ParsedQuery` with research mode detection. - ---- - -## src/tools/ - Search Tool Rules - -**Protocol**: All tools implement `SearchTool` protocol from `src/tools/base.py`: `name` property and `async def search(query, max_results) -> list[Evidence]`. - -**Rate Limiting**: Use `@retry` decorator from tenacity: `@retry(stop=stop_after_attempt(3), wait=wait_exponential(...))`. Implement `_rate_limit()` method for APIs with limits. Use shared rate limiters from `src/tools/rate_limiter.py`. - -**Error Handling**: Raise `SearchError` or `RateLimitError` on failures. Handle HTTP errors (429, 500, timeout). Return empty list on non-critical errors (log warning). 
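Putting the protocol, retry, and rate-limit rules together (and previewing the evidence conversion described just below), a self-contained sketch — the `Evidence`/`Citation` classes are simplified stand-ins for `src/utils/models.py`, and the endpoint is illustrative:

```python
import asyncio
import time

import httpx
from pydantic import BaseModel
from tenacity import retry, stop_after_attempt, wait_exponential


class Citation(BaseModel):  # stand-in for the real Citation model
    title: str
    url: str


class Evidence(BaseModel):  # stand-in for the real Evidence model
    content: str
    citation: Citation
    relevance: float = 0.5


class ExampleSearchTool:
    """A SearchTool-protocol implementation with retry and naive rate limiting."""

    _MIN_INTERVAL = 0.34  # seconds between requests (PubMed-style limit)

    def __init__(self) -> None:
        self._last_request = 0.0

    @property
    def name(self) -> str:
        return "example"

    async def _rate_limit(self) -> None:
        # Sleep just long enough to respect the minimum request interval.
        wait = self._MIN_INTERVAL - (time.monotonic() - self._last_request)
        if wait > 0:
            await asyncio.sleep(wait)
        self._last_request = time.monotonic()

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=8))
    async def search(self, query: str, max_results: int = 10) -> list[Evidence]:
        await self._rate_limit()
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.get(
                "https://example.org/api/search",  # illustrative endpoint
                params={"q": query, "limit": max_results},
            )
            resp.raise_for_status()
        # Convert API records to Evidence objects, tolerating missing fields.
        return [
            Evidence(
                content=item.get("abstract", ""),
                citation=Citation(title=item.get("title", "Untitled"), url=item["url"]),
                relevance=float(item.get("score", 0.5)),
            )
            for item in resp.json().get("results", [])
        ]
```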
- -**Query Preprocessing**: Use `preprocess_query()` from `src/tools/query_utils.py` to remove noise and expand synonyms. - -**Evidence Conversion**: Convert API responses to `Evidence` objects with `Citation`. Extract metadata (title, url, date, authors). Set relevance scores (0.0-1.0). Handle missing fields gracefully. - -**Tool-Specific Rules**: -- `pubmed.py`: Use NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Parse XML with `xmltodict`. Handle single vs. multiple articles. -- `clinicaltrials.py`: Use `requests` library (NOT httpx - WAF blocks httpx). Run in thread pool: `await asyncio.to_thread(requests.get, ...)`. Filter: Only interventional studies, active/completed. -- `europepmc.py`: Handle preprint markers: `[PREPRINT - Not peer-reviewed]`. Build URLs from DOI or PMID. -- `rag_tool.py`: Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. Handles ingestion. -- `search_handler.py`: Orchestrates parallel searches across multiple tools. Uses `asyncio.gather()` with `return_exceptions=True`. Aggregates results into `SearchResult`. - ---- - -## src/middleware/ - Middleware Rules - -**State Management**: Use `ContextVar` for thread-safe isolation. `WorkflowState` uses `ContextVar[WorkflowState | None]`. Initialize with `init_workflow_state(embedding_service)`. Access with `get_workflow_state()` (auto-initializes if missing). - -**WorkflowState**: Tracks `evidence: list[Evidence]`, `conversation: Conversation`, `embedding_service: Any`. Methods: `add_evidence()` (deduplicates by URL), `async search_related()` (semantic search). - -**WorkflowManager**: Manages parallel research loops. Methods: `add_loop()`, `run_loops_parallel()`, `update_loop_status()`, `sync_loop_evidence_to_state()`. Uses `asyncio.gather()` for parallel execution. Handles errors per loop (don't fail all if one fails). - -**BudgetTracker**: Tracks tokens, time, iterations per loop and globally. Methods: `create_budget()`, `add_tokens()`, `start_timer()`, `update_timer()`, `increment_iteration()`, `check_budget()`, `can_continue()`. Token estimation: `estimate_tokens(text)` (~4 chars per token), `estimate_llm_call_tokens(prompt, response)`. - -**Models**: All middleware models in `src/utils/models.py`. `IterationData`, `Conversation`, `ResearchLoop`, `BudgetStatus` are used by middleware. - ---- - -## src/orchestrator/ - Orchestration Rules - -**Research Flows**: Two patterns: `IterativeResearchFlow` (single loop) and `DeepResearchFlow` (plan → parallel loops → synthesis). Both support agent chains (`use_graph=False`) and graph execution (`use_graph=True`). - -**IterativeResearchFlow**: Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete. Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent`, `JudgeHandler`. Tracks iterations, time, budget. - -**DeepResearchFlow**: Pattern: Planner → Parallel iterative loops per section → Synthesizer. Uses `PlannerAgent`, `IterativeResearchFlow` (per section), `LongWriterAgent` or `ProofreaderAgent`. Uses `WorkflowManager` for parallel execution. - -**Graph Orchestrator**: Uses Pydantic AI Graphs (when available) or agent chains (fallback). Routes based on research mode (iterative/deep/auto). Streams `AgentEvent` objects for UI. - -**State Initialization**: Always call `init_workflow_state()` before running flows. Initialize `BudgetTracker` per loop. Use `WorkflowManager` for parallel coordination. - -**Event Streaming**: Yield `AgentEvent` objects during execution. 
Event types: "started", "search_complete", "judge_complete", "hypothesizing", "synthesizing", "complete", "error". Include iteration numbers and data payloads. - ---- - -## src/services/ - Service Rules - -**EmbeddingService**: Local sentence-transformers (NO API key required). All operations async-safe via `run_in_executor()`. ChromaDB for vector storage. Deduplication threshold: 0.85 (85% similarity = duplicate). - -**LlamaIndexRAGService**: Uses OpenAI embeddings (requires `OPENAI_API_KEY`). Methods: `ingest_evidence()`, `retrieve()`, `query()`. Returns documents with metadata (source, title, url, date, authors). Lazy initialization with graceful fallback. - -**StatisticalAnalyzer**: Generates Python code via LLM. Executes in Modal sandbox (secure, isolated). Library versions pinned in `SANDBOX_LIBRARIES` dict. Returns `AnalysisResult` with verdict (SUPPORTED/REFUTED/INCONCLUSIVE). - -**Singleton Pattern**: Use `@lru_cache(maxsize=1)` for singletons: `@lru_cache(maxsize=1); def get_service() -> Service: return Service()`. Lazy initialization to avoid requiring dependencies at import time. - ---- - -## src/utils/ - Utility Rules - -**Models**: All Pydantic models in `src/utils/models.py`. Use frozen models (`model_config = {"frozen": True}`) except where mutation needed. Use `Field()` with descriptions. Validate with constraints. - -**Config**: Settings via Pydantic Settings (`src/utils/config.py`). Load from `.env` automatically. Use `settings` singleton: `from src.utils.config import settings`. Validate API keys with properties: `has_openai_key`, `has_anthropic_key`. - -**Exceptions**: Custom exception hierarchy in `src/utils/exceptions.py`. Base: `DeepCriticalError`. Specific: `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions. - -**LLM Factory**: Centralized LLM model creation in `src/utils/llm_factory.py`. Supports OpenAI, Anthropic, HF Inference. Use `get_model()` or factory functions. Check requirements before initialization. - -**Citation Validator**: Use `validate_references()` from `src/utils/citation_validator.py`. Removes hallucinated citations (URLs not in evidence). Logs warnings. Returns validated report string. - ---- - -## src/orchestrator_factory.py Rules - -**Purpose**: Factory for creating orchestrators. Supports "simple" (legacy) and "advanced" (magentic) modes. Auto-detects mode based on API key availability. - -**Pattern**: Lazy import for optional dependencies (`_get_magentic_orchestrator_class()`). Handles `ImportError` gracefully with clear error messages. - -**Mode Detection**: `_determine_mode()` checks explicit mode or auto-detects: "advanced" if `settings.has_openai_key`, else "simple". Maps "magentic" → "advanced". - -**Function Signature**: `create_orchestrator(search_handler, judge_handler, config, mode) -> Any`. Simple mode requires handlers. Advanced mode uses MagenticOrchestrator. - -**Error Handling**: Raise `ValueError` with clear messages if requirements not met. Log mode selection with structlog. - ---- - -## src/orchestrator_hierarchical.py Rules - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams. Adapts Magentic ChatAgent to SubIterationTeam protocol. - -**Pattern**: Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge`. Event-driven via callback queue. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated, but kept for compatibility). 
- -**Event Streaming**: Uses `asyncio.Queue` for event coordination. Yields `AgentEvent` objects. Handles event callback pattern with `asyncio.wait()`. - -**Error Handling**: Log errors with context. Yield error events. Process remaining events after task completion. - ---- - -## src/orchestrator_magentic.py Rules - -**Purpose**: Magentic-based orchestrator using ChatAgent pattern. Each agent has internal LLM. Manager orchestrates agents. - -**Pattern**: Uses `MagenticBuilder` with participants (searcher, hypothesizer, judge, reporter). Manager uses `OpenAIChatClient`. Workflow built in `_build_workflow()`. - -**Event Processing**: `_process_event()` converts Magentic events to `AgentEvent`. Handles: `MagenticOrchestratorMessageEvent`, `MagenticAgentMessageEvent`, `MagenticFinalResultEvent`, `MagenticAgentDeltaEvent`, `WorkflowOutputEvent`. - -**Text Extraction**: `_extract_text()` defensively extracts text from messages. Priority: `.content` → `.text` → `str(message)`. Handles buggy message objects. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated). - -**Requirements**: Must call `check_magentic_requirements()` in `__init__`. Requires `agent-framework-core` and OpenAI API key. - -**Event Types**: Maps agent names to event types: "search" → "search_complete", "judge" → "judge_complete", "hypothes" → "hypothesizing", "report" → "synthesizing". - ---- - -## src/agent_factory/ - Factory Rules - -**Pattern**: Factory functions for creating agents and handlers. Lazy initialization for optional dependencies. Support OpenAI/Anthropic/HF Inference. - -**Judges**: `create_judge_handler()` creates `JudgeHandler` with structured output (`JudgeAssessment`). Supports `MockJudgeHandler`, `HFInferenceJudgeHandler` as fallbacks. - -**Agents**: Factory functions in `agents.py` for all Pydantic AI agents. Pattern: `create_agent_name(model: Any | None = None) -> AgentName`. Use `get_model()` if model not provided. - -**Graph Builder**: `graph_builder.py` contains utilities for building research graphs. Supports iterative and deep research graph construction. - -**Error Handling**: Raise `ConfigurationError` if required API keys missing. Log agent creation. Handle import errors gracefully. - ---- - -## src/prompts/ - Prompt Rules - -**Pattern**: System prompts stored as module-level constants. Include date injection: `datetime.now().strftime("%Y-%m-%d")`. Format evidence with truncation (1500 chars per item). - -**Judge Prompts**: In `judge.py`. Handle empty evidence case separately. Always request structured JSON output. - -**Hypothesis Prompts**: In `hypothesis.py`. Use diverse evidence selection (MMR algorithm). Sentence-aware truncation. - -**Report Prompts**: In `report.py`. Include full citation details. Use diverse evidence selection (n=20). Emphasize citation validation rules. - ---- - -## Testing Rules - -**Structure**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). - -**Mocking**: Use `respx` for httpx mocking. Use `pytest-mock` for general mocking. Mock LLM calls in unit tests (use `MockJudgeHandler`). - -**Fixtures**: Common fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response`. - -**Coverage**: Aim for >80% coverage. Test error handling, edge cases, and integration paths. - ---- - -## File-Specific Agent Rules - -**knowledge_gap.py**: Outputs `KnowledgeGapOutput`. System prompt evaluates research completeness. 
Handles conversation history. Returns fallback on error. - -**writer.py**: Returns markdown string. System prompt includes citation format examples. Validates inputs. Truncates long findings. Retry logic for transient failures. - -**long_writer.py**: Uses `ReportDraft` input/output. Writes sections iteratively. Reformats references (deduplicates, renumbers). Reformats section headings. - -**proofreader.py**: Takes `ReportDraft`, returns polished markdown. Removes duplicates. Adds summary. Preserves references. - -**tool_selector.py**: Outputs `AgentSelectionPlan`. System prompt lists available agents (WebSearchAgent, SiteCrawlerAgent, RAGAgent). Guidelines for when to use each. - -**thinking.py**: Returns observation string. Generates observations from conversation history. Uses query and background context. - -**input_parser.py**: Outputs `ParsedQuery`. Detects research mode (iterative/deep). Extracts entities and research questions. Improves/refines query. - - - - - - - - diff --git a/dev/AGENTS.txt b/dev/AGENTS.txt deleted file mode 100644 index 24cb3ed4b8ac8cd3c519cbe24b640faaac1217bd..0000000000000000000000000000000000000000 --- a/dev/AGENTS.txt +++ /dev/null @@ -1,236 +0,0 @@ -# DeepCritical Project - Rules - -## Project-Wide Rules - -**Architecture**: Multi-agent research system using Pydantic AI for agent orchestration, supporting iterative and deep research patterns. Uses middleware for state management, budget tracking, and workflow coordination. - -**Type Safety**: ALWAYS use complete type hints. All functions must have parameter and return type annotations. Use `mypy --strict` compliance. Use `TYPE_CHECKING` imports for circular dependencies: `from typing import TYPE_CHECKING; if TYPE_CHECKING: from src.services.embeddings import EmbeddingService` - -**Async Patterns**: ALL I/O operations must be async (`async def`, `await`). Use `asyncio.gather()` for parallel operations. CPU-bound work must use `run_in_executor()`: `loop = asyncio.get_running_loop(); result = await loop.run_in_executor(None, cpu_bound_function, args)`. Never block the event loop. - -**Error Handling**: Use custom exceptions from `src/utils/exceptions.py`: `DeepCriticalError`, `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions: `raise SearchError(...) from e`. Log with structlog: `logger.error("Operation failed", error=str(e), context=value)`. - -**Logging**: Use `structlog` for ALL logging (NOT `print` or `logging`). Import: `import structlog; logger = structlog.get_logger()`. Log with structured data: `logger.info("event", key=value)`. Use appropriate levels: DEBUG, INFO, WARNING, ERROR. - -**Pydantic Models**: All data exchange uses Pydantic models from `src/utils/models.py`. Models are frozen (`model_config = {"frozen": True}`) for immutability. Use `Field()` with descriptions. Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints. - -**Code Style**: Ruff with 100-char line length. Ignore rules: `PLR0913` (too many arguments), `PLR0912` (too many branches), `PLR0911` (too many returns), `PLR2004` (magic values), `PLW0603` (global statement), `PLC0415` (lazy imports). - -**Docstrings**: Google-style docstrings for all public functions. Include Args, Returns, Raises sections. Use type hints in docstrings only if needed for clarity. - -**Testing**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). Use `respx` for httpx mocking, `pytest-mock` for general mocking. 
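A minimal example of the `respx` mocking style for unit tests (the URL and payload are made up; the point is that no real HTTP traffic leaves the test):

```python
import httpx
import pytest
import respx


@pytest.mark.unit
@respx.mock
def test_search_endpoint_is_mocked() -> None:
    # respx intercepts the httpx transport, so this never hits the network.
    respx.get("https://example.org/api/search").mock(
        return_value=httpx.Response(200, json={"results": [{"title": "stub"}]})
    )
    resp = httpx.get("https://example.org/api/search")
    assert resp.status_code == 200
    assert resp.json()["results"][0]["title"] == "stub"
```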
- -**State Management**: Use `ContextVar` in middleware for thread-safe isolation. Never use global mutable state (except singletons via `@lru_cache`). Use `WorkflowState` from `src/middleware/state_machine.py` for workflow state. - -**Citation Validation**: ALWAYS validate references before returning reports. Use `validate_references()` from `src/utils/citation_validator.py`. Remove hallucinated citations. Log warnings for removed citations. - ---- - -## src/agents/ - Agent Implementation Rules - -**Pattern**: All agents use Pydantic AI `Agent` class. Agents have structured output types (Pydantic models) or return strings. Use factory functions in `src/agent_factory/agents.py` for creation. - -**Agent Structure**: -- System prompt as module-level constant (with date injection: `datetime.now().strftime("%Y-%m-%d")`) -- Agent class with `__init__(model: Any | None = None)` -- Main method (e.g., `async def evaluate()`, `async def write_report()`) -- Factory function: `def create_agent_name(model: Any | None = None) -> AgentName` - -**Model Initialization**: Use `get_model()` from `src/agent_factory/judges.py` if no model provided. Support OpenAI/Anthropic/HF Inference via settings. - -**Error Handling**: Return fallback values (e.g., `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])`) on failure. Log errors with context. Use retry logic (3 retries) in Pydantic AI Agent initialization. - -**Input Validation**: Validate query/inputs are not empty. Truncate very long inputs with warnings. Handle None values gracefully. - -**Output Types**: Use structured output types from `src/utils/models.py` (e.g., `KnowledgeGapOutput`, `AgentSelectionPlan`, `ReportDraft`). For text output (writer agents), return `str` directly. - -**Agent-Specific Rules**: -- `knowledge_gap.py`: Outputs `KnowledgeGapOutput`. Evaluates research completeness. -- `tool_selector.py`: Outputs `AgentSelectionPlan`. Selects tools (RAG/web/database). -- `writer.py`: Returns markdown string. Includes citations in numbered format. -- `long_writer.py`: Uses `ReportDraft` input/output. Handles section-by-section writing. -- `proofreader.py`: Takes `ReportDraft`, returns polished markdown. -- `thinking.py`: Returns observation string from conversation history. -- `input_parser.py`: Outputs `ParsedQuery` with research mode detection. - ---- - -## src/tools/ - Search Tool Rules - -**Protocol**: All tools implement `SearchTool` protocol from `src/tools/base.py`: `name` property and `async def search(query, max_results) -> list[Evidence]`. - -**Rate Limiting**: Use `@retry` decorator from tenacity: `@retry(stop=stop_after_attempt(3), wait=wait_exponential(...))`. Implement `_rate_limit()` method for APIs with limits. Use shared rate limiters from `src/tools/rate_limiter.py`. - -**Error Handling**: Raise `SearchError` or `RateLimitError` on failures. Handle HTTP errors (429, 500, timeout). Return empty list on non-critical errors (log warning). - -**Query Preprocessing**: Use `preprocess_query()` from `src/tools/query_utils.py` to remove noise and expand synonyms. - -**Evidence Conversion**: Convert API responses to `Evidence` objects with `Citation`. Extract metadata (title, url, date, authors). Set relevance scores (0.0-1.0). Handle missing fields gracefully. - -**Tool-Specific Rules**: -- `pubmed.py`: Use NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Parse XML with `xmltodict`. Handle single vs. multiple articles. -- `clinicaltrials.py`: Use `requests` library (NOT httpx - WAF blocks httpx). 
Run in thread pool: `await asyncio.to_thread(requests.get, ...)`. Filter: Only interventional studies, active/completed. -- `europepmc.py`: Handle preprint markers: `[PREPRINT - Not peer-reviewed]`. Build URLs from DOI or PMID. -- `rag_tool.py`: Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. Handles ingestion. -- `search_handler.py`: Orchestrates parallel searches across multiple tools. Uses `asyncio.gather()` with `return_exceptions=True`. Aggregates results into `SearchResult`. - ---- - -## src/middleware/ - Middleware Rules - -**State Management**: Use `ContextVar` for thread-safe isolation. `WorkflowState` uses `ContextVar[WorkflowState | None]`. Initialize with `init_workflow_state(embedding_service)`. Access with `get_workflow_state()` (auto-initializes if missing). - -**WorkflowState**: Tracks `evidence: list[Evidence]`, `conversation: Conversation`, `embedding_service: Any`. Methods: `add_evidence()` (deduplicates by URL), `async search_related()` (semantic search). - -**WorkflowManager**: Manages parallel research loops. Methods: `add_loop()`, `run_loops_parallel()`, `update_loop_status()`, `sync_loop_evidence_to_state()`. Uses `asyncio.gather()` for parallel execution. Handles errors per loop (don't fail all if one fails). - -**BudgetTracker**: Tracks tokens, time, iterations per loop and globally. Methods: `create_budget()`, `add_tokens()`, `start_timer()`, `update_timer()`, `increment_iteration()`, `check_budget()`, `can_continue()`. Token estimation: `estimate_tokens(text)` (~4 chars per token), `estimate_llm_call_tokens(prompt, response)`. - -**Models**: All middleware models in `src/utils/models.py`. `IterationData`, `Conversation`, `ResearchLoop`, `BudgetStatus` are used by middleware. - ---- - -## src/orchestrator/ - Orchestration Rules - -**Research Flows**: Two patterns: `IterativeResearchFlow` (single loop) and `DeepResearchFlow` (plan → parallel loops → synthesis). Both support agent chains (`use_graph=False`) and graph execution (`use_graph=True`). - -**IterativeResearchFlow**: Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete. Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent`, `JudgeHandler`. Tracks iterations, time, budget. - -**DeepResearchFlow**: Pattern: Planner → Parallel iterative loops per section → Synthesizer. Uses `PlannerAgent`, `IterativeResearchFlow` (per section), `LongWriterAgent` or `ProofreaderAgent`. Uses `WorkflowManager` for parallel execution. - -**Graph Orchestrator**: Uses Pydantic AI Graphs (when available) or agent chains (fallback). Routes based on research mode (iterative/deep/auto). Streams `AgentEvent` objects for UI. - -**State Initialization**: Always call `init_workflow_state()` before running flows. Initialize `BudgetTracker` per loop. Use `WorkflowManager` for parallel coordination. - -**Event Streaming**: Yield `AgentEvent` objects during execution. Event types: "started", "search_complete", "judge_complete", "hypothesizing", "synthesizing", "complete", "error". Include iteration numbers and data payloads. - ---- - -## src/services/ - Service Rules - -**EmbeddingService**: Local sentence-transformers (NO API key required). All operations async-safe via `run_in_executor()`. ChromaDB for vector storage. Deduplication threshold: 0.85 (85% similarity = duplicate). - -**LlamaIndexRAGService**: Uses OpenAI embeddings (requires `OPENAI_API_KEY`). Methods: `ingest_evidence()`, `retrieve()`, `query()`. 
Returns documents with metadata (source, title, url, date, authors). Lazy initialization with graceful fallback. - -**StatisticalAnalyzer**: Generates Python code via LLM. Executes in Modal sandbox (secure, isolated). Library versions pinned in `SANDBOX_LIBRARIES` dict. Returns `AnalysisResult` with verdict (SUPPORTED/REFUTED/INCONCLUSIVE). - -**Singleton Pattern**: Use `@lru_cache(maxsize=1)` for singletons: `@lru_cache(maxsize=1); def get_service() -> Service: return Service()`. Lazy initialization to avoid requiring dependencies at import time. - ---- - -## src/utils/ - Utility Rules - -**Models**: All Pydantic models in `src/utils/models.py`. Use frozen models (`model_config = {"frozen": True}`) except where mutation needed. Use `Field()` with descriptions. Validate with constraints. - -**Config**: Settings via Pydantic Settings (`src/utils/config.py`). Load from `.env` automatically. Use `settings` singleton: `from src.utils.config import settings`. Validate API keys with properties: `has_openai_key`, `has_anthropic_key`. - -**Exceptions**: Custom exception hierarchy in `src/utils/exceptions.py`. Base: `DeepCriticalError`. Specific: `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions. - -**LLM Factory**: Centralized LLM model creation in `src/utils/llm_factory.py`. Supports OpenAI, Anthropic, HF Inference. Use `get_model()` or factory functions. Check requirements before initialization. - -**Citation Validator**: Use `validate_references()` from `src/utils/citation_validator.py`. Removes hallucinated citations (URLs not in evidence). Logs warnings. Returns validated report string. - ---- - -## src/orchestrator_factory.py Rules - -**Purpose**: Factory for creating orchestrators. Supports "simple" (legacy) and "advanced" (magentic) modes. Auto-detects mode based on API key availability. - -**Pattern**: Lazy import for optional dependencies (`_get_magentic_orchestrator_class()`). Handles `ImportError` gracefully with clear error messages. - -**Mode Detection**: `_determine_mode()` checks explicit mode or auto-detects: "advanced" if `settings.has_openai_key`, else "simple". Maps "magentic" → "advanced". - -**Function Signature**: `create_orchestrator(search_handler, judge_handler, config, mode) -> Any`. Simple mode requires handlers. Advanced mode uses MagenticOrchestrator. - -**Error Handling**: Raise `ValueError` with clear messages if requirements not met. Log mode selection with structlog. - ---- - -## src/orchestrator_hierarchical.py Rules - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams. Adapts Magentic ChatAgent to SubIterationTeam protocol. - -**Pattern**: Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge`. Event-driven via callback queue. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated, but kept for compatibility). - -**Event Streaming**: Uses `asyncio.Queue` for event coordination. Yields `AgentEvent` objects. Handles event callback pattern with `asyncio.wait()`. - -**Error Handling**: Log errors with context. Yield error events. Process remaining events after task completion. - ---- - -## src/orchestrator_magentic.py Rules - -**Purpose**: Magentic-based orchestrator using ChatAgent pattern. Each agent has internal LLM. Manager orchestrates agents. - -**Pattern**: Uses `MagenticBuilder` with participants (searcher, hypothesizer, judge, reporter). Manager uses `OpenAIChatClient`. Workflow built in `_build_workflow()`. 
- -**Event Processing**: `_process_event()` converts Magentic events to `AgentEvent`. Handles: `MagenticOrchestratorMessageEvent`, `MagenticAgentMessageEvent`, `MagenticFinalResultEvent`, `MagenticAgentDeltaEvent`, `WorkflowOutputEvent`. - -**Text Extraction**: `_extract_text()` defensively extracts text from messages. Priority: `.content` → `.text` → `str(message)`. Handles buggy message objects. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated). - -**Requirements**: Must call `check_magentic_requirements()` in `__init__`. Requires `agent-framework-core` and OpenAI API key. - -**Event Types**: Maps agent names to event types: "search" → "search_complete", "judge" → "judge_complete", "hypothes" → "hypothesizing", "report" → "synthesizing". - ---- - -## src/agent_factory/ - Factory Rules - -**Pattern**: Factory functions for creating agents and handlers. Lazy initialization for optional dependencies. Support OpenAI/Anthropic/HF Inference. - -**Judges**: `create_judge_handler()` creates `JudgeHandler` with structured output (`JudgeAssessment`). Supports `MockJudgeHandler`, `HFInferenceJudgeHandler` as fallbacks. - -**Agents**: Factory functions in `agents.py` for all Pydantic AI agents. Pattern: `create_agent_name(model: Any | None = None) -> AgentName`. Use `get_model()` if model not provided. - -**Graph Builder**: `graph_builder.py` contains utilities for building research graphs. Supports iterative and deep research graph construction. - -**Error Handling**: Raise `ConfigurationError` if required API keys missing. Log agent creation. Handle import errors gracefully. - ---- - -## src/prompts/ - Prompt Rules - -**Pattern**: System prompts stored as module-level constants. Include date injection: `datetime.now().strftime("%Y-%m-%d")`. Format evidence with truncation (1500 chars per item). - -**Judge Prompts**: In `judge.py`. Handle empty evidence case separately. Always request structured JSON output. - -**Hypothesis Prompts**: In `hypothesis.py`. Use diverse evidence selection (MMR algorithm). Sentence-aware truncation. - -**Report Prompts**: In `report.py`. Include full citation details. Use diverse evidence selection (n=20). Emphasize citation validation rules. - ---- - -## Testing Rules - -**Structure**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). - -**Mocking**: Use `respx` for httpx mocking. Use `pytest-mock` for general mocking. Mock LLM calls in unit tests (use `MockJudgeHandler`). - -**Fixtures**: Common fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response`. - -**Coverage**: Aim for >80% coverage. Test error handling, edge cases, and integration paths. - ---- - -## File-Specific Agent Rules - -**knowledge_gap.py**: Outputs `KnowledgeGapOutput`. System prompt evaluates research completeness. Handles conversation history. Returns fallback on error. - -**writer.py**: Returns markdown string. System prompt includes citation format examples. Validates inputs. Truncates long findings. Retry logic for transient failures. - -**long_writer.py**: Uses `ReportDraft` input/output. Writes sections iteratively. Reformats references (deduplicates, renumbers). Reformats section headings. - -**proofreader.py**: Takes `ReportDraft`, returns polished markdown. Removes duplicates. Adds summary. Preserves references. - -**tool_selector.py**: Outputs `AgentSelectionPlan`. 
System prompt lists available agents (WebSearchAgent, SiteCrawlerAgent, RAGAgent). Guidelines for when to use each. - -**thinking.py**: Returns observation string. Generates observations from conversation history. Uses query and background context. - -**input_parser.py**: Outputs `ParsedQuery`. Detects research mode (iterative/deep). Extracts entities and research questions. Improves/refines query. - - - diff --git a/dev/docs_plugins.py b/dev/docs_plugins.py deleted file mode 100644 index 9fe1ed9c64756aed732e5ede0706f0c5b93bf44c..0000000000000000000000000000000000000000 --- a/dev/docs_plugins.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Custom MkDocs extension to handle code anchor format: ```start:end:filepath""" - -import re -from pathlib import Path - -from markdown import Markdown -from markdown.extensions import Extension -from markdown.preprocessors import Preprocessor - - -class CodeAnchorPreprocessor(Preprocessor): - """Preprocess code blocks with anchor format: ```start:end:filepath""" - - def __init__(self, md: Markdown, base_path: Path): - super().__init__(md) - self.base_path = base_path - self.pattern = re.compile(r"^```(\d+):(\d+):([^\n]+)\n(.*?)```$", re.MULTILINE | re.DOTALL) - - def run(self, lines: list[str]) -> list[str]: - """Process lines and convert code anchor format to standard code blocks.""" - text = "\n".join(lines) - new_text = self.pattern.sub(self._replace_code_anchor, text) - return new_text.split("\n") - - def _replace_code_anchor(self, match) -> str: - """Replace code anchor format with standard code block + link.""" - start_line = int(match.group(1)) - end_line = int(match.group(2)) - file_path = match.group(3).strip() - existing_code = match.group(4) - - # Determine language from file extension - ext = Path(file_path).suffix.lower() - lang_map = { - ".py": "python", - ".js": "javascript", - ".ts": "typescript", - ".md": "markdown", - ".yaml": "yaml", - ".yml": "yaml", - ".toml": "toml", - ".json": "json", - ".html": "html", - ".css": "css", - ".sh": "bash", - } - language = lang_map.get(ext, "python") - - # Generate GitHub link - repo_url = "https://github.com/DeepCritical/GradioDemo" - github_link = f"{repo_url}/blob/main/{file_path}#L{start_line}-L{end_line}" - - # Return standard code block with source link - return ( - f'[View source: `{file_path}` (lines {start_line}-{end_line})]({github_link}){{: target="_blank" }}\n\n' - f"```{language}\n{existing_code}\n```" - ) - - -class CodeAnchorExtension(Extension): - """Markdown extension for code anchors.""" - - def __init__(self, base_path: str = ".", **kwargs): - super().__init__(**kwargs) - self.base_path = Path(base_path) - - def extendMarkdown(self, md: Markdown): # noqa: N802 - """Register the preprocessor.""" - md.preprocessors.register(CodeAnchorPreprocessor(md, self.base_path), "codeanchor", 25) - - -def makeExtension(**kwargs): # noqa: N802 - """Create the extension.""" - return CodeAnchorExtension(**kwargs) diff --git a/docs/LICENSE.md b/docs/LICENSE.md deleted file mode 100644 index 18466be89051cf1fbcf15385a2eddb2875276a13..0000000000000000000000000000000000000000 --- a/docs/LICENSE.md +++ /dev/null @@ -1,35 +0,0 @@ -# License - -DeepCritical is licensed under the MIT License. 
- -## MIT License - -Copyright (c) 2024 DeepCritical Team - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - - - - - - - - diff --git a/docs/api/agents.md b/docs/api/agents.md deleted file mode 100644 index c89e5b66d7c52f59197d3707c139999c0c12babe..0000000000000000000000000000000000000000 --- a/docs/api/agents.md +++ /dev/null @@ -1,211 +0,0 @@ -# Agents API Reference - -This page documents the API for DeepCritical agents. - -## KnowledgeGapAgent - -**Module**: `src.agents.knowledge_gap` - -**Purpose**: Evaluates research state and identifies knowledge gaps. - -### Methods - -#### `evaluate` - - -[KnowledgeGapAgent.evaluate](../src/agents/knowledge_gap.py) start_line:66 end_line:74 - - -Evaluates research completeness and identifies outstanding knowledge gaps. - -**Parameters**: -- `query`: Research query string -- `background_context`: Background context for the query (default: "") -- `conversation_history`: History of actions, findings, and thoughts as string (default: "") -- `iteration`: Current iteration number (default: 0) -- `time_elapsed_minutes`: Elapsed time in minutes (default: 0.0) -- `max_time_minutes`: Maximum time limit in minutes (default: 10) - -**Returns**: `KnowledgeGapOutput` with: -- `research_complete`: Boolean indicating if research is complete -- `outstanding_gaps`: List of remaining knowledge gaps - -## ToolSelectorAgent - -**Module**: `src.agents.tool_selector` - -**Purpose**: Selects appropriate tools for addressing knowledge gaps. - -### Methods - -#### `select_tools` - - -[ToolSelectorAgent.select_tools](../src/agents/tool_selector.py) start_line:78 end_line:84 - - -Selects tools for addressing a knowledge gap. - -**Parameters**: -- `gap`: The knowledge gap to address -- `query`: Research query string -- `background_context`: Optional background context (default: "") -- `conversation_history`: History of actions, findings, and thoughts as string (default: "") - -**Returns**: `AgentSelectionPlan` with list of `AgentTask` objects. - -## WriterAgent - -**Module**: `src.agents.writer` - -**Purpose**: Generates final reports from research findings. - -### Methods - -#### `write_report` - - -[WriterAgent.write_report](../src/agents/writer.py) start_line:67 end_line:73 - - -Generates a markdown report from research findings. 
- -**Parameters**: -- `query`: Research query string -- `findings`: Research findings to include in report -- `output_length`: Optional description of desired output length (default: "") -- `output_instructions`: Optional additional instructions for report generation (default: "") - -**Returns**: Markdown string with numbered citations. - -## LongWriterAgent - -**Module**: `src.agents.long_writer` - -**Purpose**: Long-form report generation with section-by-section writing. - -### Methods - -#### `write_next_section` - - -[LongWriterAgent.write_next_section](../src/agents/long_writer.py) start_line:94 end_line:100 - - -Writes the next section of a long-form report. - -**Parameters**: -- `original_query`: The original research query -- `report_draft`: Current report draft as string (all sections written so far) -- `next_section_title`: Title of the section to write -- `next_section_draft`: Draft content for the next section - -**Returns**: `LongWriterOutput` with formatted section and references. - -#### `write_report` - - -[LongWriterAgent.write_report](../src/agents/long_writer.py) start_line:263 end_line:268 - - -Generates final report from draft. - -**Parameters**: -- `query`: Research query string -- `report_title`: Title of the report -- `report_draft`: Complete report draft - -**Returns**: Final markdown report string. - -## ProofreaderAgent - -**Module**: `src.agents.proofreader` - -**Purpose**: Proofreads and polishes report drafts. - -### Methods - -#### `proofread` - - -[ProofreaderAgent.proofread](../src/agents/proofreader.py) start_line:72 end_line:76 - - -Proofreads and polishes a report draft. - -**Parameters**: -- `query`: Research query string -- `report_title`: Title of the report -- `report_draft`: Report draft to proofread - -**Returns**: Polished markdown string. - -## ThinkingAgent - -**Module**: `src.agents.thinking` - -**Purpose**: Generates observations from conversation history. - -### Methods - -#### `generate_observations` - - -[ThinkingAgent.generate_observations](../src/agents/thinking.py) start_line:70 end_line:76 - - -Generates observations from conversation history. - -**Parameters**: -- `query`: Research query string -- `background_context`: Optional background context (default: "") -- `conversation_history`: History of actions, findings, and thoughts as string (default: "") -- `iteration`: Current iteration number (default: 1) - -**Returns**: Observation string. - -## InputParserAgent - -**Module**: `src.agents.input_parser` - -**Purpose**: Parses and improves user queries, detects research mode. - -### Methods - -#### `parse` - - -[InputParserAgent.parse](../src/agents/input_parser.py) start_line:82 end_line:82 - - -Parses and improves a user query. - -**Parameters**: -- `query`: Original query string - -**Returns**: `ParsedQuery` with: -- `original_query`: Original query string -- `improved_query`: Refined query string -- `research_mode`: "iterative" or "deep" -- `key_entities`: List of key entities -- `research_questions`: List of research questions - -## Factory Functions - -All agents have factory functions in `src.agent_factory.agents`: - - -[Factory Functions](../src/agent_factory/agents.py) start_line:30 end_line:50 - - -**Parameters**: -- `model`: Optional Pydantic AI model. If None, uses `get_model()` from settings. -- `oauth_token`: Optional OAuth token from HuggingFace login (takes priority over env vars) - -**Returns**: Agent instance. 
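-
-The sketch below shows one way a factory-created agent might be used. The factory name `create_knowledge_gap_agent` is assumed from the `create_agent_name` naming pattern, and the argument values are illustrative; verify both against `src/agent_factory/agents.py`.
-
-```python
-# Hedged usage sketch; the factory name and argument values are assumptions.
-from src.agent_factory.agents import create_knowledge_gap_agent
-
-
-async def check_research_state() -> None:
-    agent = create_knowledge_gap_agent()  # falls back to get_model() when no model is passed
-    result = await agent.evaluate(
-        query="What are the cardiovascular effects of metformin?",
-        background_context="",
-        conversation_history="",
-        iteration=1,
-        time_elapsed_minutes=2.5,
-        max_time_minutes=10,
-    )
-    if not result.research_complete:
-        print(result.outstanding_gaps)
-```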
- -## See Also - -- [Architecture - Agents](../architecture/agents.md) - Architecture overview -- [Models API](models.md) - Data models used by agents - diff --git a/docs/api/models.md b/docs/api/models.md deleted file mode 100644 index d9513147a503f9dfaf8c9aab2bc312ed4d672a3d..0000000000000000000000000000000000000000 --- a/docs/api/models.md +++ /dev/null @@ -1,191 +0,0 @@ -# Models API Reference - -This page documents the Pydantic models used throughout DeepCritical. - -## Evidence - -**Module**: `src.utils.models` - -**Purpose**: Represents evidence from search results. - - -[Evidence Model](../src/utils/models.py) start_line:33 end_line:44 - - -**Fields**: -- `citation`: Citation information (title, URL, date, authors) -- `content`: Evidence text content -- `relevance`: Relevance score (0.0-1.0) -- `metadata`: Additional metadata dictionary - -## Citation - -**Module**: `src.utils.models` - -**Purpose**: Citation information for evidence. - - -[Citation Model](../src/utils/models.py) start_line:12 end_line:30 - - -**Fields**: -- `source`: Source name (e.g., "pubmed", "clinicaltrials", "europepmc", "web", "rag") -- `title`: Article/trial title -- `url`: Source URL -- `date`: Publication date (YYYY-MM-DD or "Unknown") -- `authors`: List of authors (optional) - -## KnowledgeGapOutput - -**Module**: `src.utils.models` - -**Purpose**: Output from knowledge gap evaluation. - - -[KnowledgeGapOutput Model](../src/utils/models.py) start_line:494 end_line:504 - - -**Fields**: -- `research_complete`: Boolean indicating if research is complete -- `outstanding_gaps`: List of remaining knowledge gaps - -## AgentSelectionPlan - -**Module**: `src.utils.models` - -**Purpose**: Plan for tool/agent selection. - - -[AgentSelectionPlan Model](../src/utils/models.py) start_line:521 end_line:526 - - -**Fields**: -- `tasks`: List of agent tasks to execute - -## AgentTask - -**Module**: `src.utils.models` - -**Purpose**: Individual agent task. - - -[AgentTask Model](../src/utils/models.py) start_line:507 end_line:518 - - -**Fields**: -- `gap`: The knowledge gap being addressed (optional) -- `agent`: Name of agent to use -- `query`: The specific query for the agent -- `entity_website`: The website of the entity being researched, if known (optional) - -## ReportDraft - -**Module**: `src.utils.models` - -**Purpose**: Draft structure for long-form reports. - - -[ReportDraft Model](../src/utils/models.py) start_line:538 end_line:545 - - -**Fields**: -- `sections`: List of report sections - -## ReportSection - -**Module**: `src.utils.models` - -**Purpose**: Individual section in a report draft. - - -[ReportDraftSection Model](../src/utils/models.py) start_line:529 end_line:535 - - -**Fields**: -- `section_title`: The title of the section -- `section_content`: The content of the section - -## ParsedQuery - -**Module**: `src.utils.models` - -**Purpose**: Parsed and improved query. - - -[ParsedQuery Model](../src/utils/models.py) start_line:557 end_line:572 - - -**Fields**: -- `original_query`: Original query string -- `improved_query`: Refined query string -- `research_mode`: Research mode ("iterative" or "deep") -- `key_entities`: List of key entities -- `research_questions`: List of research questions - -## Conversation - -**Module**: `src.utils.models` - -**Purpose**: Conversation history with iterations. 
- - -[Conversation Model](../src/utils/models.py) start_line:331 end_line:337 - - -**Fields**: -- `history`: List of iteration data - -## IterationData - -**Module**: `src.utils.models` - -**Purpose**: Data for a single iteration. - - -[IterationData Model](../src/utils/models.py) start_line:315 end_line:328 - - -**Fields**: -- `gap`: The gap addressed in the iteration -- `tool_calls`: The tool calls made -- `findings`: The findings collected from tool calls -- `thought`: The thinking done to reflect on the success of the iteration and next steps - -## AgentEvent - -**Module**: `src.utils.models` - -**Purpose**: Event emitted during research execution. - - -[AgentEvent Model](../src/utils/models.py) start_line:104 end_line:125 - - -**Fields**: -- `type`: Event type (e.g., "started", "search_complete", "complete") -- `iteration`: Iteration number (optional) -- `data`: Event data dictionary - -## BudgetStatus - -**Module**: `src.utils.models` - -**Purpose**: Current budget status. - - -[BudgetStatus Model](../src/middleware/budget_tracker.py) start_line:15 end_line:25 - - -**Fields**: -- `tokens_used`: Total tokens used -- `tokens_limit`: Token budget limit -- `time_elapsed_seconds`: Time elapsed in seconds -- `time_limit_seconds`: Time budget limit (default: 600.0 seconds / 10 minutes) -- `iterations`: Number of iterations completed -- `iterations_limit`: Maximum iterations (default: 10) -- `iteration_tokens`: Tokens used per iteration (iteration number -> token count) - -## See Also - -- [Architecture - Agents](../architecture/agents.md) - How models are used -- [Configuration](../configuration/index.md) - Model configuration diff --git a/docs/api/orchestrators.md b/docs/api/orchestrators.md deleted file mode 100644 index ec1874536e9809064e3a59a220a6aaa5e6d4eb37..0000000000000000000000000000000000000000 --- a/docs/api/orchestrators.md +++ /dev/null @@ -1,149 +0,0 @@ -# Orchestrators API Reference - -This page documents the API for DeepCritical orchestrators. - -## IterativeResearchFlow - -**Module**: `src.orchestrator.research_flow` - -**Purpose**: Single-loop research with search-judge-synthesize cycles. - -### Methods - -#### `run` - - -[IterativeResearchFlow.run](../src/orchestrator/research_flow.py) start_line:134 end_line:140 - - -Runs iterative research flow. - -**Parameters**: -- `query`: Research query string -- `background_context`: Background context (default: "") -- `output_length`: Optional description of desired output length (default: "") -- `output_instructions`: Optional additional instructions for report generation (default: "") -- `message_history`: Optional user conversation history in Pydantic AI `ModelMessage` format (default: None) - -**Returns**: Final report string. - -**Note**: The `message_history` parameter enables multi-turn conversations by providing context from previous interactions. - -**Note**: `max_iterations`, `max_time_minutes`, and `token_budget` are constructor parameters, not `run()` parameters. - -## DeepResearchFlow - -**Module**: `src.orchestrator.research_flow` - -**Purpose**: Multi-section parallel research with planning and synthesis. - -### Methods - -#### `run` - - -[DeepResearchFlow.run](../src/orchestrator/research_flow.py) start_line:778 end_line:778 - - -Runs deep research flow. - -**Parameters**: -- `query`: Research query string -- `message_history`: Optional user conversation history in Pydantic AI `ModelMessage` format (default: None) - -**Returns**: Final report string. 
- -**Note**: The `message_history` parameter enables multi-turn conversations by providing context from previous interactions. - -**Note**: `max_iterations_per_section`, `max_time_minutes`, and `token_budget` are constructor parameters, not `run()` parameters. - -## GraphOrchestrator - -**Module**: `src.orchestrator.graph_orchestrator` - -**Purpose**: Graph-based execution using Pydantic AI agents as nodes. - -### Methods - -#### `run` - - -[GraphOrchestrator.run](../src/orchestrator/graph_orchestrator.py) start_line:177 end_line:177 - - -Runs graph-based research orchestration. - -**Parameters**: -- `query`: Research query string -- `message_history`: Optional user conversation history in Pydantic AI `ModelMessage` format (default: None) - -**Yields**: `AgentEvent` objects during graph execution. - -**Note**: -- `research_mode` and `use_graph` are constructor parameters, not `run()` parameters. -- The `message_history` parameter enables multi-turn conversations by providing context from previous interactions. Message history is stored in `GraphExecutionContext` and passed to agents during execution. - -## Orchestrator Factory - -**Module**: `src.orchestrator_factory` - -**Purpose**: Factory for creating orchestrators. - -### Functions - -#### `create_orchestrator` - - -[create_orchestrator](../src/orchestrator_factory.py) start_line:44 end_line:50 - - -Creates an orchestrator instance. - -**Parameters**: -- `search_handler`: Search handler protocol implementation (optional, required for simple mode) -- `judge_handler`: Judge handler protocol implementation (optional, required for simple mode) -- `config`: Configuration object (optional) -- `mode`: Orchestrator mode ("simple", "advanced", "magentic", "iterative", "deep", "auto", or None for auto-detect) -- `oauth_token`: Optional OAuth token from HuggingFace login (takes priority over env vars) - -**Returns**: Orchestrator instance. - -**Raises**: -- `ValueError`: If requirements not met - -**Modes**: -- `"simple"`: Legacy orchestrator -- `"advanced"` or `"magentic"`: Magentic orchestrator (requires OpenAI API key) -- `None`: Auto-detect based on API key availability - -## MagenticOrchestrator - -**Module**: `src.orchestrator_magentic` - -**Purpose**: Multi-agent coordination using Microsoft Agent Framework. - -### Methods - -#### `run` - - -[MagenticOrchestrator.run](../src/orchestrator_magentic.py) start_line:101 end_line:101 - - -Runs Magentic orchestration. - -**Parameters**: -- `query`: Research query string - -**Yields**: `AgentEvent` objects converted from Magentic events. - -**Note**: `max_rounds` and `max_stalls` are constructor parameters, not `run()` parameters. - -**Requirements**: -- `agent-framework-core` package -- OpenAI API key - -## See Also - -- [Architecture - Orchestrators](../architecture/orchestrators.md) - Architecture overview -- [Graph Orchestration](../architecture/graph_orchestration.md) - Graph execution details diff --git a/docs/api/services.md b/docs/api/services.md deleted file mode 100644 index 52647e795fa543d8c3a782a07c8d4dbe28b5fbf8..0000000000000000000000000000000000000000 --- a/docs/api/services.md +++ /dev/null @@ -1,279 +0,0 @@ -# Services API Reference - -This page documents the API for DeepCritical services. - -## EmbeddingService - -**Module**: `src.services.embeddings` - -**Purpose**: Local sentence-transformers for semantic search and deduplication. 
- -### Methods - -#### `embed` - - -[EmbeddingService.embed](../src/services/embeddings.py) start_line:55 end_line:55 - - -Generates embedding for a text string. - -**Parameters**: -- `text`: Text to embed - -**Returns**: Embedding vector as list of floats. - -#### `embed_batch` - -```python -async def embed_batch(self, texts: list[str]) -> list[list[float]] -``` - -Generates embeddings for multiple texts. - -**Parameters**: -- `texts`: List of texts to embed - -**Returns**: List of embedding vectors. - -#### `similarity` - -```python -async def similarity(self, text1: str, text2: str) -> float -``` - -Calculates similarity between two texts. - -**Parameters**: -- `text1`: First text -- `text2`: Second text - -**Returns**: Similarity score (0.0-1.0). - -#### `find_duplicates` - -```python -async def find_duplicates( - self, - texts: list[str], - threshold: float = 0.85 -) -> list[tuple[int, int]] -``` - -Finds duplicate texts based on similarity threshold. - -**Parameters**: -- `texts`: List of texts to check -- `threshold`: Similarity threshold (default: 0.85) - -**Returns**: List of (index1, index2) tuples for duplicate pairs. - -#### `add_evidence` - -```python -async def add_evidence( - self, - evidence_id: str, - content: str, - metadata: dict[str, Any] -) -> None -``` - -Adds evidence to vector store for semantic search. - -**Parameters**: -- `evidence_id`: Unique identifier for the evidence -- `content`: Evidence text content -- `metadata`: Additional metadata dictionary - -#### `search_similar` - -```python -async def search_similar( - self, - query: str, - n_results: int = 5 -) -> list[dict[str, Any]] -``` - -Finds semantically similar evidence. - -**Parameters**: -- `query`: Search query string -- `n_results`: Number of results to return (default: 5) - -**Returns**: List of dictionaries with `id`, `content`, `metadata`, and `distance` keys. - -#### `deduplicate` - -```python -async def deduplicate( - self, - new_evidence: list[Evidence], - threshold: float = 0.9 -) -> list[Evidence] -``` - -Removes semantically duplicate evidence. - -**Parameters**: -- `new_evidence`: List of evidence items to deduplicate -- `threshold`: Similarity threshold (default: 0.9, where 0.9 = 90% similar is duplicate) - -**Returns**: List of unique evidence items (not already in vector store). - -### Factory Function - -#### `get_embedding_service` - -```python -@lru_cache(maxsize=1) -def get_embedding_service() -> EmbeddingService -``` - -Returns singleton EmbeddingService instance. - -## LlamaIndexRAGService - -**Module**: `src.services.rag` - -**Purpose**: Retrieval-Augmented Generation using LlamaIndex. - -### Methods - -#### `ingest_evidence` - - -[LlamaIndexRAGService.ingest_evidence](../src/services/llamaindex_rag.py) start_line:290 end_line:290 - - -Ingests evidence into RAG service. - -**Parameters**: -- `evidence_list`: List of Evidence objects to ingest - -**Note**: Supports multiple embedding providers (OpenAI, local sentence-transformers, Hugging Face). - -#### `retrieve` - -```python -def retrieve( - self, - query: str, - top_k: int | None = None -) -> list[dict[str, Any]] -``` - -Retrieves relevant documents for a query. - -**Parameters**: -- `query`: Search query string -- `top_k`: Number of top results to return (defaults to `similarity_top_k` from constructor) - -**Returns**: List of dictionaries with `text`, `score`, and `metadata` keys. 
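-
-A hedged usage sketch of ingestion followed by retrieval. The `Evidence`/`Citation` construction and field values are illustrative; see the [Models API](models.md) for the authoritative field list.
-
-```python
-# Illustrative only: assumes the factory and models documented on this page.
-from src.services.rag import get_rag_service
-from src.utils.models import Citation, Evidence
-
-rag = get_rag_service()  # local embeddings by default, no API key required
-
-evidence = Evidence(
-    citation=Citation(
-        source="pubmed",
-        title="Example article",
-        url="https://pubmed.ncbi.nlm.nih.gov/00000000/",
-        date="2024-01-01",
-    ),
-    content="Example abstract text to index.",
-    relevance=0.8,
-)
-
-rag.ingest_evidence([evidence])
-for hit in rag.retrieve("example query", top_k=3):
-    print(hit["score"], hit["metadata"].get("title"))
-```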
- -#### `query` - -```python -def query( - self, - query_str: str, - top_k: int | None = None -) -> str -``` - -Queries RAG service and returns synthesized response. - -**Parameters**: -- `query_str`: Query string -- `top_k`: Number of results to use (defaults to `similarity_top_k` from constructor) - -**Returns**: Synthesized response string. - -**Raises**: -- `ConfigurationError`: If no LLM API key is available for query synthesis - -#### `ingest_documents` - -```python -def ingest_documents(self, documents: list[Any]) -> None -``` - -Ingests raw LlamaIndex Documents. - -**Parameters**: -- `documents`: List of LlamaIndex Document objects - -#### `clear_collection` - -```python -def clear_collection(self) -> None -``` - -Clears all documents from the collection. - -### Factory Function - -#### `get_rag_service` - -```python -def get_rag_service( - collection_name: str = "deepcritical_evidence", - oauth_token: str | None = None, - **kwargs: Any -) -> LlamaIndexRAGService -``` - -Get or create a RAG service instance. - -**Parameters**: -- `collection_name`: Name of the ChromaDB collection (default: "deepcritical_evidence") -- `oauth_token`: Optional OAuth token from HuggingFace login (takes priority over env vars) -- `**kwargs`: Additional arguments for LlamaIndexRAGService (e.g., `use_openai_embeddings=False`) - -**Returns**: Configured LlamaIndexRAGService instance. - -**Note**: By default, uses local embeddings (sentence-transformers) which require no API keys. - -## StatisticalAnalyzer - -**Module**: `src.services.statistical_analyzer` - -**Purpose**: Secure execution of AI-generated statistical code. - -### Methods - -#### `analyze` - -```python -async def analyze( - self, - query: str, - evidence: list[Evidence], - hypothesis: dict[str, Any] | None = None -) -> AnalysisResult -``` - -Analyzes a research question using statistical methods. - -**Parameters**: -- `query`: The research question -- `evidence`: List of Evidence objects to analyze -- `hypothesis`: Optional hypothesis dict with `drug`, `target`, `pathway`, `effect`, `confidence` keys - -**Returns**: `AnalysisResult` with: -- `verdict`: SUPPORTED, REFUTED, or INCONCLUSIVE -- `confidence`: Confidence in verdict (0.0-1.0) -- `statistical_evidence`: Summary of statistical findings -- `code_generated`: Python code that was executed -- `execution_output`: Output from code execution -- `key_takeaways`: Key takeaways from analysis -- `limitations`: List of limitations - -**Note**: Requires Modal credentials for sandbox execution. - -## See Also - -- [Architecture - Services](../architecture/services.md) - Architecture overview -- [Configuration](../configuration/index.md) - Service configuration - diff --git a/docs/api/tools.md b/docs/api/tools.md deleted file mode 100644 index c3eb95bf88d0d70cf8e2265931a59c774283b021..0000000000000000000000000000000000000000 --- a/docs/api/tools.md +++ /dev/null @@ -1,259 +0,0 @@ -# Tools API Reference - -This page documents the API for DeepCritical search tools. - -## SearchTool Protocol - -All tools implement the `SearchTool` protocol: - -```python -class SearchTool(Protocol): - @property - def name(self) -> str: ... - - async def search( - self, - query: str, - max_results: int = 10 - ) -> list[Evidence]: ... -``` - -## PubMedTool - -**Module**: `src.tools.pubmed` - -**Purpose**: Search peer-reviewed biomedical literature from PubMed. 
- -### Properties - -#### `name` - -```python -@property -def name(self) -> str -``` - -Returns tool name: `"pubmed"` - -### Methods - -#### `search` - -```python -async def search( - self, - query: str, - max_results: int = 10 -) -> list[Evidence] -``` - -Searches PubMed for articles. - -**Parameters**: -- `query`: Search query string -- `max_results`: Maximum number of results to return (default: 10) - -**Returns**: List of `Evidence` objects with PubMed articles. - -**Raises**: -- `SearchError`: If search fails (timeout, HTTP error, XML parsing error) -- `RateLimitError`: If rate limit is exceeded (429 status code) - -**Note**: Uses NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Handles single vs. multiple articles. - -## ClinicalTrialsTool - -**Module**: `src.tools.clinicaltrials` - -**Purpose**: Search ClinicalTrials.gov for interventional studies. - -### Properties - -#### `name` - -```python -@property -def name(self) -> str -``` - -Returns tool name: `"clinicaltrials"` - -### Methods - -#### `search` - -```python -async def search( - self, - query: str, - max_results: int = 10 -) -> list[Evidence] -``` - -Searches ClinicalTrials.gov for trials. - -**Parameters**: -- `query`: Search query string -- `max_results`: Maximum number of results to return (default: 10) - -**Returns**: List of `Evidence` objects with clinical trials. - -**Note**: Only returns interventional studies with status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION. Uses `requests` library (NOT httpx - WAF blocks httpx). Runs in thread pool for async compatibility. - -**Raises**: -- `SearchError`: If search fails (HTTP error, request exception) - -## EuropePMCTool - -**Module**: `src.tools.europepmc` - -**Purpose**: Search Europe PMC for preprints and peer-reviewed articles. - -### Properties - -#### `name` - -```python -@property -def name(self) -> str -``` - -Returns tool name: `"europepmc"` - -### Methods - -#### `search` - -```python -async def search( - self, - query: str, - max_results: int = 10 -) -> list[Evidence] -``` - -Searches Europe PMC for articles and preprints. - -**Parameters**: -- `query`: Search query string -- `max_results`: Maximum number of results to return (default: 10) - -**Returns**: List of `Evidence` objects with articles/preprints. - -**Note**: Includes both preprints (marked with `[PREPRINT - Not peer-reviewed]`) and peer-reviewed articles. Handles preprint markers. Builds URLs from DOI or PMID. - -**Raises**: -- `SearchError`: If search fails (HTTP error, connection error) - -## RAGTool - -**Module**: `src.tools.rag_tool` - -**Purpose**: Semantic search within collected evidence. - -### Initialization - -```python -def __init__( - self, - rag_service: LlamaIndexRAGService | None = None, - oauth_token: str | None = None -) -> None -``` - -**Parameters**: -- `rag_service`: Optional RAG service instance. If None, will be lazy-initialized. -- `oauth_token`: Optional OAuth token from HuggingFace login (for RAG LLM) - -### Properties - -#### `name` - -```python -@property -def name(self) -> str -``` - -Returns tool name: `"rag"` - -### Methods - -#### `search` - -```python -async def search( - self, - query: str, - max_results: int = 10 -) -> list[Evidence] -``` - -Searches collected evidence using semantic similarity. - -**Parameters**: -- `query`: Search query string -- `max_results`: Maximum number of results to return (default: 10) - -**Returns**: List of `Evidence` objects from collected evidence. 
- -**Raises**: -- `ConfigurationError`: If RAG service is unavailable - -**Note**: Requires evidence to be ingested into RAG service first. Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. - -## SearchHandler - -**Module**: `src.tools.search_handler` - -**Purpose**: Orchestrates parallel searches across multiple tools. - -### Initialization - -```python -def __init__( - self, - tools: list[SearchTool], - timeout: float = 30.0, - include_rag: bool = False, - auto_ingest_to_rag: bool = True, - oauth_token: str | None = None -) -> None -``` - -**Parameters**: -- `tools`: List of search tools to use -- `timeout`: Timeout for each search in seconds (default: 30.0) -- `include_rag`: Whether to include RAG tool in searches (default: False) -- `auto_ingest_to_rag`: Whether to automatically ingest results into RAG (default: True) -- `oauth_token`: Optional OAuth token from HuggingFace login (for RAG LLM) - -### Methods - -#### `execute` - - -[SearchHandler.execute](../src/tools/search_handler.py) start_line:86 end_line:86 - - -Searches multiple tools in parallel. - -**Parameters**: -- `query`: Search query string -- `max_results_per_tool`: Maximum results per tool (default: 10) - -**Returns**: `SearchResult` with: -- `query`: The search query -- `evidence`: Aggregated list of evidence -- `sources_searched`: List of source names searched -- `total_found`: Total number of results -- `errors`: List of error messages from failed tools - -**Raises**: -- `SearchError`: If search times out - -**Note**: Uses `asyncio.gather()` for parallel execution. Handles tool failures gracefully (returns errors in `SearchResult.errors`). Automatically ingests evidence into RAG if enabled. - -## See Also - -- [Architecture - Tools](../architecture/tools.md) - Architecture overview -- [Models API](models.md) - Data models used by tools diff --git a/docs/architecture/agents.md b/docs/architecture/agents.md deleted file mode 100644 index 3bf400ba7bf42cfb3cd836f8ec9af3949731c3a6..0000000000000000000000000000000000000000 --- a/docs/architecture/agents.md +++ /dev/null @@ -1,293 +0,0 @@ -# Agents Architecture - -DeepCritical uses Pydantic AI agents for all AI-powered operations. All agents follow a consistent pattern and use structured output types. - -## Agent Pattern - -### Pydantic AI Agents - -Pydantic AI agents use the `Agent` class with the following structure: - -- **System Prompt**: Module-level constant with date injection -- **Agent Class**: `__init__(model: Any | None = None)` -- **Main Method**: Async method (e.g., `async def evaluate()`, `async def write_report()`) -- **Factory Function**: `def create_agent_name(model: Any | None = None, oauth_token: str | None = None) -> AgentName` - -**Note**: Factory functions accept an optional `oauth_token` parameter for HuggingFace authentication, which takes priority over environment variables. - -## Model Initialization - -Agents use `get_model()` from `src/agent_factory/judges.py` if no model is provided. This supports: - -- OpenAI models -- Anthropic models -- HuggingFace Inference API models - -The model selection is based on the configured `LLM_PROVIDER` in settings. - -## Error Handling - -Agents return fallback values on failure rather than raising exceptions: - -- `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])` -- Empty strings for text outputs -- Default structured outputs - -All errors are logged with context using structlog. 
- -## Input Validation - -All agents validate inputs: - -- Check that queries/inputs are not empty -- Truncate very long inputs with warnings -- Handle None values gracefully - -## Output Types - -Agents use structured output types from `src/utils/models.py`: - -- `KnowledgeGapOutput`: Research completeness evaluation -- `AgentSelectionPlan`: Tool selection plan -- `ReportDraft`: Long-form report structure -- `ParsedQuery`: Query parsing and mode detection - -For text output (writer agents), agents return `str` directly. - -## Agent Types - -### Knowledge Gap Agent - -**File**: `src/agents/knowledge_gap.py` - -**Purpose**: Evaluates research state and identifies knowledge gaps. - -**Output**: `KnowledgeGapOutput` with: -- `research_complete`: Boolean indicating if research is complete -- `outstanding_gaps`: List of remaining knowledge gaps - -**Methods**: -- `async def evaluate(query, background_context, conversation_history, iteration, time_elapsed_minutes, max_time_minutes) -> KnowledgeGapOutput` - -### Tool Selector Agent - -**File**: `src/agents/tool_selector.py` - -**Purpose**: Selects appropriate tools for addressing knowledge gaps. - -**Output**: `AgentSelectionPlan` with list of `AgentTask` objects. - -**Available Agents**: -- `WebSearchAgent`: General web search for fresh information -- `SiteCrawlerAgent`: Research specific entities/companies -- `RAGAgent`: Semantic search within collected evidence - -### Writer Agent - -**File**: `src/agents/writer.py` - -**Purpose**: Generates final reports from research findings. - -**Output**: Markdown string with numbered citations. - -**Methods**: -- `async def write_report(query, findings, output_length, output_instructions) -> str` - -**Features**: -- Validates inputs -- Truncates very long findings (max 50000 chars) with warning -- Retry logic for transient failures (3 retries) -- Citation validation before returning - -### Long Writer Agent - -**File**: `src/agents/long_writer.py` - -**Purpose**: Long-form report generation with section-by-section writing. - -**Input/Output**: Uses `ReportDraft` models. - -**Methods**: -- `async def write_next_section(query, draft, section_title, section_content) -> LongWriterOutput` -- `async def write_report(query, report_title, report_draft) -> str` - -**Features**: -- Writes sections iteratively -- Aggregates references across sections -- Reformats section headings and references -- Deduplicates and renumbers references - -### Proofreader Agent - -**File**: `src/agents/proofreader.py` - -**Purpose**: Proofreads and polishes report drafts. - -**Input**: `ReportDraft` -**Output**: Polished markdown string - -**Methods**: -- `async def proofread(query, report_title, report_draft) -> str` - -**Features**: -- Removes duplicate content across sections -- Adds executive summary if multiple sections -- Preserves all references and citations -- Improves flow and readability - -### Thinking Agent - -**File**: `src/agents/thinking.py` - -**Purpose**: Generates observations from conversation history. - -**Output**: Observation string - -**Methods**: -- `async def generate_observations(query, background_context, conversation_history) -> str` - -### Input Parser Agent - -**File**: `src/agents/input_parser.py` - -**Purpose**: Parses and improves user queries, detects research mode. 
- -**Output**: `ParsedQuery` with: -- `original_query`: Original query string -- `improved_query`: Refined query string -- `research_mode`: "iterative" or "deep" -- `key_entities`: List of key entities -- `research_questions`: List of research questions - -## Magentic Agents - -The following agents use the `BaseAgent` pattern from `agent-framework` and are used exclusively with `MagenticOrchestrator`: - -### Hypothesis Agent - -**File**: `src/agents/hypothesis_agent.py` - -**Purpose**: Generates mechanistic hypotheses based on evidence. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Uses internal Pydantic AI `Agent` with `HypothesisAssessment` output type -- Accesses shared `evidence_store` for evidence -- Uses embedding service for diverse evidence selection (MMR algorithm) -- Stores hypotheses in shared context - -### Search Agent - -**File**: `src/agents/search_agent.py` - -**Purpose**: Wraps `SearchHandler` as an agent for Magentic orchestrator. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Executes searches via `SearchHandlerProtocol` -- Deduplicates evidence using embedding service -- Searches for semantically related evidence -- Updates shared evidence store - -### Analysis Agent - -**File**: `src/agents/analysis_agent.py` - -**Purpose**: Performs statistical analysis using Modal sandbox. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Wraps `StatisticalAnalyzer` service -- Analyzes evidence and hypotheses -- Returns verdict (SUPPORTED/REFUTED/INCONCLUSIVE) -- Stores analysis results in shared context - -### Report Agent (Magentic) - -**File**: `src/agents/report_agent.py` - -**Purpose**: Generates structured scientific reports from evidence and hypotheses. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Uses internal Pydantic AI `Agent` with `ResearchReport` output type -- Accesses shared evidence store and hypotheses -- Validates citations before returning -- Formats report as markdown - -### Judge Agent - -**File**: `src/agents/judge_agent.py` - -**Purpose**: Evaluates evidence quality and determines if sufficient for synthesis. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` -- `async def run_stream(messages, thread, **kwargs) -> AsyncIterable[AgentRunResponseUpdate]` - -**Features**: -- Wraps `JudgeHandlerProtocol` -- Accesses shared evidence store -- Returns `JudgeAssessment` with sufficient flag, confidence, and recommendation - -## Agent Patterns - -DeepCritical uses two distinct agent patterns: - -### 1. Pydantic AI Agents (Traditional Pattern) - -These agents use the Pydantic AI `Agent` class directly and are used in iterative and deep research flows: - -- **Pattern**: `Agent(model, output_type, system_prompt)` -- **Initialization**: `__init__(model: Any | None = None)` -- **Methods**: Agent-specific async methods (e.g., `async def evaluate()`, `async def write_report()`) -- **Examples**: `KnowledgeGapAgent`, `ToolSelectorAgent`, `WriterAgent`, `LongWriterAgent`, `ProofreaderAgent`, `ThinkingAgent`, `InputParserAgent` - -### 2. 
Magentic Agents (Agent-Framework Pattern) - -These agents use the `BaseAgent` class from `agent-framework` and are used in Magentic orchestrator: - -- **Pattern**: `BaseAgent` from `agent-framework` with `async def run()` method -- **Initialization**: `__init__(evidence_store, embedding_service, ...)` -- **Methods**: `async def run(messages, thread, **kwargs) -> AgentRunResponse` -- **Examples**: `HypothesisAgent`, `SearchAgent`, `AnalysisAgent`, `ReportAgent`, `JudgeAgent` - -**Note**: Magentic agents are used exclusively with the `MagenticOrchestrator` and follow the agent-framework protocol for multi-agent coordination. - -## Factory Functions - -All agents have factory functions in `src/agent_factory/agents.py`: - - -[Factory Functions](../src/agent_factory/agents.py) start_line:79 end_line:100 - - -Factory functions: -- Use `get_model()` if no model provided -- Accept `oauth_token` parameter for HuggingFace authentication -- Raise `ConfigurationError` if creation fails -- Log agent creation - -## See Also - -- [Orchestrators](orchestrators.md) - How agents are orchestrated -- [API Reference - Agents](../api/agents.md) - API documentation -- [Contributing - Code Style](../contributing/code-style.md) - Development guidelines diff --git a/docs/architecture/graph_orchestration.md b/docs/architecture/graph_orchestration.md deleted file mode 100644 index cdbec2d163918dceb7dc1bf0f9659402a4aa7377..0000000000000000000000000000000000000000 --- a/docs/architecture/graph_orchestration.md +++ /dev/null @@ -1,302 +0,0 @@ -# Graph Orchestration Architecture - -## Overview - -DeepCritical implements a graph-based orchestration system for research workflows using Pydantic AI agents as nodes. This enables better parallel execution, conditional routing, and state management compared to simple agent chains. - -## Conversation History - -DeepCritical supports multi-turn conversations through Pydantic AI's native message history format. The system maintains two types of history: - -1. **User Conversation History**: Multi-turn user interactions (from Gradio chat interface) stored as `list[ModelMessage]` -2. **Research Iteration History**: Internal research process state (existing `Conversation` model) - -### Message History Flow - -``` -Gradio Chat History → convert_gradio_to_message_history() → GraphOrchestrator.run(message_history) - ↓ -GraphExecutionContext (stores message_history) - ↓ -Agent Nodes (receive message_history via agent.run()) - ↓ -WorkflowState (persists user_message_history) -``` - -### Usage - -Message history is automatically converted from Gradio format and passed through the orchestrator: - -```python -# In app.py - automatic conversion -message_history = convert_gradio_to_message_history(history) if history else None -async for event in orchestrator.run(query, message_history=message_history): - yield event -``` - -Agents receive message history through their `run()` methods: - -```python -# In agent execution -if message_history: - result = await agent.run(input_data, message_history=message_history) -``` - -## Graph Patterns - -### Iterative Research Graph - -The iterative research graph follows this pattern: - -``` -[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?] 
- ↓ No ↓ Yes - [Tool Selector] [Writer] - ↓ - [Execute Tools] → [Loop Back] -``` - -**Node IDs**: `thinking` → `knowledge_gap` → `continue_decision` → `tool_selector`/`writer` → `execute_tools` → (loop back to `thinking`) - -**Special Node Handling**: -- `execute_tools`: State node that uses `search_handler` to execute searches and add evidence to workflow state -- `continue_decision`: Decision node that routes based on `research_complete` flag from `KnowledgeGapOutput` - -### Deep Research Graph - -The deep research graph follows this pattern: - -``` -[Input] → [Planner] → [Store Plan] → [Parallel Loops] → [Collect Drafts] → [Synthesizer] - ↓ ↓ ↓ - [Loop1] [Loop2] [Loop3] -``` - -**Node IDs**: `planner` → `store_plan` → `parallel_loops` → `collect_drafts` → `synthesizer` - -**Special Node Handling**: -- `planner`: Agent node that creates `ReportPlan` with report outline -- `store_plan`: State node that stores `ReportPlan` in context for parallel loops -- `parallel_loops`: Parallel node that executes `IterativeResearchFlow` instances for each section -- `collect_drafts`: State node that collects section drafts from parallel loops -- `synthesizer`: Agent node that calls `LongWriterAgent.write_report()` directly with `ReportDraft` - -### Deep Research - -```mermaid - -sequenceDiagram - actor User - participant GraphOrchestrator - participant InputParser - participant GraphBuilder - participant GraphExecutor - participant Agent - participant BudgetTracker - participant WorkflowState - - User->>GraphOrchestrator: run(query) - GraphOrchestrator->>InputParser: detect_research_mode(query) - InputParser-->>GraphOrchestrator: mode (iterative/deep) - GraphOrchestrator->>GraphBuilder: build_graph(mode) - GraphBuilder-->>GraphOrchestrator: ResearchGraph - GraphOrchestrator->>WorkflowState: init_workflow_state() - GraphOrchestrator->>BudgetTracker: create_budget() - GraphOrchestrator->>GraphExecutor: _execute_graph(graph) - - loop For each node in graph - GraphExecutor->>Agent: execute_node(agent_node) - Agent->>Agent: process_input - Agent-->>GraphExecutor: result - GraphExecutor->>WorkflowState: update_state(result) - GraphExecutor->>BudgetTracker: add_tokens(used) - GraphExecutor->>BudgetTracker: check_budget() - alt Budget exceeded - GraphExecutor->>GraphOrchestrator: emit(error_event) - else Continue - GraphExecutor->>GraphOrchestrator: emit(progress_event) - end - end - - GraphOrchestrator->>User: AsyncGenerator[AgentEvent] - -``` - -### Iterative Research - -```mermaid -sequenceDiagram - participant IterativeFlow - participant ThinkingAgent - participant KnowledgeGapAgent - participant ToolSelector - participant ToolExecutor - participant JudgeHandler - participant WriterAgent - - IterativeFlow->>IterativeFlow: run(query) - - loop Until complete or max_iterations - IterativeFlow->>ThinkingAgent: generate_observations() - ThinkingAgent-->>IterativeFlow: observations - - IterativeFlow->>KnowledgeGapAgent: evaluate_gaps() - KnowledgeGapAgent-->>IterativeFlow: KnowledgeGapOutput - - alt Research complete - IterativeFlow->>WriterAgent: create_final_report() - WriterAgent-->>IterativeFlow: final_report - else Gaps remain - IterativeFlow->>ToolSelector: select_agents(gap) - ToolSelector-->>IterativeFlow: AgentSelectionPlan - - IterativeFlow->>ToolExecutor: execute_tool_tasks() - ToolExecutor-->>IterativeFlow: ToolAgentOutput[] - - IterativeFlow->>JudgeHandler: assess_evidence() - JudgeHandler-->>IterativeFlow: should_continue - end - end -``` - - -## Graph Structure - -### Nodes - -Graph nodes 
represent different stages in the research workflow: - -1. **Agent Nodes**: Execute Pydantic AI agents - - Input: Prompt/query - - Output: Structured or unstructured response - - Examples: `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent` - -2. **State Nodes**: Update or read workflow state - - Input: Current state - - Output: Updated state - - Examples: Update evidence, update conversation history - -3. **Decision Nodes**: Make routing decisions based on conditions - - Input: Current state/results - - Output: Next node ID - - Examples: Continue research vs. complete research - -4. **Parallel Nodes**: Execute multiple nodes concurrently - - Input: List of node IDs - - Output: Aggregated results - - Examples: Parallel iterative research loops - -### Edges - -Edges define transitions between nodes: - -1. **Sequential Edges**: Always traversed (no condition) - - From: Source node - - To: Target node - - Condition: None (always True) - -2. **Conditional Edges**: Traversed based on condition - - From: Source node - - To: Target node - - Condition: Callable that returns bool - - Example: If research complete → go to writer, else → continue loop - -3. **Parallel Edges**: Used for parallel execution branches - - From: Parallel node - - To: Multiple target nodes - - Execution: All targets run concurrently - - -## State Management - -State is managed via `WorkflowState` using `ContextVar` for thread-safe isolation: - -- **Evidence**: Collected evidence from searches -- **Conversation**: Iteration history (gaps, tool calls, findings, thoughts) -- **Embedding Service**: For semantic search - -State transitions occur at state nodes, which update the global workflow state. - -## Execution Flow - -1. **Graph Construction**: Build graph from nodes and edges using `create_iterative_graph()` or `create_deep_graph()` -2. **Graph Validation**: Ensure graph is valid (no cycles, all nodes reachable) via `ResearchGraph.validate_structure()` -3. **Graph Execution**: Traverse graph from entry node using `GraphOrchestrator._execute_graph()` -4. **Node Execution**: Execute each node based on type: - - **Agent Nodes**: Call `agent.run()` with transformed input - - **State Nodes**: Update workflow state via `state_updater` function - - **Decision Nodes**: Evaluate `decision_function` to get next node ID - - **Parallel Nodes**: Execute all parallel nodes concurrently via `asyncio.gather()` -5. **Edge Evaluation**: Determine next node(s) based on edges and conditions -6. **Parallel Execution**: Use `asyncio.gather()` for parallel nodes -7. **State Updates**: Update state at state nodes via `GraphExecutionContext.update_state()` -8. 
**Event Streaming**: Yield `AgentEvent` objects during execution for UI - -### GraphExecutionContext - -The `GraphExecutionContext` class manages execution state during graph traversal: - -- **State**: Current `WorkflowState` instance -- **Budget Tracker**: `BudgetTracker` instance for budget enforcement -- **Node Results**: Dictionary storing results from each node execution -- **Visited Nodes**: Set of node IDs that have been executed -- **Current Node**: ID of the node currently being executed - -Methods: -- `set_node_result(node_id, result)`: Store result from node execution -- `get_node_result(node_id)`: Retrieve stored result -- `has_visited(node_id)`: Check if node was visited -- `mark_visited(node_id)`: Mark node as visited -- `update_state(updater, data)`: Update workflow state - -## Conditional Routing - -Decision nodes evaluate conditions and return next node IDs: - -- **Knowledge Gap Decision**: If `research_complete` → writer, else → tool selector -- **Budget Decision**: If budget exceeded → exit, else → continue -- **Iteration Decision**: If max iterations → exit, else → continue - -## Parallel Execution - -Parallel nodes execute multiple nodes concurrently: - -- Each parallel branch runs independently -- Results are aggregated after all branches complete -- State is synchronized after parallel execution -- Errors in one branch don't stop other branches - -## Budget Enforcement - -Budget constraints are enforced at decision nodes: - -- **Token Budget**: Track LLM token usage -- **Time Budget**: Track elapsed time -- **Iteration Budget**: Track iteration count - -If any budget is exceeded, execution routes to exit node. - -## Error Handling - -Errors are handled at multiple levels: - -1. **Node Level**: Catch errors in individual node execution -2. **Graph Level**: Handle errors during graph traversal -3. **State Level**: Rollback state changes on error - -Errors are logged and yield error events for UI. - -## Backward Compatibility - -Graph execution is optional via feature flag: - -- `USE_GRAPH_EXECUTION=true`: Use graph-based execution -- `USE_GRAPH_EXECUTION=false`: Use agent chain execution (existing) - -This allows gradual migration and fallback if needed. - -## See Also - -- [Orchestrators](orchestrators.md) - Overview of all orchestrator patterns -- [Workflow Diagrams](workflow-diagrams.md) - Detailed workflow diagrams -- [API Reference - Orchestrators](../api/orchestrators.md) - API documentation diff --git a/docs/architecture/middleware.md b/docs/architecture/middleware.md deleted file mode 100644 index 6a9c40edefb2c1bb6af2c3b58a3ad1344b75d7d9..0000000000000000000000000000000000000000 --- a/docs/architecture/middleware.md +++ /dev/null @@ -1,146 +0,0 @@ -# Middleware Architecture - -DeepCritical uses middleware for state management, budget tracking, and workflow coordination. - -## State Management - -### WorkflowState - -**File**: `src/middleware/state_machine.py` - -**Purpose**: Thread-safe state management for research workflows - -**Implementation**: Uses `ContextVar` for thread-safe isolation - -**State Components**: -- `evidence: list[Evidence]`: Collected evidence from searches -- `conversation: Conversation`: Iteration history (gaps, tool calls, findings, thoughts) -- `embedding_service: Any`: Embedding service for semantic search - -**Methods**: -- `add_evidence(new_evidence: list[Evidence]) -> int`: Adds evidence with URL-based deduplication. Returns the number of new items added (excluding duplicates). 
-- `async search_related(query: str, n_results: int = 5) -> list[Evidence]`: Semantic search for related evidence using embedding service - -**Initialization**: - - -[Initialize Workflow State](../src/middleware/state_machine.py) start_line:98 end_line:110 - - -**Access**: - - -[Get Workflow State](../src/middleware/state_machine.py) start_line:115 end_line:129 - - -## Workflow Manager - -**File**: `src/middleware/workflow_manager.py` - -**Purpose**: Coordinates parallel research loops - -**Methods**: -- `async add_loop(loop_id: str, query: str) -> ResearchLoop`: Add a new research loop to manage -- `async run_loops_parallel(loop_configs: list[dict], loop_func: Callable, judge_handler: Any | None = None, budget_tracker: Any | None = None) -> list[Any]`: Run multiple research loops in parallel. Takes configuration dicts and a loop function. -- `async update_loop_status(loop_id: str, status: LoopStatus, error: str | None = None)`: Update loop status -- `async sync_loop_evidence_to_state(loop_id: str)`: Synchronize evidence from a specific loop to global state - -**Features**: -- Uses `asyncio.gather()` for parallel execution -- Handles errors per loop (doesn't fail all if one fails) -- Tracks loop status: `pending`, `running`, `completed`, `failed`, `cancelled` -- Evidence deduplication across parallel loops - -**Usage**: -```python -from src.middleware.workflow_manager import WorkflowManager - -manager = WorkflowManager() -await manager.add_loop("loop1", "Research query 1") -await manager.add_loop("loop2", "Research query 2") - -async def run_research(config: dict) -> str: - loop_id = config["loop_id"] - query = config["query"] - # ... research logic ... - return "report" - -results = await manager.run_loops_parallel( - loop_configs=[ - {"loop_id": "loop1", "query": "Research query 1"}, - {"loop_id": "loop2", "query": "Research query 2"}, - ], - loop_func=run_research, -) -``` - -## Budget Tracker - -**File**: `src/middleware/budget_tracker.py` - -**Purpose**: Tracks and enforces resource limits - -**Budget Components**: -- **Tokens**: LLM token usage -- **Time**: Elapsed time in seconds -- **Iterations**: Number of iterations - -**Methods**: -- `create_budget(loop_id: str, tokens_limit: int = 100000, time_limit_seconds: float = 600.0, iterations_limit: int = 10) -> BudgetStatus`: Create a budget for a specific loop -- `add_tokens(loop_id: str, tokens: int)`: Add token usage to a loop's budget -- `start_timer(loop_id: str)`: Start time tracking for a loop -- `update_timer(loop_id: str)`: Update elapsed time for a loop -- `increment_iteration(loop_id: str)`: Increment iteration count for a loop -- `check_budget(loop_id: str) -> tuple[bool, str]`: Check if a loop's budget has been exceeded. Returns (exceeded: bool, reason: str) -- `can_continue(loop_id: str) -> bool`: Check if a loop can continue based on budget - -**Token Estimation**: -- `estimate_tokens(text: str) -> int`: ~4 chars per token -- `estimate_llm_call_tokens(prompt: str, response: str) -> int`: Estimate LLM call tokens - -**Usage**: -```python -from src.middleware.budget_tracker import BudgetTracker - -tracker = BudgetTracker() -budget = tracker.create_budget( - loop_id="research_loop", - tokens_limit=100000, - time_limit_seconds=600, - iterations_limit=10 -) -tracker.start_timer("research_loop") -# ... research operations ... 
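# Illustrative addition, not part of the original snippet: the token count passed
# to add_tokens() below could instead come from the estimation helpers documented
# above, assuming they are exposed on the tracker, e.g.:
#   est = tracker.estimate_llm_call_tokens(prompt, response)
#   tracker.add_tokens("research_loop", est)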
-tracker.add_tokens("research_loop", 5000) -tracker.update_timer("research_loop") -exceeded, reason = tracker.check_budget("research_loop") -if exceeded: - # Budget exceeded, stop research - pass -if not tracker.can_continue("research_loop"): - # Budget exceeded, stop research - pass -``` - -## Models - -All middleware models are defined in `src/utils/models.py`: - -- `IterationData`: Data for a single iteration -- `Conversation`: Conversation history with iterations -- `ResearchLoop`: Research loop state and configuration -- `BudgetStatus`: Current budget status - -## Thread Safety - -All middleware components use `ContextVar` for thread-safe isolation: - -- Each request/thread has its own workflow state -- No global mutable state -- Safe for concurrent requests - -## See Also - -- [Orchestrators](orchestrators.md) - How middleware is used in orchestration -- [API Reference - Orchestrators](../api/orchestrators.md) - API documentation -- [Contributing - Code Style](../contributing/code-style.md) - Development guidelines diff --git a/docs/architecture/orchestrators.md b/docs/architecture/orchestrators.md deleted file mode 100644 index 34ea966177da345a8e0c92254b2c50633cbf2f5e..0000000000000000000000000000000000000000 --- a/docs/architecture/orchestrators.md +++ /dev/null @@ -1,201 +0,0 @@ -# Orchestrators Architecture - -DeepCritical supports multiple orchestration patterns for research workflows. - -## Research Flows - -### IterativeResearchFlow - -**File**: `src/orchestrator/research_flow.py` - -**Pattern**: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete - -**Agents Used**: -- `KnowledgeGapAgent`: Evaluates research completeness -- `ToolSelectorAgent`: Selects tools for addressing gaps -- `ThinkingAgent`: Generates observations -- `WriterAgent`: Creates final report -- `JudgeHandler`: Assesses evidence sufficiency - -**Features**: -- Tracks iterations, time, budget -- Supports graph execution (`use_graph=True`) and agent chains (`use_graph=False`) -- Iterates until research complete or constraints met - -**Usage**: - - -[IterativeResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:57 end_line:80 - - -### DeepResearchFlow - -**File**: `src/orchestrator/research_flow.py` - -**Pattern**: Planner → Parallel iterative loops per section → Synthesizer - -**Agents Used**: -- `PlannerAgent`: Breaks query into report sections -- `IterativeResearchFlow`: Per-section research (parallel) -- `LongWriterAgent` or `ProofreaderAgent`: Final synthesis - -**Features**: -- Uses `WorkflowManager` for parallel execution -- Budget tracking per section and globally -- State synchronization across parallel loops -- Supports graph execution and agent chains - -**Usage**: - - -[DeepResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:709 end_line:728 - - -## Graph Orchestrator - -**File**: `src/orchestrator/graph_orchestrator.py` - -**Purpose**: Graph-based execution using Pydantic AI agents as nodes - -**Features**: -- Uses graph execution (`use_graph=True`) or agent chains (`use_graph=False`) as fallback -- Routes based on research mode (iterative/deep/auto) -- Streams `AgentEvent` objects for UI -- Uses `GraphExecutionContext` to manage execution state - -**Node Types**: -- **Agent Nodes**: Execute Pydantic AI agents -- **State Nodes**: Update or read workflow state -- **Decision Nodes**: Make routing decisions -- **Parallel Nodes**: Execute multiple nodes concurrently - -**Edge Types**: -- **Sequential Edges**: Always 
traversed -- **Conditional Edges**: Traversed based on condition -- **Parallel Edges**: Used for parallel execution branches - -**Special Node Handling**: - -The `GraphOrchestrator` has special handling for certain nodes: - -- **`execute_tools` node**: State node that uses `search_handler` to execute searches and add evidence to workflow state -- **`parallel_loops` node**: Parallel node that executes `IterativeResearchFlow` instances for each section in deep research mode -- **`synthesizer` node**: Agent node that calls `LongWriterAgent.write_report()` directly with `ReportDraft` instead of using `agent.run()` -- **`writer` node**: Agent node that calls `WriterAgent.write_report()` directly with findings instead of using `agent.run()` - -**GraphExecutionContext**: - -The orchestrator uses `GraphExecutionContext` to manage execution state: -- Tracks current node, visited nodes, and node results -- Manages workflow state and budget tracker -- Provides methods to store and retrieve node execution results - -## Orchestrator Factory - -**File**: `src/orchestrator_factory.py` - -**Purpose**: Factory for creating orchestrators - -**Modes**: -- **Simple**: Legacy orchestrator (backward compatible) -- **Advanced**: Magentic orchestrator (requires OpenAI API key) -- **Auto-detect**: Chooses based on API key availability - -**Usage**: - - -[Create Orchestrator](../src/orchestrator_factory.py) start_line:44 end_line:66 - - -## Magentic Orchestrator - -**File**: `src/orchestrator_magentic.py` - -**Purpose**: Multi-agent coordination using Microsoft Agent Framework - -**Features**: -- Uses `agent-framework-core` -- ChatAgent pattern with internal LLMs per agent -- `MagenticBuilder` with participants: - - `searcher`: SearchAgent (wraps SearchHandler) - - `hypothesizer`: HypothesisAgent (generates hypotheses) - - `judge`: JudgeAgent (evaluates evidence) - - `reporter`: ReportAgent (generates final report) -- Manager orchestrates agents via chat client (OpenAI or HuggingFace) -- Event-driven: converts Magentic events to `AgentEvent` for UI streaming via `_process_event()` method -- Supports max rounds, stall detection, and reset handling - -**Event Processing**: - -The orchestrator processes Magentic events and converts them to `AgentEvent`: -- `MagenticOrchestratorMessageEvent` → `AgentEvent` with type based on message content -- `MagenticAgentMessageEvent` → `AgentEvent` with type based on agent name -- `MagenticAgentDeltaEvent` → `AgentEvent` for streaming updates -- `MagenticFinalResultEvent` → `AgentEvent` with type "complete" - -**Requirements**: -- `agent-framework-core` package -- OpenAI API key or HuggingFace authentication - -## Hierarchical Orchestrator - -**File**: `src/orchestrator_hierarchical.py` - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams - -**Features**: -- Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge` -- Adapts Magentic ChatAgent to `SubIterationTeam` protocol -- Event-driven via `asyncio.Queue` for coordination -- Supports sub-iteration patterns for complex research tasks - -## Legacy Simple Mode - -**File**: `src/legacy_orchestrator.py` - -**Purpose**: Linear search-judge-synthesize loop - -**Features**: -- Uses `SearchHandlerProtocol` and `JudgeHandlerProtocol` -- Generator-based design yielding `AgentEvent` objects -- Backward compatibility for simple use cases - -## State Initialization - -All orchestrators must initialize workflow state: - - -[Initialize Workflow State](../src/middleware/state_machine.py) start_line:98 
end_line:112 - - -## Event Streaming - -All orchestrators yield `AgentEvent` objects: - -**Event Types**: -- `started`: Research started -- `searching`: Search in progress -- `search_complete`: Search completed -- `judging`: Evidence evaluation in progress -- `judge_complete`: Evidence evaluation completed -- `looping`: Iteration in progress -- `hypothesizing`: Generating hypotheses -- `analyzing`: Statistical analysis in progress -- `analysis_complete`: Statistical analysis completed -- `synthesizing`: Synthesizing results -- `complete`: Research completed -- `error`: Error occurred -- `streaming`: Streaming update (delta events) - -**Event Structure**: - - -[AgentEvent Model](../src/utils/models.py) start_line:104 end_line:126 - - -## See Also - -- [Graph Orchestration](graph_orchestration.md) - Graph-based execution details -- [Workflow Diagrams](workflow-diagrams.md) - Detailed workflow diagrams -- [API Reference - Orchestrators](../api/orchestrators.md) - API documentation - diff --git a/docs/architecture/services.md b/docs/architecture/services.md deleted file mode 100644 index 102b95b2340b11ebad8c4a797410258b5d3202b4..0000000000000000000000000000000000000000 --- a/docs/architecture/services.md +++ /dev/null @@ -1,146 +0,0 @@ -# Services Architecture - -DeepCritical provides several services for embeddings, RAG, and statistical analysis. - -## Embedding Service - -**File**: `src/services/embeddings.py` - -**Purpose**: Local sentence-transformers for semantic search and deduplication - -**Features**: -- **No API Key Required**: Uses local sentence-transformers models -- **Async-Safe**: All operations use `run_in_executor()` to avoid blocking the event loop -- **ChromaDB Storage**: In-memory vector storage for embeddings -- **Deduplication**: 0.9 similarity threshold by default (90% similarity = duplicate, configurable) - -**Model**: Configurable via `settings.local_embedding_model` (default: `all-MiniLM-L6-v2`) - -**Methods**: -- `async def embed(text: str) -> list[float]`: Generate embeddings (async-safe via `run_in_executor()`) -- `async def embed_batch(texts: list[str]) -> list[list[float]]`: Batch embedding (more efficient) -- `async def add_evidence(evidence_id: str, content: str, metadata: dict[str, Any]) -> None`: Add evidence to vector store -- `async def search_similar(query: str, n_results: int = 5) -> list[dict[str, Any]]`: Find semantically similar evidence -- `async def deduplicate(new_evidence: list[Evidence], threshold: float = 0.9) -> list[Evidence]`: Remove semantically duplicate evidence - -**Usage**: -```python -from src.services.embeddings import get_embedding_service - -service = get_embedding_service() -embedding = await service.embed("text to embed") -``` - -## LlamaIndex RAG Service - -**File**: `src/services/llamaindex_rag.py` - -**Purpose**: Retrieval-Augmented Generation using LlamaIndex - -**Features**: -- **Multiple Embedding Providers**: OpenAI embeddings (requires `OPENAI_API_KEY`) or local sentence-transformers (no API key) -- **Multiple LLM Providers**: HuggingFace LLM (preferred) or OpenAI LLM (fallback) for query synthesis -- **ChromaDB Storage**: Vector database for document storage (supports in-memory mode) -- **Metadata Preservation**: Preserves source, title, URL, date, authors -- **Lazy Initialization**: Graceful fallback if dependencies not available - -**Initialization Parameters**: -- `use_openai_embeddings: bool | None`: Force OpenAI embeddings (None = auto-detect) -- `use_in_memory: bool`: Use in-memory ChromaDB client (useful for tests) -- 
`oauth_token: str | None`: Optional OAuth token from HuggingFace login (takes priority over env vars) - -**Methods**: -- `async def ingest_evidence(evidence: list[Evidence]) -> None`: Ingest evidence into RAG -- `async def retrieve(query: str, top_k: int = 5) -> list[Document]`: Retrieve relevant documents -- `async def query(query: str, top_k: int = 5) -> str`: Query with RAG - -**Usage**: -```python -from src.services.llamaindex_rag import get_rag_service - -service = get_rag_service( - use_openai_embeddings=False, # Use local embeddings - use_in_memory=True, # Use in-memory ChromaDB - oauth_token=token # Optional HuggingFace token -) -if service: - documents = await service.retrieve("query", top_k=5) -``` - -## Statistical Analyzer - -**File**: `src/services/statistical_analyzer.py` - -**Purpose**: Secure execution of AI-generated statistical code - -**Features**: -- **Modal Sandbox**: Secure, isolated execution environment -- **Code Generation**: Generates Python code via LLM -- **Library Pinning**: Version-pinned libraries in `SANDBOX_LIBRARIES` -- **Network Isolation**: `block_network=True` by default - -**Libraries Available**: -- pandas, numpy, scipy -- matplotlib, scikit-learn -- statsmodels - -**Output**: `AnalysisResult` with: -- `verdict`: SUPPORTED, REFUTED, or INCONCLUSIVE -- `code`: Generated analysis code -- `output`: Execution output -- `error`: Error message if execution failed - -**Usage**: -```python -from src.services.statistical_analyzer import StatisticalAnalyzer - -analyzer = StatisticalAnalyzer() -result = await analyzer.analyze( - hypothesis="Metformin reduces cancer risk", - evidence=evidence_list -) -``` - -## Singleton Pattern - -Services use singleton patterns for lazy initialization: - -**EmbeddingService**: Uses a global variable pattern: - - -[EmbeddingService Singleton](../src/services/embeddings.py) start_line:164 end_line:172 - - -**LlamaIndexRAGService**: Direct instantiation (no caching): - - -[LlamaIndexRAGService Factory](../src/services/llamaindex_rag.py) start_line:440 end_line:466 - - -This ensures: -- Single instance per process -- Lazy initialization -- No dependencies required at import time - -## Service Availability - -Services check availability before use: - -```python -from src.utils.config import settings - -if settings.modal_available: - # Use Modal sandbox - pass - -if settings.has_openai_key: - # Use OpenAI embeddings for RAG - pass -``` - -## See Also - -- [Tools](tools.md) - How services are used by search tools -- [API Reference - Services](../api/services.md) - API documentation -- [Configuration](../configuration/index.md) - Service configuration - diff --git a/docs/architecture/tools.md b/docs/architecture/tools.md deleted file mode 100644 index 3585874442c672b724f5819deedf683697074bb7..0000000000000000000000000000000000000000 --- a/docs/architecture/tools.md +++ /dev/null @@ -1,167 +0,0 @@ -# Tools Architecture - -DeepCritical implements a protocol-based search tool system for retrieving evidence from multiple sources. - -## SearchTool Protocol - -All tools implement the `SearchTool` protocol from `src/tools/base.py`: - - -[SearchTool Protocol](../src/tools/base.py) start_line:8 end_line:31 - - -## Rate Limiting - -All tools use the `@retry` decorator from tenacity: - - -[Retry Decorator Pattern](../src/tools/pubmed.py) start_line:46 end_line:50 - - -Tools with API rate limits implement `_rate_limit()` method and use shared rate limiters from `src/tools/rate_limiter.py`. 
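
To make the shared pattern concrete, here is a minimal sketch of a tool's retry and rate-limit wiring. It is illustrative only: the `get_rate_limiter()` helper, its `acquire()` method, and the exact `wait_exponential` arguments are assumptions, not the project's actual implementation.

```python
from tenacity import retry, stop_after_attempt, wait_exponential

from src.tools.rate_limiter import get_rate_limiter  # assumed helper name
from src.utils.exceptions import SearchError
from src.utils.models import Evidence


class ExampleSearchTool:
    """Minimal sketch of the shared retry + rate-limit pattern for a SearchTool."""

    name = "example"

    def __init__(self) -> None:
        # Shared limiter so concurrent searches respect the same API quota (assumed API).
        self._limiter = get_rate_limiter("example_api")

    async def _rate_limit(self) -> None:
        # Block until the shared limiter allows the next outbound request.
        await self._limiter.acquire()

    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, max=10))
    async def search(self, query: str, max_results: int = 10) -> list[Evidence]:
        await self._rate_limit()
        try:
            # Call the external API here and convert responses to Evidence objects.
            return []
        except Exception as e:  # illustrative only
            raise SearchError(f"example search failed: {e}") from e
```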
- -## Error Handling - -Tools raise custom exceptions: - -- `SearchError`: General search failures -- `RateLimitError`: Rate limit exceeded - -Tools handle HTTP errors (429, 500, timeout) and return empty lists on non-critical errors (with warning logs). - -## Query Preprocessing - -Tools use `preprocess_query()` from `src/tools/query_utils.py` to: - -- Remove noise from queries -- Expand synonyms -- Normalize query format - -## Evidence Conversion - -All tools convert API responses to `Evidence` objects with: - -- `Citation`: Title, URL, date, authors -- `content`: Evidence text -- `relevance_score`: 0.0-1.0 relevance score -- `metadata`: Additional metadata - -Missing fields are handled gracefully with defaults. - -## Tool Implementations - -### PubMed Tool - -**File**: `src/tools/pubmed.py` - -**API**: NCBI E-utilities (ESearch → EFetch) - -**Rate Limiting**: -- 0.34s between requests (3 req/sec without API key) -- 0.1s between requests (10 req/sec with NCBI API key) - -**Features**: -- XML parsing with `xmltodict` -- Handles single vs. multiple articles -- Query preprocessing -- Evidence conversion with metadata extraction - -### ClinicalTrials Tool - -**File**: `src/tools/clinicaltrials.py` - -**API**: ClinicalTrials.gov API v2 - -**Important**: Uses `requests` library (NOT httpx) because WAF blocks httpx TLS fingerprint. - -**Execution**: Runs in thread pool: `await asyncio.to_thread(requests.get, ...)` - -**Filtering**: -- Only interventional studies -- Status: `COMPLETED`, `ACTIVE_NOT_RECRUITING`, `RECRUITING`, `ENROLLING_BY_INVITATION` - -**Features**: -- Parses nested JSON structure -- Extracts trial metadata -- Evidence conversion - -### Europe PMC Tool - -**File**: `src/tools/europepmc.py` - -**API**: Europe PMC REST API - -**Features**: -- Handles preprint markers: `[PREPRINT - Not peer-reviewed]` -- Builds URLs from DOI or PMID -- Checks `pubTypeList` for preprint detection -- Includes both preprints and peer-reviewed articles - -### RAG Tool - -**File**: `src/tools/rag_tool.py` - -**Purpose**: Semantic search within collected evidence - -**Implementation**: Wraps `LlamaIndexRAGService` - -**Features**: -- Returns Evidence from RAG results -- Handles evidence ingestion -- Semantic similarity search -- Metadata preservation - -### Search Handler - -**File**: `src/tools/search_handler.py` - -**Purpose**: Orchestrates parallel searches across multiple tools - -**Initialization Parameters**: -- `tools: list[SearchTool]`: List of search tools to use -- `timeout: float = 30.0`: Timeout for each search in seconds -- `include_rag: bool = False`: Whether to include RAG tool in searches -- `auto_ingest_to_rag: bool = True`: Whether to automatically ingest results into RAG -- `oauth_token: str | None = None`: Optional OAuth token from HuggingFace login (for RAG LLM) - -**Methods**: -- `async def execute(query: str, max_results_per_tool: int = 10) -> SearchResult`: Execute search across all tools in parallel - -**Features**: -- Uses `asyncio.gather()` with `return_exceptions=True` for parallel execution -- Aggregates results into `SearchResult` with evidence and metadata -- Handles tool failures gracefully (continues with other tools) -- Deduplicates results by URL -- Automatically ingests results into RAG if `auto_ingest_to_rag=True` -- Can add RAG tool dynamically via `add_rag_tool()` method - -## Tool Registration - -Tools are registered in the search handler: - -```python -from src.tools.pubmed import PubMedTool -from src.tools.clinicaltrials import ClinicalTrialsTool -from 
src.tools.europepmc import EuropePMCTool -from src.tools.search_handler import SearchHandler - -search_handler = SearchHandler( - tools=[ - PubMedTool(), - ClinicalTrialsTool(), - EuropePMCTool(), - ], - include_rag=True, # Include RAG tool for semantic search - auto_ingest_to_rag=True, # Automatically ingest results into RAG - oauth_token=token # Optional HuggingFace token for RAG LLM -) - -# Execute search -result = await search_handler.execute("query", max_results_per_tool=10) -``` - -## See Also - -- [Services](services.md) - RAG and embedding services -- [API Reference - Tools](../api/tools.md) - API documentation -- [Contributing - Implementation Patterns](../contributing/implementation-patterns.md) - Development guidelines diff --git a/docs/architecture/workflow-diagrams.md b/docs/architecture/workflow-diagrams.md deleted file mode 100644 index 1d22a77898faa66673192f8d8196e7efa88385da..0000000000000000000000000000000000000000 --- a/docs/architecture/workflow-diagrams.md +++ /dev/null @@ -1,655 +0,0 @@ -# DeepCritical Workflow - Simplified Magentic Architecture - -> **Architecture Pattern**: Microsoft Magentic Orchestration -> **Design Philosophy**: Simple, dynamic, manager-driven coordination -> **Key Innovation**: Intelligent manager replaces rigid sequential phases - ---- - -## 1. High-Level Magentic Workflow - -```mermaid -flowchart TD - Start([User Query]) --> Manager[Magentic Manager
Plan • Select • Assess • Adapt] - - Manager -->|Plans| Task1[Task Decomposition] - Task1 --> Manager - - Manager -->|Selects & Executes| HypAgent[Hypothesis Agent] - Manager -->|Selects & Executes| SearchAgent[Search Agent] - Manager -->|Selects & Executes| AnalysisAgent[Analysis Agent] - Manager -->|Selects & Executes| ReportAgent[Report Agent] - - HypAgent -->|Results| Manager - SearchAgent -->|Results| Manager - AnalysisAgent -->|Results| Manager - ReportAgent -->|Results| Manager - - Manager -->|Assesses Quality| Decision{Good Enough?} - Decision -->|No - Refine| Manager - Decision -->|No - Different Agent| Manager - Decision -->|No - Stalled| Replan[Reset Plan] - Replan --> Manager - - Decision -->|Yes| Synthesis[Synthesize Final Result] - Synthesis --> Output([Research Report]) - - style Start fill:#e1f5e1 - style Manager fill:#ffe6e6 - style HypAgent fill:#fff4e6 - style SearchAgent fill:#fff4e6 - style AnalysisAgent fill:#fff4e6 - style ReportAgent fill:#fff4e6 - style Decision fill:#ffd6d6 - style Synthesis fill:#d4edda - style Output fill:#e1f5e1 -``` - -## 2. Magentic Manager: The 6-Phase Cycle - -```mermaid -flowchart LR - P1[1. Planning
Analyze task
Create strategy] --> P2[2. Agent Selection
Pick best agent
for subtask] - P2 --> P3[3. Execution
Run selected
agent with tools] - P3 --> P4[4. Assessment
Evaluate quality
Check progress] - P4 --> Decision{Quality OK?
Progress made?} - Decision -->|Yes| P6[6. Synthesis
Combine results
Generate report] - Decision -->|No| P5[5. Iteration
Adjust plan
Try again] - P5 --> P2 - P6 --> Done([Complete]) - - style P1 fill:#fff4e6 - style P2 fill:#ffe6e6 - style P3 fill:#e6f3ff - style P4 fill:#ffd6d6 - style P5 fill:#fff3cd - style P6 fill:#d4edda - style Done fill:#e1f5e1 -``` - -## 3. Simplified Agent Architecture - -```mermaid -graph TB - subgraph "Orchestration Layer" - Manager[Magentic Manager
• Plans workflow
• Selects agents
• Assesses quality
• Adapts strategy] - SharedContext[(Shared Context
• Hypotheses
• Search Results
• Analysis
• Progress)] - Manager <--> SharedContext - end - - subgraph "Specialist Agents" - HypAgent[Hypothesis Agent
• Domain understanding
• Hypothesis generation
• Testability refinement] - SearchAgent[Search Agent
• Multi-source search
• RAG retrieval
• Result ranking] - AnalysisAgent[Analysis Agent
• Evidence extraction
• Statistical analysis
• Code execution] - ReportAgent[Report Agent
• Report assembly
• Visualization
• Citation formatting] - end - - subgraph "MCP Tools" - WebSearch[Web Search
PubMed • arXiv • bioRxiv] - CodeExec[Code Execution
Sandboxed Python] - RAG[RAG Retrieval
Vector DB • Embeddings] - Viz[Visualization
Charts • Graphs] - end - - Manager -->|Selects & Directs| HypAgent - Manager -->|Selects & Directs| SearchAgent - Manager -->|Selects & Directs| AnalysisAgent - Manager -->|Selects & Directs| ReportAgent - - HypAgent --> SharedContext - SearchAgent --> SharedContext - AnalysisAgent --> SharedContext - ReportAgent --> SharedContext - - SearchAgent --> WebSearch - SearchAgent --> RAG - AnalysisAgent --> CodeExec - ReportAgent --> CodeExec - ReportAgent --> Viz - - style Manager fill:#ffe6e6 - style SharedContext fill:#ffe6f0 - style HypAgent fill:#fff4e6 - style SearchAgent fill:#fff4e6 - style AnalysisAgent fill:#fff4e6 - style ReportAgent fill:#fff4e6 - style WebSearch fill:#e6f3ff - style CodeExec fill:#e6f3ff - style RAG fill:#e6f3ff - style Viz fill:#e6f3ff -``` - -## 4. Dynamic Workflow Example - -```mermaid -sequenceDiagram - participant User - participant Manager - participant HypAgent - participant SearchAgent - participant AnalysisAgent - participant ReportAgent - - User->>Manager: "Research protein folding in Alzheimer's" - - Note over Manager: PLAN: Generate hypotheses → Search → Analyze → Report - - Manager->>HypAgent: Generate 3 hypotheses - HypAgent-->>Manager: Returns 3 hypotheses - Note over Manager: ASSESS: Good quality, proceed - - Manager->>SearchAgent: Search literature for hypothesis 1 - SearchAgent-->>Manager: Returns 15 papers - Note over Manager: ASSESS: Good results, continue - - Manager->>SearchAgent: Search for hypothesis 2 - SearchAgent-->>Manager: Only 2 papers found - Note over Manager: ASSESS: Insufficient, refine search - - Manager->>SearchAgent: Refined query for hypothesis 2 - SearchAgent-->>Manager: Returns 12 papers - Note over Manager: ASSESS: Better, proceed - - Manager->>AnalysisAgent: Analyze evidence for all hypotheses - AnalysisAgent-->>Manager: Returns analysis with code - Note over Manager: ASSESS: Complete, generate report - - Manager->>ReportAgent: Create comprehensive report - ReportAgent-->>Manager: Returns formatted report - Note over Manager: SYNTHESIZE: Combine all results - - Manager->>User: Final Research Report -``` - -## 5. Manager Decision Logic - -```mermaid -flowchart TD - Start([Manager Receives Task]) --> Plan[Create Initial Plan] - - Plan --> Select[Select Agent for Next Subtask] - Select --> Execute[Execute Agent] - Execute --> Collect[Collect Results] - - Collect --> Assess[Assess Quality & Progress] - - Assess --> Q1{Quality Sufficient?} - Q1 -->|No| Q2{Same Agent Can Fix?} - Q2 -->|Yes| Feedback[Provide Specific Feedback] - Feedback --> Execute - Q2 -->|No| Different[Try Different Agent] - Different --> Select - - Q1 -->|Yes| Q3{Task Complete?} - Q3 -->|No| Q4{Making Progress?} - Q4 -->|Yes| Select - Q4 -->|No - Stalled| Replan[Reset Plan & Approach] - Replan --> Plan - - Q3 -->|Yes| Synth[Synthesize Final Result] - Synth --> Done([Return Report]) - - style Start fill:#e1f5e1 - style Plan fill:#fff4e6 - style Select fill:#ffe6e6 - style Execute fill:#e6f3ff - style Assess fill:#ffd6d6 - style Q1 fill:#ffe6e6 - style Q2 fill:#ffe6e6 - style Q3 fill:#ffe6e6 - style Q4 fill:#ffe6e6 - style Synth fill:#d4edda - style Done fill:#e1f5e1 -``` - -## 6. Hypothesis Agent Workflow - -```mermaid -flowchart LR - Input[Research Query] --> Domain[Identify Domain
& Key Concepts] - Domain --> Context[Retrieve Background
Knowledge] - Context --> Generate[Generate 3-5
Initial Hypotheses] - Generate --> Refine[Refine for
Testability] - Refine --> Rank[Rank by
Quality Score] - Rank --> Output[Return Top
Hypotheses] - - Output --> Struct[Hypothesis Structure:
• Statement
• Rationale
• Testability Score
• Data Requirements
• Expected Outcomes] - - style Input fill:#e1f5e1 - style Output fill:#fff4e6 - style Struct fill:#e6f3ff -``` - -## 7. Search Agent Workflow - -```mermaid -flowchart TD - Input[Hypotheses] --> Strategy[Formulate Search
Strategy per Hypothesis] - - Strategy --> Multi[Multi-Source Search] - - Multi --> PubMed[PubMed Search
via MCP] - Multi --> ArXiv[arXiv Search
via MCP] - Multi --> BioRxiv[bioRxiv Search
via MCP] - - PubMed --> Aggregate[Aggregate Results] - ArXiv --> Aggregate - BioRxiv --> Aggregate - - Aggregate --> Filter[Filter & Rank
by Relevance] - Filter --> Dedup[Deduplicate
Cross-Reference] - Dedup --> Embed[Embed Documents
via MCP] - Embed --> Vector[(Vector DB)] - Vector --> RAGRetrieval[RAG Retrieval
Top-K per Hypothesis] - RAGRetrieval --> Output[Return Contextualized
Search Results] - - style Input fill:#fff4e6 - style Multi fill:#ffe6e6 - style Vector fill:#ffe6f0 - style Output fill:#e6f3ff -``` - -## 8. Analysis Agent Workflow - -```mermaid -flowchart TD - Input1[Hypotheses] --> Extract - Input2[Search Results] --> Extract[Extract Evidence
per Hypothesis] - - Extract --> Methods[Determine Analysis
Methods Needed] - - Methods --> Branch{Requires
Computation?} - Branch -->|Yes| GenCode[Generate Python
Analysis Code] - Branch -->|No| Qual[Qualitative
Synthesis] - - GenCode --> Execute[Execute Code
via MCP Sandbox] - Execute --> Interpret1[Interpret
Results] - Qual --> Interpret2[Interpret
Findings] - - Interpret1 --> Synthesize[Synthesize Evidence
Across Sources] - Interpret2 --> Synthesize - - Synthesize --> Verdict[Determine Verdict
per Hypothesis] - Verdict --> Support[• Supported
• Refuted
• Inconclusive] - Support --> Gaps[Identify Knowledge
Gaps & Limitations] - Gaps --> Output[Return Analysis
Report] - - style Input1 fill:#fff4e6 - style Input2 fill:#e6f3ff - style Execute fill:#ffe6e6 - style Output fill:#e6ffe6 -``` - -## 9. Report Agent Workflow - -```mermaid -flowchart TD - Input1[Query] --> Assemble - Input2[Hypotheses] --> Assemble - Input3[Search Results] --> Assemble - Input4[Analysis] --> Assemble[Assemble Report
Sections] - - Assemble --> Exec[Executive Summary] - Assemble --> Intro[Introduction] - Assemble --> Methods[Methods] - Assemble --> Results[Results per
Hypothesis] - Assemble --> Discussion[Discussion] - Assemble --> Future[Future Directions] - Assemble --> Refs[References] - - Results --> VizCheck{Needs
Visualization?} - VizCheck -->|Yes| GenViz[Generate Viz Code] - GenViz --> ExecViz[Execute via MCP
Create Charts] - ExecViz --> Combine - VizCheck -->|No| Combine[Combine All
Sections] - - Exec --> Combine - Intro --> Combine - Methods --> Combine - Discussion --> Combine - Future --> Combine - Refs --> Combine - - Combine --> Format[Format Output] - Format --> MD[Markdown] - Format --> PDF[PDF] - Format --> JSON[JSON] - - MD --> Output[Return Final
Report] - PDF --> Output - JSON --> Output - - style Input1 fill:#e1f5e1 - style Input2 fill:#fff4e6 - style Input3 fill:#e6f3ff - style Input4 fill:#e6ffe6 - style Output fill:#d4edda -``` - -## 10. Data Flow & Event Streaming - -```mermaid -flowchart TD - User[👤 User] -->|Research Query| UI[Gradio UI] - UI -->|Submit| Manager[Magentic Manager] - - Manager -->|Event: Planning| UI - Manager -->|Select Agent| HypAgent[Hypothesis Agent] - HypAgent -->|Event: Delta/Message| UI - HypAgent -->|Hypotheses| Context[(Shared Context)] - - Context -->|Retrieved by| Manager - Manager -->|Select Agent| SearchAgent[Search Agent] - SearchAgent -->|MCP Request| WebSearch[Web Search Tool] - WebSearch -->|Results| SearchAgent - SearchAgent -->|Event: Delta/Message| UI - SearchAgent -->|Documents| Context - SearchAgent -->|Embeddings| VectorDB[(Vector DB)] - - Context -->|Retrieved by| Manager - Manager -->|Select Agent| AnalysisAgent[Analysis Agent] - AnalysisAgent -->|MCP Request| CodeExec[Code Execution Tool] - CodeExec -->|Results| AnalysisAgent - AnalysisAgent -->|Event: Delta/Message| UI - AnalysisAgent -->|Analysis| Context - - Context -->|Retrieved by| Manager - Manager -->|Select Agent| ReportAgent[Report Agent] - ReportAgent -->|MCP Request| CodeExec - ReportAgent -->|Event: Delta/Message| UI - ReportAgent -->|Report| Context - - Manager -->|Event: Final Result| UI - UI -->|Display| User - - style User fill:#e1f5e1 - style UI fill:#e6f3ff - style Manager fill:#ffe6e6 - style Context fill:#ffe6f0 - style VectorDB fill:#ffe6f0 - style WebSearch fill:#f0f0f0 - style CodeExec fill:#f0f0f0 -``` - -## 11. MCP Tool Architecture - -```mermaid -graph TB - subgraph "Agent Layer" - Manager[Magentic Manager] - HypAgent[Hypothesis Agent] - SearchAgent[Search Agent] - AnalysisAgent[Analysis Agent] - ReportAgent[Report Agent] - end - - subgraph "MCP Protocol Layer" - Registry[MCP Tool Registry
• Discovers tools
• Routes requests
• Manages connections] - end - - subgraph "MCP Servers" - Server1[Web Search Server
localhost:8001
• PubMed
• arXiv
• bioRxiv] - Server2[Code Execution Server
localhost:8002
• Sandboxed Python
• Package management] - Server3[RAG Server
localhost:8003
• Vector embeddings
• Similarity search] - Server4[Visualization Server
localhost:8004
• Chart generation
• Plot rendering] - end - - subgraph "External Services" - PubMed[PubMed API] - ArXiv[arXiv API] - BioRxiv[bioRxiv API] - Modal[Modal Sandbox] - ChromaDB[(ChromaDB)] - end - - SearchAgent -->|Request| Registry - AnalysisAgent -->|Request| Registry - ReportAgent -->|Request| Registry - - Registry --> Server1 - Registry --> Server2 - Registry --> Server3 - Registry --> Server4 - - Server1 --> PubMed - Server1 --> ArXiv - Server1 --> BioRxiv - Server2 --> Modal - Server3 --> ChromaDB - - style Manager fill:#ffe6e6 - style Registry fill:#fff4e6 - style Server1 fill:#e6f3ff - style Server2 fill:#e6f3ff - style Server3 fill:#e6f3ff - style Server4 fill:#e6f3ff -``` - -## 12. Progress Tracking & Stall Detection - -```mermaid -stateDiagram-v2 - [*] --> Initialization: User Query - - Initialization --> Planning: Manager starts - - Planning --> AgentExecution: Select agent - - AgentExecution --> Assessment: Collect results - - Assessment --> QualityCheck: Evaluate output - - QualityCheck --> AgentExecution: Poor quality
(retry < max_rounds) - QualityCheck --> Planning: Poor quality
(try different agent) - QualityCheck --> NextAgent: Good quality
(task incomplete) - QualityCheck --> Synthesis: Good quality
(task complete) - - NextAgent --> AgentExecution: Select next agent - - state StallDetection <<choice>> - Assessment --> StallDetection: Check progress - StallDetection --> Planning: No progress
(stall count < max) - StallDetection --> ErrorRecovery: No progress
(max stalls reached) - - ErrorRecovery --> PartialReport: Generate partial results - PartialReport --> [*] - - Synthesis --> FinalReport: Combine all outputs - FinalReport --> [*] - - note right of QualityCheck - Manager assesses: - • Output completeness - • Quality metrics - • Progress made - end note - - note right of StallDetection - Stall = no new progress - after agent execution - Triggers plan reset - end note -``` - -## 13. Gradio UI Integration - -```mermaid -graph TD - App[Gradio App
DeepCritical Research Agent] - - App --> Input[Input Section] - App --> Status[Status Section] - App --> Output[Output Section] - - Input --> Query[Research Question
Text Area] - Input --> Controls[Controls] - Controls --> MaxHyp[Max Hypotheses: 1-10] - Controls --> MaxRounds[Max Rounds: 5-20] - Controls --> Submit[Start Research Button] - - Status --> Log[Real-time Event Log
• Manager planning
• Agent selection
• Execution updates
• Quality assessment] - Status --> Progress[Progress Tracker
• Current agent
• Round count
• Stall count] - - Output --> Tabs[Tabbed Results] - Tabs --> Tab1[Hypotheses Tab
Generated hypotheses with scores] - Tabs --> Tab2[Search Results Tab
Papers & sources found] - Tabs --> Tab3[Analysis Tab
Evidence & verdicts] - Tabs --> Tab4[Report Tab
Final research report] - Tab4 --> Download[Download Report
MD / PDF / JSON] - - Submit -.->|Triggers| Workflow[Magentic Workflow] - Workflow -.->|MagenticOrchestratorMessageEvent| Log - Workflow -.->|MagenticAgentDeltaEvent| Log - Workflow -.->|MagenticAgentMessageEvent| Log - Workflow -.->|MagenticFinalResultEvent| Tab4 - - style App fill:#e1f5e1 - style Input fill:#fff4e6 - style Status fill:#e6f3ff - style Output fill:#e6ffe6 - style Workflow fill:#ffe6e6 -``` - -## 14. Complete System Context - -```mermaid -graph LR - User[👤 Researcher
Asks research questions] -->|Submits query| DC[DeepCritical
Magentic Workflow] - - DC -->|Literature search| PubMed[PubMed API
Medical papers] - DC -->|Preprint search| ArXiv[arXiv API
Scientific preprints] - DC -->|Biology search| BioRxiv[bioRxiv API
Biology preprints] - DC -->|Agent reasoning| Claude[Claude API
Sonnet 4 / Opus] - DC -->|Code execution| Modal[Modal Sandbox
Safe Python env] - DC -->|Vector storage| Chroma[ChromaDB
Embeddings & RAG] - - DC -->|Deployed on| HF[HuggingFace Spaces
Gradio 6.0] - - PubMed -->|Results| DC - ArXiv -->|Results| DC - BioRxiv -->|Results| DC - Claude -->|Responses| DC - Modal -->|Output| DC - Chroma -->|Context| DC - - DC -->|Research report| User - - style User fill:#e1f5e1 - style DC fill:#ffe6e6 - style PubMed fill:#e6f3ff - style ArXiv fill:#e6f3ff - style BioRxiv fill:#e6f3ff - style Claude fill:#ffd6d6 - style Modal fill:#f0f0f0 - style Chroma fill:#ffe6f0 - style HF fill:#d4edda -``` - -## 15. Workflow Timeline (Simplified) - -```mermaid -gantt - title DeepCritical Magentic Workflow - Typical Execution - dateFormat mm:ss - axisFormat %M:%S - - section Manager Planning - Initial planning :p1, 00:00, 10s - - section Hypothesis Agent - Generate hypotheses :h1, after p1, 30s - Manager assessment :h2, after h1, 5s - - section Search Agent - Search hypothesis 1 :s1, after h2, 20s - Search hypothesis 2 :s2, after s1, 20s - Search hypothesis 3 :s3, after s2, 20s - RAG processing :s4, after s3, 15s - Manager assessment :s5, after s4, 5s - - section Analysis Agent - Evidence extraction :a1, after s5, 15s - Code generation :a2, after a1, 20s - Code execution :a3, after a2, 25s - Synthesis :a4, after a3, 20s - Manager assessment :a5, after a4, 5s - - section Report Agent - Report assembly :r1, after a5, 30s - Visualization :r2, after r1, 15s - Formatting :r3, after r2, 10s - - section Manager Synthesis - Final synthesis :f1, after r3, 10s -``` - ---- - -## Key Differences from Original Design - -| Aspect | Original (Judge-in-Loop) | New (Magentic) | -|--------|-------------------------|----------------| -| **Control Flow** | Fixed sequential phases | Dynamic agent selection | -| **Quality Control** | Separate Judge Agent | Manager assessment built-in | -| **Retry Logic** | Phase-level with feedback | Agent-level with adaptation | -| **Flexibility** | Rigid 4-phase pipeline | Adaptive workflow | -| **Complexity** | 5 agents (including Judge) | 4 agents (no Judge) | -| **Progress Tracking** | Manual state management | Built-in round/stall detection | -| **Agent Coordination** | Sequential handoff | Manager-driven dynamic selection | -| **Error Recovery** | Retry same phase | Try different agent or replan | - ---- - -## Simplified Design Principles - -1. **Manager is Intelligent**: LLM-powered manager handles planning, selection, and quality assessment -2. **No Separate Judge**: Manager's assessment phase replaces dedicated Judge Agent -3. **Dynamic Workflow**: Agents can be called multiple times in any order based on need -4. **Built-in Safety**: max_round_count (15) and max_stall_count (3) prevent infinite loops -5. **Event-Driven UI**: Real-time streaming updates to Gradio interface -6. **MCP-Powered Tools**: All external capabilities via Model Context Protocol -7. **Shared Context**: Centralized state accessible to all agents -8. 
**Progress Awareness**: Manager tracks what's been done and what's needed - ---- - -## Legend - -- 🔴 **Red/Pink**: Manager, orchestration, decision-making -- 🟡 **Yellow/Orange**: Specialist agents, processing -- 🔵 **Blue**: Data, tools, MCP services -- 🟣 **Purple/Pink**: Storage, databases, state -- 🟢 **Green**: User interactions, final outputs -- ⚪ **Gray**: External services, APIs - ---- - -## Implementation Highlights - -**Simple 4-Agent Setup:** - - -[Magentic Workflow Builder](../src/orchestrator_magentic.py) start_line:72 end_line:99 - - -**Manager handles quality assessment in its instructions:** -- Checks hypothesis quality (testable, novel, clear) -- Validates search results (relevant, authoritative, recent) -- Assesses analysis soundness (methodology, evidence, conclusions) -- Ensures report completeness (all sections, proper citations) - -No separate Judge Agent needed - manager does it all! - ---- - -**Document Version**: 2.0 (Magentic Simplified) -**Last Updated**: 2025-11-24 -**Architecture**: Microsoft Magentic Orchestration Pattern -**Agents**: 4 (Hypothesis, Search, Analysis, Report) + 1 Manager -**License**: MIT - -## See Also - -- [Orchestrators](orchestrators.md) - Overview of all orchestrator patterns -- [Graph Orchestration](graph_orchestration.md) - Graph-based execution overview -- [API Reference - Orchestrators](../api/orchestrators.md) - API documentation \ No newline at end of file diff --git a/docs/configuration/index.md b/docs/configuration/index.md deleted file mode 100644 index 729345de47dee08be61b75fcf74e8ac91438ce21..0000000000000000000000000000000000000000 --- a/docs/configuration/index.md +++ /dev/null @@ -1,564 +0,0 @@ -# Configuration Guide - -## Overview - -DeepCritical uses **Pydantic Settings** for centralized configuration management. All settings are defined in the `Settings` class in `src/utils/config.py` and can be configured via environment variables or a `.env` file. - -The configuration system provides: - -- **Type Safety**: Strongly-typed fields with Pydantic validation -- **Environment File Support**: Automatically loads from `.env` file (if present) -- **Case-Insensitive**: Environment variables are case-insensitive -- **Singleton Pattern**: Global `settings` instance for easy access throughout the codebase -- **Validation**: Automatic validation on load with helpful error messages - -## Quick Start - -1. Create a `.env` file in the project root -2. Set at least one LLM API key (`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `HF_TOKEN`) -3. Optionally configure other services as needed -4. 
The application will automatically load and validate your configuration - -## Configuration System Architecture - -### Settings Class - -The [`Settings`][settings-class] class extends `BaseSettings` from `pydantic_settings` and defines all application configuration: - - -[Settings Class Definition](../src/utils/config.py) start_line:13 end_line:21 - - -[View source](https://github.com/DeepCritical/GradioDemo/blob/main/src/utils/config.py#L13-L21) - -### Singleton Instance - -A global `settings` instance is available for import: - - -[Singleton Instance](../src/utils/config.py) start_line:234 end_line:235 - - -[View source](https://github.com/DeepCritical/GradioDemo/blob/main/src/utils/config.py#L234-L235) - -### Usage Pattern - -Access configuration throughout the codebase: - -```python -from src.utils.config import settings - -# Check if API keys are available -if settings.has_openai_key: - # Use OpenAI - pass - -# Access configuration values -max_iterations = settings.max_iterations -web_search_provider = settings.web_search_provider -``` - -## Required Configuration - -### LLM Provider - -You must configure at least one LLM provider. The system supports: - -- **OpenAI**: Requires `OPENAI_API_KEY` -- **Anthropic**: Requires `ANTHROPIC_API_KEY` -- **HuggingFace**: Optional `HF_TOKEN` or `HUGGINGFACE_API_KEY` (can work without key for public models) - -#### OpenAI Configuration - -```bash -LLM_PROVIDER=openai -OPENAI_API_KEY=your_openai_api_key_here -OPENAI_MODEL=gpt-5.1 -``` - -The default model is defined in the `Settings` class: - - -[OpenAI Model Configuration](../src/utils/config.py) start_line:29 end_line:29 - - -#### Anthropic Configuration - -```bash -LLM_PROVIDER=anthropic -ANTHROPIC_API_KEY=your_anthropic_api_key_here -ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 -``` - -The default model is defined in the `Settings` class: - - -[Anthropic Model Configuration](../src/utils/config.py) start_line:30 end_line:32 - - -#### HuggingFace Configuration - -HuggingFace can work without an API key for public models, but an API key provides higher rate limits: - -```bash -# Option 1: Using HF_TOKEN (preferred) -HF_TOKEN=your_huggingface_token_here - -# Option 2: Using HUGGINGFACE_API_KEY (alternative) -HUGGINGFACE_API_KEY=your_huggingface_api_key_here - -# Default model -HUGGINGFACE_MODEL=meta-llama/Llama-3.1-8B-Instruct -``` - -The HuggingFace token can be set via either environment variable: - - -[HuggingFace Token Configuration](../src/utils/config.py) start_line:33 end_line:35 - - - -[HuggingFace API Key Configuration](../src/utils/config.py) start_line:57 end_line:59 - - -## Optional Configuration - -### Embedding Configuration - -DeepCritical supports multiple embedding providers for semantic search and RAG: - -```bash -# Embedding Provider: "openai", "local", or "huggingface" -EMBEDDING_PROVIDER=local - -# OpenAI Embedding Model (used by LlamaIndex RAG) -OPENAI_EMBEDDING_MODEL=text-embedding-3-small - -# Local Embedding Model (sentence-transformers, used by EmbeddingService) -LOCAL_EMBEDDING_MODEL=all-MiniLM-L6-v2 - -# HuggingFace Embedding Model -HUGGINGFACE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 -``` - -The embedding provider configuration: - - -[Embedding Provider Configuration](../src/utils/config.py) start_line:47 end_line:50 - - -**Note**: OpenAI embeddings require `OPENAI_API_KEY`. The local provider (default) uses sentence-transformers and requires no API key. 
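
As a hedged sketch of how these settings might be read in code (the `embedding_provider`, `openai_embedding_model`, and `huggingface_embedding_model` attribute names are assumed from the environment variables above; `local_embedding_model` and `has_openai_key` are referenced elsewhere in these docs):

```python
from src.utils.config import settings


def resolve_embedding_model() -> str:
    """Pick an embedding model name from the documented settings (illustrative only)."""
    if settings.embedding_provider == "openai" and settings.has_openai_key:
        return settings.openai_embedding_model  # assumed attribute name
    if settings.embedding_provider == "huggingface":
        return settings.huggingface_embedding_model  # assumed attribute name
    # Default: local sentence-transformers model, no API key required.
    return settings.local_embedding_model
```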
- -### Web Search Configuration - -DeepCritical supports multiple web search providers: - -```bash -# Web Search Provider: "serper", "searchxng", "brave", "tavily", or "duckduckgo" -# Default: "duckduckgo" (no API key required) -WEB_SEARCH_PROVIDER=duckduckgo - -# Serper API Key (for Google search via Serper) -SERPER_API_KEY=your_serper_api_key_here - -# SearchXNG Host URL (for self-hosted search) -SEARCHXNG_HOST=http://localhost:8080 - -# Brave Search API Key -BRAVE_API_KEY=your_brave_api_key_here - -# Tavily API Key -TAVILY_API_KEY=your_tavily_api_key_here -``` - -The web search provider configuration: - - -[Web Search Provider Configuration](../src/utils/config.py) start_line:71 end_line:74 - - -**Note**: DuckDuckGo is the default and requires no API key, making it ideal for development and testing. - -### PubMed Configuration - -PubMed search supports optional NCBI API key for higher rate limits: - -```bash -# NCBI API Key (optional, for higher rate limits: 10 req/sec vs 3 req/sec) -NCBI_API_KEY=your_ncbi_api_key_here -``` - -The PubMed tool uses this configuration: - - -[PubMed Tool Configuration](../src/tools/pubmed.py) start_line:22 end_line:29 - - -### Agent Configuration - -Control agent behavior and research loop execution: - -```bash -# Maximum iterations per research loop (1-50, default: 10) -MAX_ITERATIONS=10 - -# Search timeout in seconds -SEARCH_TIMEOUT=30 - -# Use graph-based execution for research flows -USE_GRAPH_EXECUTION=false -``` - -The agent configuration fields: - - -[Agent Configuration](../src/utils/config.py) start_line:80 end_line:85 - - -### Budget & Rate Limiting Configuration - -Control resource limits for research loops: - -```bash -# Default token budget per research loop (1000-1000000, default: 100000) -DEFAULT_TOKEN_LIMIT=100000 - -# Default time limit per research loop in minutes (1-120, default: 10) -DEFAULT_TIME_LIMIT_MINUTES=10 - -# Default iterations limit per research loop (1-50, default: 10) -DEFAULT_ITERATIONS_LIMIT=10 -``` - -The budget configuration with validation: - - -[Budget Configuration](../src/utils/config.py) start_line:87 end_line:105 - - -### RAG Service Configuration - -Configure the Retrieval-Augmented Generation service: - -```bash -# ChromaDB collection name for RAG -RAG_COLLECTION_NAME=deepcritical_evidence - -# Number of top results to retrieve from RAG (1-50, default: 5) -RAG_SIMILARITY_TOP_K=5 - -# Automatically ingest evidence into RAG -RAG_AUTO_INGEST=true -``` - -The RAG configuration: - - -[RAG Service Configuration](../src/utils/config.py) start_line:127 end_line:141 - - -### ChromaDB Configuration - -Configure the vector database for embeddings and RAG: - -```bash -# ChromaDB storage path -CHROMA_DB_PATH=./chroma_db - -# Whether to persist ChromaDB to disk -CHROMA_DB_PERSIST=true - -# ChromaDB server host (for remote ChromaDB, optional) -CHROMA_DB_HOST=localhost - -# ChromaDB server port (for remote ChromaDB, optional) -CHROMA_DB_PORT=8000 -``` - -The ChromaDB configuration: - - -[ChromaDB Configuration](../src/utils/config.py) start_line:113 end_line:125 - - -### External Services - -#### Modal Configuration - -Modal is used for secure sandbox execution of statistical analysis: - -```bash -# Modal Token ID (for Modal sandbox execution) -MODAL_TOKEN_ID=your_modal_token_id_here - -# Modal Token Secret -MODAL_TOKEN_SECRET=your_modal_token_secret_here -``` - -The Modal configuration: - - -[Modal Configuration](../src/utils/config.py) start_line:110 end_line:112 - - -### Logging Configuration - -Configure structured logging: 
- -```bash -# Log Level: "DEBUG", "INFO", "WARNING", or "ERROR" -LOG_LEVEL=INFO -``` - -The logging configuration: - - -[Logging Configuration](../src/utils/config.py) start_line:107 end_line:108 - - -Logging is configured via the `configure_logging()` function: - - -[Configure Logging Function](../src/utils/config.py) start_line:212 end_line:231 - - -## Configuration Properties - -The `Settings` class provides helpful properties for checking configuration state: - -### API Key Availability - -Check which API keys are available: - - -[API Key Availability Properties](../src/utils/config.py) start_line:171 end_line:189 - - -**Usage:** - -```python -from src.utils.config import settings - -# Check API key availability -if settings.has_openai_key: - # Use OpenAI - pass - -if settings.has_anthropic_key: - # Use Anthropic - pass - -if settings.has_huggingface_key: - # Use HuggingFace - pass - -if settings.has_any_llm_key: - # At least one LLM is available - pass -``` - -### Service Availability - -Check if external services are configured: - - -[Modal Availability Property](../src/utils/config.py) start_line:143 end_line:146 - - - -[Web Search Availability Property](../src/utils/config.py) start_line:191 end_line:204 - - -**Usage:** - -```python -from src.utils.config import settings - -# Check service availability -if settings.modal_available: - # Use Modal sandbox - pass - -if settings.web_search_available: - # Web search is configured - pass -``` - -### API Key Retrieval - -Get the API key for the configured provider: - - -[Get API Key Method](../src/utils/config.py) start_line:148 end_line:160 - - -For OpenAI-specific operations (e.g., Magentic mode): - - -[Get OpenAI API Key Method](../src/utils/config.py) start_line:162 end_line:169 - - -## Configuration Usage in Codebase - -The configuration system is used throughout the codebase: - -### LLM Factory - -The LLM factory uses settings to create appropriate models: - - -[LLM Factory Usage](../src/utils/llm_factory.py) start_line:129 end_line:144 - - -### Embedding Service - -The embedding service uses local embedding model configuration: - - -[Embedding Service Usage](../src/services/embeddings.py) start_line:29 end_line:31 - - -### Orchestrator Factory - -The orchestrator factory uses settings to determine mode: - - -[Orchestrator Factory Mode Detection](../src/orchestrator_factory.py) start_line:69 end_line:80 - - -## Environment Variables Reference - -### Required (at least one LLM) - -- `OPENAI_API_KEY` - OpenAI API key (required for OpenAI provider) -- `ANTHROPIC_API_KEY` - Anthropic API key (required for Anthropic provider) -- `HF_TOKEN` or `HUGGINGFACE_API_KEY` - HuggingFace API token (optional, can work without for public models) - -#### LLM Configuration Variables - -- `LLM_PROVIDER` - Provider to use: `"openai"`, `"anthropic"`, or `"huggingface"` (default: `"huggingface"`) -- `OPENAI_MODEL` - OpenAI model name (default: `"gpt-5.1"`) -- `ANTHROPIC_MODEL` - Anthropic model name (default: `"claude-sonnet-4-5-20250929"`) -- `HUGGINGFACE_MODEL` - HuggingFace model ID (default: `"meta-llama/Llama-3.1-8B-Instruct"`) - -#### Embedding Configuration Variables - -- `EMBEDDING_PROVIDER` - Provider: `"openai"`, `"local"`, or `"huggingface"` (default: `"local"`) -- `OPENAI_EMBEDDING_MODEL` - OpenAI embedding model (default: `"text-embedding-3-small"`) -- `LOCAL_EMBEDDING_MODEL` - Local sentence-transformers model (default: `"all-MiniLM-L6-v2"`) -- `HUGGINGFACE_EMBEDDING_MODEL` - HuggingFace embedding model (default: 
`"sentence-transformers/all-MiniLM-L6-v2"`) - -#### Web Search Configuration Variables - -- `WEB_SEARCH_PROVIDER` - Provider: `"serper"`, `"searchxng"`, `"brave"`, `"tavily"`, or `"duckduckgo"` (default: `"duckduckgo"`) -- `SERPER_API_KEY` - Serper API key (required for Serper provider) -- `SEARCHXNG_HOST` - SearchXNG host URL (required for SearchXNG provider) -- `BRAVE_API_KEY` - Brave Search API key (required for Brave provider) -- `TAVILY_API_KEY` - Tavily API key (required for Tavily provider) - -#### PubMed Configuration Variables - -- `NCBI_API_KEY` - NCBI API key (optional, increases rate limit from 3 to 10 req/sec) - -#### Agent Configuration Variables - -- `MAX_ITERATIONS` - Maximum iterations per research loop (1-50, default: `10`) -- `SEARCH_TIMEOUT` - Search timeout in seconds (default: `30`) -- `USE_GRAPH_EXECUTION` - Use graph-based execution (default: `false`) - -#### Budget Configuration Variables - -- `DEFAULT_TOKEN_LIMIT` - Default token budget per research loop (1000-1000000, default: `100000`) -- `DEFAULT_TIME_LIMIT_MINUTES` - Default time limit in minutes (1-120, default: `10`) -- `DEFAULT_ITERATIONS_LIMIT` - Default iterations limit (1-50, default: `10`) - -#### RAG Configuration Variables - -- `RAG_COLLECTION_NAME` - ChromaDB collection name (default: `"deepcritical_evidence"`) -- `RAG_SIMILARITY_TOP_K` - Number of top results to retrieve (1-50, default: `5`) -- `RAG_AUTO_INGEST` - Automatically ingest evidence into RAG (default: `true`) - -#### ChromaDB Configuration Variables - -- `CHROMA_DB_PATH` - ChromaDB storage path (default: `"./chroma_db"`) -- `CHROMA_DB_PERSIST` - Whether to persist ChromaDB to disk (default: `true`) -- `CHROMA_DB_HOST` - ChromaDB server host (optional, for remote ChromaDB) -- `CHROMA_DB_PORT` - ChromaDB server port (optional, for remote ChromaDB) - -#### External Services Variables - -- `MODAL_TOKEN_ID` - Modal token ID (optional, for Modal sandbox execution) -- `MODAL_TOKEN_SECRET` - Modal token secret (optional, for Modal sandbox execution) - -#### Logging Configuration Variables - -- `LOG_LEVEL` - Log level: `"DEBUG"`, `"INFO"`, `"WARNING"`, or `"ERROR"` (default: `"INFO"`) - -## Validation - -Settings are validated on load using Pydantic validation: - -- **Type Checking**: All fields are strongly typed -- **Range Validation**: Numeric fields have min/max constraints (e.g., `ge=1, le=50` for `max_iterations`) -- **Literal Validation**: Enum fields only accept specific values (e.g., `Literal["openai", "anthropic", "huggingface"]`) -- **Required Fields**: API keys are checked when accessed via `get_api_key()` or `get_openai_api_key()` - -### Validation Examples - -The `max_iterations` field has range validation: - - -[Max Iterations Validation](../src/utils/config.py) start_line:81 end_line:81 - - -The `llm_provider` field has literal validation: - - -[LLM Provider Literal Validation](../src/utils/config.py) start_line:26 end_line:28 - - -## Error Handling - -Configuration errors raise `ConfigurationError` from `src/utils/exceptions.py`: - -```22:25:src/utils/exceptions.py -class ConfigurationError(DeepCriticalError): - """Raised when configuration is invalid.""" - - pass -``` - -### Error Handling Example - -```python -from src.utils.config import settings -from src.utils.exceptions import ConfigurationError - -try: - api_key = settings.get_api_key() -except ConfigurationError as e: - print(f"Configuration error: {e}") -``` - -### Common Configuration Errors - -1. 
**Missing API Key**: When `get_api_key()` is called but the required API key is not set -2. **Invalid Provider**: When `llm_provider` is set to an unsupported value -3. **Out of Range**: When numeric values exceed their min/max constraints -4. **Invalid Literal**: When enum fields receive unsupported values - -## Configuration Best Practices - -1. **Use `.env` File**: Store sensitive keys in `.env` file (add to `.gitignore`) -2. **Check Availability**: Use properties like `has_openai_key` before accessing API keys -3. **Handle Errors**: Always catch `ConfigurationError` when calling `get_api_key()` -4. **Validate Early**: Configuration is validated on import, so errors surface immediately -5. **Use Defaults**: Leverage sensible defaults for optional configuration - -## Future Enhancements - -The following configurations are planned for future phases: - -1. **Additional LLM Providers**: DeepSeek, OpenRouter, Gemini, Perplexity, Azure OpenAI, Local models -2. **Model Selection**: Reasoning/main/fast model configuration -3. **Service Integration**: Additional service integrations and configurations diff --git a/docs/contributing/code-quality.md b/docs/contributing/code-quality.md deleted file mode 100644 index 8e8bcf54bf5a77296abcb3581730e07c3d489274..0000000000000000000000000000000000000000 --- a/docs/contributing/code-quality.md +++ /dev/null @@ -1,120 +0,0 @@ -# Code Quality & Documentation - -This document outlines code quality standards and documentation requirements for The DETERMINATOR. - -## Linting - -- Ruff with 100-char line length -- Ignore rules documented in `pyproject.toml`: - - `PLR0913`: Too many arguments (agents need many params) - - `PLR0912`: Too many branches (complex orchestrator logic) - - `PLR0911`: Too many return statements (complex agent logic) - - `PLR2004`: Magic values (statistical constants) - - `PLW0603`: Global statement (singleton pattern) - - `PLC0415`: Lazy imports for optional dependencies - - `E402`: Module level import not at top (needed for pytest.importorskip) - - `E501`: Line too long (ignore line length violations) - - `RUF100`: Unused noqa (version differences between local/CI) - -## Type Checking - -- `mypy --strict` compliance -- `ignore_missing_imports = true` (for optional dependencies) -- Exclude: `reference_repos/`, `examples/` -- All functions must have complete type annotations - -## Pre-commit - -Pre-commit hooks run automatically on commit to ensure code quality. Configuration is in `.pre-commit-config.yaml`. - -### Installation - -```bash -# Install dependencies (includes pre-commit package) -uv sync --all-extras - -# Set up git hooks (must be run separately) -uv run pre-commit install -``` - -**Note**: `uv sync --all-extras` installs the pre-commit package, but you must run `uv run pre-commit install` separately to set up the git hooks. - -### Pre-commit Hooks - -The following hooks run automatically on commit: - -1. **ruff**: Lints code and fixes issues automatically - - Runs on: `src/` (excludes `tests/`, `reference_repos/`) - - Auto-fixes: Yes - -2. **ruff-format**: Formats code with ruff - - Runs on: `src/` (excludes `tests/`, `reference_repos/`) - - Auto-fixes: Yes - -3. **mypy**: Type checking - - Runs on: `src/` (excludes `folder/`) - - Additional dependencies: pydantic, pydantic-settings, tenacity, pydantic-ai - -4. **pytest-unit**: Runs unit tests (excludes OpenAI and embedding_provider tests) - - Runs: `tests/unit/` with `-m "not openai and not embedding_provider"` - - Always runs: Yes (not just on changed files) - -5. 
**pytest-local-embeddings**: Runs local embedding tests - - Runs: `tests/` with `-m "local_embeddings"` - - Always runs: Yes - -### Manual Pre-commit Run - -To run pre-commit hooks manually (without committing): - -```bash -uv run pre-commit run --all-files -``` - -### Troubleshooting - -- **Hooks failing**: Fix the issues shown in the output, then commit again -- **Skipping hooks**: Use `git commit --no-verify` (not recommended) -- **Hook not running**: Ensure hooks are installed with `uv run pre-commit install` -- **Type errors**: Check that all dependencies are installed with `uv sync --all-extras` - -## Documentation - -### Building Documentation - -Documentation is built using MkDocs. Source files are in `docs/`, and the configuration is in `mkdocs.yml`. - -```bash -# Build documentation -uv run mkdocs build - -# Serve documentation locally (http://127.0.0.1:8000) -uv run mkdocs serve -``` - -The documentation site is published at: - -### Docstrings - -- Google-style docstrings for all public functions -- Include Args, Returns, Raises sections -- Use type hints in docstrings only if needed for clarity - -Example: - - -[Search Method Docstring Example](../src/tools/pubmed.py) start_line:51 end_line:70 - - -### Code Comments - -- Explain WHY, not WHAT -- Document non-obvious patterns (e.g., why `requests` not `httpx` for ClinicalTrials) -- Mark critical sections: `# CRITICAL: ...` -- Document rate limiting rationale -- Explain async patterns when non-obvious - -## See Also - -- [Code Style](code-style.md) - Code style guidelines -- [Testing](testing.md) - Testing guidelines diff --git a/docs/contributing/code-style.md b/docs/contributing/code-style.md deleted file mode 100644 index e6f0b32ca5a90082e9c2789738f7ee2c44aab35f..0000000000000000000000000000000000000000 --- a/docs/contributing/code-style.md +++ /dev/null @@ -1,83 +0,0 @@ -# Code Style & Conventions - -This document outlines the code style and conventions for The DETERMINATOR. - -## Package Manager - -This project uses [`uv`](https://github.com/astral-sh/uv) as the package manager. All commands should be prefixed with `uv run` to ensure they run in the correct environment. - -### Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync all dependencies including dev extras -uv sync --all-extras -``` - -### Running Commands - -All development commands should use `uv run` prefix: - -```bash -# Instead of: pytest tests/ -uv run pytest tests/ - -# Instead of: ruff check src -uv run ruff check src - -# Instead of: mypy src -uv run mypy src -``` - -This ensures commands run in the correct virtual environment managed by `uv`. 
- -## Type Safety - -- **ALWAYS** use type hints for all function parameters and return types -- Use `mypy --strict` compliance (no `Any` unless absolutely necessary) -- Use `TYPE_CHECKING` imports for circular dependencies: - - -[TYPE_CHECKING Import Pattern](../src/utils/citation_validator.py) start_line:8 end_line:11 - - -## Pydantic Models - -- All data exchange uses Pydantic models (`src/utils/models.py`) -- Models are frozen (`model_config = {"frozen": True}`) for immutability -- Use `Field()` with descriptions for all model fields -- Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints - -## Async Patterns - -- **ALL** I/O operations must be async (`async def`, `await`) -- Use `asyncio.gather()` for parallel operations -- CPU-bound work (embeddings, parsing) must use `run_in_executor()`: - -```python -loop = asyncio.get_running_loop() -result = await loop.run_in_executor(None, cpu_bound_function, args) -``` - -- Never block the event loop with synchronous I/O - -## Common Pitfalls - -1. **Blocking the event loop**: Never use sync I/O in async functions -2. **Missing type hints**: All functions must have complete type annotations -3. **Global mutable state**: Use ContextVar or pass via parameters -4. **Import errors**: Lazy-load optional dependencies (magentic, modal, embeddings) - -## See Also - -- [Error Handling](error-handling.md) - Error handling guidelines -- [Implementation Patterns](implementation-patterns.md) - Common patterns diff --git a/docs/contributing/error-handling.md b/docs/contributing/error-handling.md deleted file mode 100644 index 626fec9f5a5a7786813943d38c0673a3cb24d3c7..0000000000000000000000000000000000000000 --- a/docs/contributing/error-handling.md +++ /dev/null @@ -1,54 +0,0 @@ -# Error Handling & Logging - -This document outlines error handling and logging conventions for The DETERMINATOR. - -## Exception Hierarchy - -Use custom exception hierarchy (`src/utils/exceptions.py`): - - -[Exception Hierarchy](../src/utils/exceptions.py) start_line:4 end_line:31 - - -## Error Handling Rules - -- Always chain exceptions: `raise SearchError(...) from e` -- Log errors with context using `structlog`: - -```python -logger.error("Operation failed", error=str(e), context=value) -``` - -- Never silently swallow exceptions -- Provide actionable error messages - -## Logging - -- Use `structlog` for all logging (NOT `print` or `logging`) -- Import: `import structlog; logger = structlog.get_logger()` -- Log with structured data: `logger.info("event", key=value)` -- Use appropriate levels: DEBUG, INFO, WARNING, ERROR - -## Logging Examples - -```python -logger.info("Starting search", query=query, tools=[t.name for t in tools]) -logger.warning("Search tool failed", tool=tool.name, error=str(result)) -logger.error("Assessment failed", error=str(e)) -``` - -## Error Chaining - -Always preserve exception context: - -```python -try: - result = await api_call() -except httpx.HTTPError as e: - raise SearchError(f"API call failed: {e}") from e -``` - -## See Also - -- [Code Style](code-style.md) - Code style guidelines -- [Testing](testing.md) - Testing guidelines diff --git a/docs/contributing/implementation-patterns.md b/docs/contributing/implementation-patterns.md deleted file mode 100644 index 8590ec3ea4b226b13a388d027a52e70984ea553c..0000000000000000000000000000000000000000 --- a/docs/contributing/implementation-patterns.md +++ /dev/null @@ -1,67 +0,0 @@ -# Implementation Patterns - -This document outlines common implementation patterns used in The DETERMINATOR. 
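Several of the patterns described below (protocol-typed tools, factory functions, `@lru_cache` singletons) recur throughout the codebase. As a quick orientation before the detailed sections, here is a hedged, self-contained sketch using illustrative names only — it is not the project's actual `SearchTool`, factory, or service code:

```python
from functools import lru_cache
from typing import Protocol


class Fetcher(Protocol):
    """Structural type: anything with a matching async fetch() satisfies it."""

    async def fetch(self, query: str) -> list[str]: ...


class HttpFetcher:
    """One concrete implementation; real tools would call an external API here."""

    async def fetch(self, query: str) -> list[str]:
        return [f"result for {query}"]


def create_fetcher() -> Fetcher:
    """Factory function: callers depend on the protocol, not the concrete class."""
    return HttpFetcher()


@lru_cache(maxsize=1)
def get_shared_fetcher() -> Fetcher:
    """Singleton via lru_cache: built lazily on first use, then reused."""
    return create_fetcher()
```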
- -## Search Tools - -All tools implement `SearchTool` protocol (`src/tools/base.py`): - -- Must have `name` property -- Must implement `async def search(query, max_results) -> list[Evidence]` -- Use `@retry` decorator from tenacity for resilience -- Rate limiting: Implement `_rate_limit()` for APIs with limits (e.g., PubMed) -- Error handling: Raise `SearchError` or `RateLimitError` on failures - -Example pattern: - -```python -class MySearchTool: - @property - def name(self) -> str: - return "mytool" - - @retry(stop=stop_after_attempt(3), wait=wait_exponential(...)) - async def search(self, query: str, max_results: int = 10) -> list[Evidence]: - # Implementation - return evidence_list -``` - -## Judge Handlers - -- Implement `JudgeHandlerProtocol` (`async def assess(question, evidence) -> JudgeAssessment`) -- Use pydantic-ai `Agent` with `output_type=JudgeAssessment` -- System prompts in `src/prompts/judge.py` -- Support fallback handlers: `MockJudgeHandler`, `HFInferenceJudgeHandler` -- Always return valid `JudgeAssessment` (never raise exceptions) - -## Agent Factory Pattern - -- Use factory functions for creating agents (`src/agent_factory/`) -- Lazy initialization for optional dependencies (e.g., embeddings, Modal) -- Check requirements before initialization: - - -[Check Magentic Requirements](../src/utils/llm_factory.py) start_line:152 end_line:170 - - -## State Management - -- **Magentic Mode**: Use `ContextVar` for thread-safe state (`src/agents/state.py`) -- **Simple Mode**: Pass state via function parameters -- Never use global mutable state (except singletons via `@lru_cache`) - -## Singleton Pattern - -Use `@lru_cache(maxsize=1)` for singletons: - - -[Singleton Pattern Example](../src/services/statistical_analyzer.py) start_line:252 end_line:255 - - -- Lazy initialization to avoid requiring dependencies at import time - -## See Also - -- [Code Style](code-style.md) - Code style guidelines -- [Error Handling](error-handling.md) - Error handling guidelines - diff --git a/docs/contributing/index.md b/docs/contributing/index.md deleted file mode 100644 index 466fcaf16f41e49dc36201307f95204ee7b9ef6c..0000000000000000000000000000000000000000 --- a/docs/contributing/index.md +++ /dev/null @@ -1,254 +0,0 @@ -# Contributing to The DETERMINATOR - -Thank you for your interest in contributing to The DETERMINATOR! This guide will help you get started. - -> **Note on Project Names**: "The DETERMINATOR" is the product name, "DeepCritical" is the organization/project name, and "determinator" is the Python package name. 
- -## Git Workflow - -- `main`: Production-ready (GitHub) -- `dev`: Development integration (GitHub) -- Use feature branches: `yourname-dev` -- **NEVER** push directly to `main` or `dev` on HuggingFace -- GitHub is source of truth; HuggingFace is for deployment - -## Repository Information - -- **GitHub Repository**: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) (source of truth, PRs, code review) -- **HuggingFace Space**: [`DataQuests/DeepCritical`](https://huggingface.co/spaces/DataQuests/DeepCritical) (deployment/demo) -- **Package Name**: `determinator` (Python package name in `pyproject.toml`) - -### Dual Repository Setup - -This project uses a dual repository setup: - -- **GitHub (`DeepCritical/GradioDemo`)**: Source of truth for code, PRs, and code review -- **HuggingFace (`DataQuests/DeepCritical`)**: Deployment target for the Gradio demo - -#### Remote Configuration - -When cloning, set up remotes as follows: - -```bash -# Clone from GitHub -git clone https://github.com/DeepCritical/GradioDemo.git -cd GradioDemo - -# Add HuggingFace remote (optional, for deployment) -git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/DeepCritical -``` - -**Important**: Never push directly to `main` or `dev` on HuggingFace. Always work through GitHub PRs. GitHub is the source of truth; HuggingFace is for deployment/demo only. - -## Package Manager - -This project uses [`uv`](https://github.com/astral-sh/uv) as the package manager. All commands should be prefixed with `uv run` to ensure they run in the correct environment. - -### Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync all dependencies including dev extras -uv sync --all-extras - -# Install pre-commit hooks -uv run pre-commit install -``` - -## Development Commands - -```bash -# Installation -uv sync --all-extras # Install all dependencies including dev -uv run pre-commit install # Install pre-commit hooks - -# Code Quality Checks (run all before committing) -uv run ruff check src tests # Lint with ruff -uv run ruff format src tests # Format with ruff -uv run mypy src # Type checking -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with coverage - -# Testing Commands -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire # Run unit tests (excludes OpenAI tests) -uv run pytest tests/ -v -m "huggingface" -p no:logfire # Run HuggingFace tests -uv run pytest tests/ -v -p no:logfire # Run all tests -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with terminal coverage -uv run pytest --cov=src --cov-report=html -p no:logfire # Generate HTML coverage report (opens htmlcov/index.html) - -# Documentation Commands -uv run mkdocs build # Build documentation -uv run mkdocs serve # Serve documentation locally (http://127.0.0.1:8000) -``` - -### Test Markers - -The project uses pytest markers to categorize tests. 
See [Testing Guidelines](testing.md) for details: - -- `unit`: Unit tests (mocked, fast) -- `integration`: Integration tests (real APIs) -- `slow`: Slow tests -- `openai`: Tests requiring OpenAI API key -- `huggingface`: Tests requiring HuggingFace API key -- `embedding_provider`: Tests requiring API-based embedding providers -- `local_embeddings`: Tests using local embeddings - -**Note**: The `-p no:logfire` flag disables the logfire plugin to avoid conflicts during testing. - -## Getting Started - -1. **Fork the repository** on GitHub: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) - -2. **Clone your fork**: - - ```bash - git clone https://github.com/yourusername/GradioDemo.git - cd GradioDemo - ``` - -3. **Install dependencies**: - - ```bash - uv sync --all-extras - uv run pre-commit install - ``` - -4. **Create a feature branch**: - - ```bash - git checkout -b yourname-feature-name - ``` - -5. **Make your changes** following the guidelines below - -6. **Run checks**: - - ```bash - uv run ruff check src tests - uv run mypy src - uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire - ``` - -7. **Commit and push**: - - ```bash - git commit -m "Description of changes" - git push origin yourname-feature-name - ``` - -8. **Create a pull request** on GitHub - -## Development Guidelines - -### Code Style - -- Follow [Code Style Guidelines](code-style.md) -- All code must pass `mypy --strict` -- Use `ruff` for linting and formatting -- Line length: 100 characters - -### Error Handling - -- Follow [Error Handling Guidelines](error-handling.md) -- Always chain exceptions: `raise SearchError(...) from e` -- Use structured logging with `structlog` -- Never silently swallow exceptions - -### Testing - -- Follow [Testing Guidelines](testing.md) -- Write tests before implementation (TDD) -- Aim for >80% coverage on critical paths -- Use markers: `unit`, `integration`, `slow` - -### Implementation Patterns - -- Follow [Implementation Patterns](implementation-patterns.md) -- Use factory functions for agent/tool creation -- Implement protocols for extensibility -- Use singleton pattern with `@lru_cache(maxsize=1)` - -### Prompt Engineering - -- Follow [Prompt Engineering Guidelines](prompt-engineering.md) -- Always validate citations -- Use diverse evidence selection -- Never trust LLM-generated citations without validation - -### Code Quality - -- Follow [Code Quality Guidelines](code-quality.md) -- Google-style docstrings for all public functions -- Explain WHY, not WHAT in comments -- Mark critical sections: `# CRITICAL: ...` - -## MCP Integration - -### MCP Tools - -- Functions in `src/mcp_tools.py` for Claude Desktop -- Full type hints required -- Google-style docstrings with Args/Returns sections -- Formatted string returns (markdown) - -### Gradio MCP Server - -- Enable with `mcp_server=True` in `demo.launch()` -- Endpoint: `/gradio_api/mcp/` -- Use `ssr_mode=False` to fix hydration issues in HF Spaces - -## Common Pitfalls - -1. **Blocking the event loop**: Never use sync I/O in async functions -2. **Missing type hints**: All functions must have complete type annotations -3. **Hallucinated citations**: Always validate references -4. **Global mutable state**: Use ContextVar or pass via parameters -5. **Import errors**: Lazy-load optional dependencies (magentic, modal, embeddings) -6. **Rate limiting**: Always implement for external APIs -7. **Error chaining**: Always use `from e` when raising exceptions - -## Key Principles - -1. 
**Type Safety First**: All code must pass `mypy --strict` -2. **Async Everything**: All I/O must be async -3. **Test-Driven**: Write tests before implementation -4. **No Hallucinations**: Validate all citations -5. **Graceful Degradation**: Support free tier (HF Inference) when no API keys -6. **Lazy Loading**: Don't require optional dependencies at import time -7. **Structured Logging**: Use structlog, never print() -8. **Error Chaining**: Always preserve exception context - -## Pull Request Process - -1. Ensure all checks pass: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` -2. Update documentation if needed -3. Add tests for new features -4. Update CHANGELOG if applicable -5. Request review from maintainers -6. Address review feedback -7. Wait for approval before merging - -## Project Structure - -- `src/`: Main source code -- `tests/`: Test files (`unit/` and `integration/`) -- `docs/`: Documentation source files (MkDocs) -- `examples/`: Example usage scripts -- `pyproject.toml`: Project configuration and dependencies -- `.pre-commit-config.yaml`: Pre-commit hook configuration - -## Questions? - -- Open an issue on [GitHub](https://github.com/DeepCritical/GradioDemo) -- Check existing [documentation](https://deepcritical.github.io/GradioDemo/) -- Review code examples in the codebase - -Thank you for contributing to The DETERMINATOR! diff --git a/docs/contributing/prompt-engineering.md b/docs/contributing/prompt-engineering.md deleted file mode 100644 index c90cd094112854a3eda7b05f705871ce77b6874f..0000000000000000000000000000000000000000 --- a/docs/contributing/prompt-engineering.md +++ /dev/null @@ -1,55 +0,0 @@ -# Prompt Engineering & Citation Validation - -This document outlines prompt engineering guidelines and citation validation rules. - -## Judge Prompts - -- System prompt in `src/prompts/judge.py` -- Format evidence with truncation (1500 chars per item) -- Handle empty evidence case separately -- Always request structured JSON output -- Use `format_user_prompt()` and `format_empty_evidence_prompt()` helpers - -## Hypothesis Prompts - -- Use diverse evidence selection (MMR algorithm) -- Sentence-aware truncation (`truncate_at_sentence()`) -- Format: Drug → Target → Pathway → Effect -- System prompt emphasizes mechanistic reasoning -- Use `format_hypothesis_prompt()` with embeddings for diversity - -## Report Prompts - -- Include full citation details for validation -- Use diverse evidence selection (n=20) -- **CRITICAL**: Emphasize citation validation rules -- Format hypotheses with support/contradiction counts -- System prompt includes explicit JSON structure requirements - -## Citation Validation - -- **ALWAYS** validate references before returning reports -- Use `validate_references()` from `src/utils/citation_validator.py` -- Remove hallucinated citations (URLs not in evidence) -- Log warnings for removed citations -- Never trust LLM-generated citations without validation - -## Citation Validation Rules - -1. Every reference URL must EXACTLY match a provided evidence URL -2. Do NOT invent, fabricate, or hallucinate any references -3. Do NOT modify paper titles, authors, dates, or URLs -4. If unsure about a citation, OMIT it rather than guess -5. 
Copy URLs exactly as provided - do not create similar-looking URLs - -## Evidence Selection - -- Use `select_diverse_evidence()` for MMR-based selection -- Balance relevance vs diversity (lambda=0.7 default) -- Sentence-aware truncation preserves meaning -- Limit evidence per prompt to avoid context overflow - -## See Also - -- [Code Quality](code-quality.md) - Code quality guidelines -- [Error Handling](error-handling.md) - Error handling guidelines diff --git a/docs/contributing/testing.md b/docs/contributing/testing.md deleted file mode 100644 index 38149535306ae90071238d61bdb67ed1e21e3718..0000000000000000000000000000000000000000 --- a/docs/contributing/testing.md +++ /dev/null @@ -1,115 +0,0 @@ -# Testing Requirements - -This document outlines testing requirements and guidelines for The DETERMINATOR. - -## Test Structure - -- Unit tests in `tests/unit/` (mocked, fast) -- Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`) -- Use markers: `unit`, `integration`, `slow`, `openai`, `huggingface`, `embedding_provider`, `local_embeddings` - -## Test Markers - -The project uses pytest markers to categorize tests. These markers are defined in `pyproject.toml`: - -- `@pytest.mark.unit`: Unit tests (mocked, fast) - Run with `-m "unit"` -- `@pytest.mark.integration`: Integration tests (real APIs) - Run with `-m "integration"` -- `@pytest.mark.slow`: Slow tests - Run with `-m "slow"` -- `@pytest.mark.openai`: Tests requiring OpenAI API key - Run with `-m "openai"` or exclude with `-m "not openai"` -- `@pytest.mark.huggingface`: Tests requiring HuggingFace API key or using HuggingFace models - Run with `-m "huggingface"` -- `@pytest.mark.embedding_provider`: Tests requiring API-based embedding providers (OpenAI, etc.) - Run with `-m "embedding_provider"` -- `@pytest.mark.local_embeddings`: Tests using local embeddings (sentence-transformers, ChromaDB) - Run with `-m "local_embeddings"` - -### Running Tests by Marker - -```bash -# Run only unit tests (excludes OpenAI tests by default) -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire - -# Run HuggingFace tests -uv run pytest tests/ -v -m "huggingface" -p no:logfire - -# Run all tests -uv run pytest tests/ -v -p no:logfire - -# Run only local embedding tests -uv run pytest tests/ -v -m "local_embeddings" -p no:logfire - -# Exclude slow tests -uv run pytest tests/ -v -m "not slow" -p no:logfire -``` - -**Note**: The `-p no:logfire` flag disables the logfire plugin to avoid conflicts during testing. - -## Mocking - -- Use `respx` for httpx mocking -- Use `pytest-mock` for general mocking -- Mock LLM calls in unit tests (use `MockJudgeHandler`) -- Fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response` - -## TDD Workflow - -1. Write failing test in `tests/unit/` -2. Implement in `src/` -3. Ensure test passes -4. 
Run checks: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` - -### Test Command Examples - -```bash -# Run unit tests (default, excludes OpenAI tests) -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire - -# Run HuggingFace tests -uv run pytest tests/ -v -m "huggingface" -p no:logfire - -# Run all tests -uv run pytest tests/ -v -p no:logfire -``` - -## Test Examples - -```python -@pytest.mark.unit -async def test_pubmed_search(mock_httpx_client): - tool = PubMedTool() - results = await tool.search("metformin", max_results=5) - assert len(results) > 0 - assert all(isinstance(r, Evidence) for r in results) - -@pytest.mark.integration -async def test_real_pubmed_search(): - tool = PubMedTool() - results = await tool.search("metformin", max_results=3) - assert len(results) <= 3 -``` - -## Test Coverage - -### Terminal Coverage Report - -```bash -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire -``` - -This shows coverage with missing lines highlighted in the terminal output. - -### HTML Coverage Report - -```bash -uv run pytest --cov=src --cov-report=html -p no:logfire -``` - -This generates an HTML coverage report in `htmlcov/index.html`. Open this file in your browser to see detailed coverage information. - -### Coverage Goals - -- Aim for >80% coverage on critical paths -- Exclude: `__init__.py`, `TYPE_CHECKING` blocks -- Coverage configuration is in `pyproject.toml` under `[tool.coverage.*]` - -## See Also - -- [Code Style](code-style.md) - Code style guidelines -- [Implementation Patterns](implementation-patterns.md) - Common patterns diff --git a/docs/getting-started/examples.md b/docs/getting-started/examples.md deleted file mode 100644 index c5b7f4150787a8e66f8eafec399fe89c11b449e8..0000000000000000000000000000000000000000 --- a/docs/getting-started/examples.md +++ /dev/null @@ -1,198 +0,0 @@ -# Examples - -This page provides examples of using The DETERMINATOR for various research tasks. - -## Basic Research Query - -### Example 1: Drug Information - -**Query**: -``` -What are the latest treatments for Alzheimer's disease? -``` - -**What The DETERMINATOR Does**: -1. Searches PubMed for recent papers -2. Searches ClinicalTrials.gov for active trials -3. Evaluates evidence quality -4. Synthesizes findings into a comprehensive report - -### Example 2: Clinical Trial Search - -**Query**: -``` -What clinical trials are investigating metformin for cancer prevention? -``` - -**What The DETERMINATOR Does**: - -1. Searches ClinicalTrials.gov for relevant trials -2. Searches PubMed for supporting literature -3. Provides trial details and status -4. Summarizes findings - -## Advanced Research Queries - -### Example 3: Comprehensive Review - -**Query**: - -``` -Review the evidence for using metformin as an anti-aging intervention, -including clinical trials, mechanisms of action, and safety profile. -``` - -**What The DETERMINATOR Does**: -1. Uses deep research mode (multi-section) -2. Searches multiple sources in parallel -3. Generates sections on: - - Clinical trials - - Mechanisms of action - - Safety profile -4. Synthesizes comprehensive report - -### Example 4: Hypothesis Testing - -**Query**: -``` -Test the hypothesis that regular exercise reduces Alzheimer's disease risk. -``` - -**What The DETERMINATOR Does**: -1. Generates testable hypotheses -2. Searches for supporting/contradicting evidence -3. 
Performs statistical analysis (if Modal configured) -4. Provides verdict: SUPPORTED, REFUTED, or INCONCLUSIVE - -## MCP Tool Examples - -### Using search_pubmed - -``` -Search PubMed for "CRISPR gene editing cancer therapy" -``` - -### Using search_clinical_trials - -``` -Find active clinical trials for "diabetes type 2 treatment" -``` - -### Using search_all - -``` -Search all sources for "COVID-19 vaccine side effects" -``` - -### Using analyze_hypothesis - -``` -Analyze whether vitamin D supplementation reduces COVID-19 severity -``` - -## Code Examples - -### Python API Usage - -```python -from src.orchestrator_factory import create_orchestrator -from src.tools.search_handler import SearchHandler -from src.agent_factory.judges import create_judge_handler - -# Create orchestrator -search_handler = SearchHandler() -judge_handler = create_judge_handler() -``` - - -[Create Orchestrator](../src/orchestrator_factory.py) start_line:44 end_line:66 - - -```python -# Run research query -query = "What are the latest treatments for Alzheimer's disease?" -async for event in orchestrator.run(query): - print(f"Event: {event.type} - {event.data}") -``` - -### Gradio UI Integration - -```python -import gradio as gr -from src.app import create_research_interface - -# Create interface -interface = create_research_interface() - -# Launch -interface.launch(server_name="0.0.0.0", server_port=7860) -``` - -## Research Patterns - -### Iterative Research - -Single-loop research with search-judge-synthesize cycles: - -```python -from src.orchestrator.research_flow import IterativeResearchFlow -``` - - -[IterativeResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:56 end_line:77 - - -```python -async for event in flow.run(query): - # Handle events - pass -``` - -### Deep Research - -Multi-section parallel research: - -```python -from src.orchestrator.research_flow import DeepResearchFlow -``` - - -[DeepResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:674 end_line:697 - - -```python -async for event in flow.run(query): - # Handle events - pass -``` - -## Configuration Examples - -### Basic Configuration - -```bash -# .env file -LLM_PROVIDER=openai -OPENAI_API_KEY=your_key_here -MAX_ITERATIONS=10 -``` - -### Advanced Configuration - -```bash -# .env file -LLM_PROVIDER=anthropic -ANTHROPIC_API_KEY=your_key_here -EMBEDDING_PROVIDER=local -WEB_SEARCH_PROVIDER=duckduckgo -MAX_ITERATIONS=20 -DEFAULT_TOKEN_LIMIT=200000 -USE_GRAPH_EXECUTION=true -``` - -## Next Steps - -- Read the [Configuration Guide](../configuration/index.md) for all options -- Explore the [Architecture Documentation](../architecture/graph_orchestration.md) -- Check out the [API Reference](../api/agents.md) for programmatic usage - diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md deleted file mode 100644 index 34c0b2848a1c86097f9a73e90df7ed18219658fc..0000000000000000000000000000000000000000 --- a/docs/getting-started/installation.md +++ /dev/null @@ -1,152 +0,0 @@ -# Installation - -This guide will help you install and set up DeepCritical on your system. - -## Prerequisites - -- Python 3.11 or higher -- `uv` package manager (recommended) or `pip` -- At least one LLM API key (OpenAI, Anthropic, or HuggingFace) - -## Installation Steps - -### 1. Install uv (Recommended) - -`uv` is a fast Python package installer and resolver. 
Install it using the standalone installer (recommended): - -**Unix/macOS/Linux:** -```bash -curl -LsSf https://astral.sh/uv/install.sh | sh -``` - -**Windows (PowerShell):** -```powershell -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" -``` - -**Alternative methods:** -```bash -# Using pipx (recommended if you have pipx installed) -pipx install uv - -# Or using pip -pip install uv -``` - -After installation, restart your terminal or add `~/.cargo/bin` to your PATH. - -### 2. Clone the Repository - -```bash -git clone https://github.com/DeepCritical/GradioDemo.git -cd GradioDemo -``` - -### 3. Install Dependencies - -Using `uv` (recommended): - -```bash -uv sync -``` - -Using `pip`: - -```bash -pip install -e . -``` - -### 4. Install Optional Dependencies - -For embeddings support (local sentence-transformers): - -```bash -uv sync --extra embeddings -``` - -For Modal sandbox execution: - -```bash -uv sync --extra modal -``` - -For Magentic orchestration: - -```bash -uv sync --extra magentic -``` - -Install all extras: - -```bash -uv sync --all-extras -``` - -### 5. Configure Environment Variables - -Create a `.env` file in the project root: - -```bash -# Required: At least one LLM provider -LLM_PROVIDER=openai # or "anthropic" or "huggingface" -OPENAI_API_KEY=your_openai_api_key_here - -# Optional: Other services -NCBI_API_KEY=your_ncbi_api_key_here # For higher PubMed rate limits -MODAL_TOKEN_ID=your_modal_token_id -MODAL_TOKEN_SECRET=your_modal_token_secret -``` - -See the [Configuration Guide](../configuration/index.md) for all available options. - -### 6. Verify Installation - -Run the application: - -```bash -uv run gradio run src/app.py -``` - -Open your browser to `http://localhost:7860` to verify the installation. - -## Development Setup - -For development, install dev dependencies: - -```bash -uv sync --all-extras --dev -``` - -Install pre-commit hooks: - -```bash -uv run pre-commit install -``` - -## Troubleshooting - -### Common Issues - -**Import Errors**: -- Ensure you've installed all required dependencies -- Check that Python 3.11+ is being used - -**API Key Errors**: -- Verify your `.env` file is in the project root -- Check that API keys are correctly formatted -- Ensure at least one LLM provider is configured - -**Module Not Found**: -- Run `uv sync` or `pip install -e .` again -- Check that you're in the correct virtual environment - -**Port Already in Use**: -- Change the port in `src/app.py` or use environment variable -- Kill the process using port 7860 - -## Next Steps - -- Read the [Quick Start Guide](quick-start.md) -- Learn about [MCP Integration](mcp-integration.md) -- Explore [Examples](examples.md) - diff --git a/docs/getting-started/mcp-integration.md b/docs/getting-started/mcp-integration.md deleted file mode 100644 index a06201f3603d6ded79a4c1bf5cb1a91515de0e5e..0000000000000000000000000000000000000000 --- a/docs/getting-started/mcp-integration.md +++ /dev/null @@ -1,203 +0,0 @@ -# MCP Integration - -The DETERMINATOR exposes a Model Context Protocol (MCP) server, allowing you to use its search tools directly from Claude Desktop or other MCP clients. - -## What is MCP? - -The Model Context Protocol (MCP) is a standard for connecting AI assistants to external tools and data sources. The DETERMINATOR implements an MCP server that exposes its search capabilities as MCP tools. - -## MCP Server URL - -When running locally: - -``` -http://localhost:7860/gradio_api/mcp/ -``` - -## Claude Desktop Configuration - -### 1. 
Locate Configuration File - -**macOS**: -``` -~/Library/Application Support/Claude/claude_desktop_config.json -``` - -**Windows**: -``` -%APPDATA%\Claude\claude_desktop_config.json -``` - -**Linux**: -``` -~/.config/Claude/claude_desktop_config.json -``` - -### 2. Add The DETERMINATOR Server - -Edit `claude_desktop_config.json` and add: - -```json -{ - "mcpServers": { - "determinator": { - "url": "http://localhost:7860/gradio_api/mcp/" - } - } -} -``` - -### 3. Restart Claude Desktop - -Close and restart Claude Desktop for changes to take effect. - -### 4. Verify Connection - -In Claude Desktop, you should see The DETERMINATOR tools available: -- `search_pubmed` -- `search_clinical_trials` -- `search_biorxiv` -- `search_all` -- `analyze_hypothesis` - -## Available Tools - -### search_pubmed - -Search peer-reviewed biomedical literature from PubMed. - -**Parameters**: -- `query` (string): Search query -- `max_results` (integer, optional): Maximum number of results (default: 10) - -**Example**: -``` -Search PubMed for "metformin diabetes" -``` - -### search_clinical_trials - -Search ClinicalTrials.gov for interventional studies. - -**Parameters**: -- `query` (string): Search query -- `max_results` (integer, optional): Maximum number of results (default: 10) - -**Example**: -``` -Search clinical trials for "Alzheimer's disease treatment" -``` - -### search_biorxiv - -Search bioRxiv/medRxiv preprints via Europe PMC. - -**Parameters**: -- `query` (string): Search query -- `max_results` (integer, optional): Maximum number of results (default: 10) - -**Example**: -``` -Search bioRxiv for "CRISPR gene editing" -``` - -### search_all - -Search all sources simultaneously (PubMed, ClinicalTrials.gov, Europe PMC). - -**Parameters**: -- `query` (string): Search query -- `max_results` (integer, optional): Maximum number of results per source (default: 10) - -**Example**: -``` -Search all sources for "COVID-19 vaccine efficacy" -``` - -### analyze_hypothesis - -Perform secure statistical analysis using Modal sandboxes. - -**Parameters**: -- `hypothesis` (string): Hypothesis to analyze -- `data` (string, optional): Data description or code - -**Example**: -``` -Analyze the hypothesis that metformin reduces cancer risk -``` - -## Using Tools in Claude Desktop - -Once configured, you can ask Claude to use DeepCritical tools: - -``` -Use DeepCritical to search PubMed for recent papers on Alzheimer's disease treatments. -``` - -Claude will automatically: -1. Call the appropriate DeepCritical tool -2. Retrieve results -3. 
Use the results in its response - -## Troubleshooting - -### Connection Issues - -**Server Not Found**: -- Ensure DeepCritical is running (`uv run gradio run src/app.py`) -- Verify the URL in `claude_desktop_config.json` is correct -- Check that port 7860 is not blocked by firewall - -**Tools Not Appearing**: -- Restart Claude Desktop after configuration changes -- Check Claude Desktop logs for errors -- Verify MCP server is accessible at the configured URL - -### Authentication - -If DeepCritical requires authentication: -- Configure API keys in DeepCritical settings -- Use HuggingFace OAuth login -- Ensure API keys are valid - -## Advanced Configuration - -### Custom Port - -If running on a different port, update the URL: - -```json -{ - "mcpServers": { - "deepcritical": { - "url": "http://localhost:8080/gradio_api/mcp/" - } - } -} -``` - -### Multiple Instances - -You can configure multiple DeepCritical instances: - -```json -{ - "mcpServers": { - "deepcritical-local": { - "url": "http://localhost:7860/gradio_api/mcp/" - }, - "deepcritical-remote": { - "url": "https://your-server.com/gradio_api/mcp/" - } - } -} -``` - -## Next Steps - -- Learn about [Configuration](../configuration/index.md) for advanced settings -- Explore [Examples](examples.md) for use cases -- Read the [Architecture Documentation](../architecture/graph_orchestration.md) - - diff --git a/docs/getting-started/quick-start.md b/docs/getting-started/quick-start.md deleted file mode 100644 index 45dfe1cb18c5cf4d1d20b0b62b0f227661ac50b5..0000000000000000000000000000000000000000 --- a/docs/getting-started/quick-start.md +++ /dev/null @@ -1,142 +0,0 @@ -# Single Command Deploy - -Deploy with Docker instantly with a single command: - -```bash -docker run -it -p 7860:7860 --platform=linux/amd64 \ - -e DB_KEY="YOUR_VALUE_HERE" \ - -e SERP_API="YOUR_VALUE_HERE" \ - -e INFERENCE_API="YOUR_VALUE_HERE" \ - -e MODAL_TOKEN_ID="YOUR_VALUE_HERE" \ - -e MODAL_TOKEN_SECRET="YOUR_VALUE_HERE" \ - -e NCBI_API_KEY="YOUR_VALUE_HERE" \ - -e SERPER_API_KEY="YOUR_VALUE_HERE" \ - -e CHROMA_DB_PATH="./chroma_db" \ - -e CHROMA_DB_HOST="localhost" \ - -e CHROMA_DB_PORT="8000" \ - -e RAG_COLLECTION_NAME="deepcritical_evidence" \ - -e RAG_SIMILARITY_TOP_K="5" \ - -e RAG_AUTO_INGEST="true" \ - -e USE_GRAPH_EXECUTION="false" \ - -e DEFAULT_TOKEN_LIMIT="100000" \ - -e DEFAULT_TIME_LIMIT_MINUTES="10" \ - -e DEFAULT_ITERATIONS_LIMIT="10" \ - -e WEB_SEARCH_PROVIDER="duckduckgo" \ - -e MAX_ITERATIONS="10" \ - -e SEARCH_TIMEOUT="30" \ - -e LOG_LEVEL="DEBUG" \ - -e EMBEDDING_PROVIDER="local" \ - -e OPENAI_EMBEDDING_MODEL="text-embedding-3-small" \ - -e LOCAL_EMBEDDING_MODEL="BAAI/bge-small-en-v1.5" \ - -e HUGGINGFACE_EMBEDDING_MODEL="sentence-transformers/all-MiniLM-L6-v2" \ - -e HF_FALLBACK_MODELS="Qwen/Qwen3-Next-80B-A3B-Thinking,Qwen/Qwen3-Next-80B-A3B-Instruct,meta-llama/Llama-3.3-70B-Instruct,meta-llama/Llama-3.1-8B-Instruct,HuggingFaceH4/zephyr-7b-beta,Qwen/Qwen2-7B-Instruct" \ - -e HUGGINGFACE_MODEL="Qwen/Qwen3-Next-80B-A3B-Thinking" \ - registry.hf.space/dataquests-deepcritical:latest python src/app.py - ``` - -## Quick Start Guide - -Get up and running with The DETERMINATOR in minutes. - -## Start the Application - -```bash -gradio src/app.py -``` - -Open your browser to `http://localhost:7860`. - -## First Research Query - -1. **Enter a Research Question** - - Type your research question in the chat interface, for example: - - "What are the latest treatments for Alzheimer's disease?" 
- - "Review the evidence for metformin in cancer prevention" - - "What clinical trials are investigating COVID-19 vaccines?" - -2. **Submit the Query** - - Click "Submit" or press Enter. The system will: - - Generate observations about your query - - Identify knowledge gaps - - Search multiple sources (PubMed, ClinicalTrials.gov, Europe PMC) - - Evaluate evidence quality - - Synthesize findings into a report - -3. **Review Results** - - Watch the real-time progress in the chat interface: - - Search operations and results - - Evidence evaluation - - Report generation - - Final research report with citations - -## Authentication - -### HuggingFace OAuth (Recommended) - -1. Click "Sign in with HuggingFace" at the top of the app -2. Authorize the application -3. Your HuggingFace API token will be automatically used -4. No need to manually enter API keys - -### Manual API Key - -1. Open the Settings accordion -2. Enter your API key: - - OpenAI API key - - Anthropic API key - - HuggingFace API key -3. Click "Save Settings" -4. Manual keys take priority over OAuth tokens - -## Understanding the Interface - -### Chat Interface - -- **Input**: Enter your research questions here -- **Messages**: View conversation history and research progress -- **Streaming**: Real-time updates as research progresses - -### Status Indicators - -- **Searching**: Active search operations -- **Evaluating**: Evidence quality assessment -- **Synthesizing**: Report generation -- **Complete**: Research finished - -### Settings - -- **API Keys**: Configure LLM providers -- **Research Mode**: Choose iterative or deep research -- **Budget Limits**: Set token, time, and iteration limits - -## Example Queries - -### Simple Query - -``` -What are the side effects of metformin? -``` - -### Complex Query - -``` -Review the evidence for using metformin as an anti-aging intervention, -including clinical trials, mechanisms of action, and safety profile. -``` - -### Clinical Trial Query - -``` -What are the active clinical trials investigating Alzheimer's disease treatments? -``` - -## Next Steps - -- Learn about [MCP Integration](mcp-integration.md) to use The DETERMINATOR from Claude Desktop -- Explore [Examples](examples.md) for more use cases -- Read the [Configuration Guide](../configuration/index.md) for advanced settings -- Check out the [Architecture Documentation](../architecture/graph_orchestration.md) to understand how it works - diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 3ad907ad29c4ba891f32cc28d8ac471724a8469a..0000000000000000000000000000000000000000 --- a/docs/index.md +++ /dev/null @@ -1,82 +0,0 @@ -# The DETERMINATOR - -**Generalist Deep Research Agent - Stops at Nothing Until Finding Precise Answers** - -The DETERMINATOR is a powerful generalist deep research agent system that uses iterative search-and-judge loops to comprehensively investigate any research question. It stops at nothing until finding precise answers, only stopping at configured limits (budget, time, iterations). - -**Key Features**: -- **Generalist**: Handles queries from any domain (medical, technical, business, scientific, etc.) 
-- **Automatic Source Selection**: Automatically determines if medical knowledge sources (PubMed, ClinicalTrials.gov) are needed -- **Multi-Source Search**: Web search, PubMed, ClinicalTrials.gov, Europe PMC, RAG -- **Iterative Refinement**: Continues searching and refining until precise answers are found -- **Evidence Synthesis**: Comprehensive reports with proper citations - -**Important**: The DETERMINATOR is a research tool that synthesizes evidence. It cannot provide medical advice or answer medical questions directly. - -## Features - -- **Generalist Research**: Handles any research question from any domain -- **Automatic Medical Detection**: Automatically determines if medical knowledge sources are needed -- **Multi-Source Search**: Web search, PubMed, ClinicalTrials.gov, Europe PMC (includes bioRxiv/medRxiv), RAG -- **Iterative Until Precise**: Stops at nothing until finding precise answers (only stops at configured limits) -- **MCP Integration**: Use our tools from Claude Desktop or any MCP client -- **HuggingFace OAuth**: Sign in with your HuggingFace account to automatically use your API token -- **Modal Sandbox**: Secure execution of AI-generated statistical code -- **LlamaIndex RAG**: Semantic search and evidence synthesis -- **HuggingFace Inference**: Free tier support with automatic fallback -- **Strongly Typed Composable Graphs**: Graph-based orchestration with Pydantic AI -- **Specialized Research Teams of Agents**: Multi-agent coordination for complex research tasks - -## Quick Start - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync dependencies -uv sync - -# Start the Gradio app -uv run gradio run src/app.py -``` - -Open your browser to `http://localhost:7860`. - -For detailed installation and setup instructions, see the [Getting Started Guide](getting-started/installation.md). - -## Architecture - -The DETERMINATOR uses a Vertical Slice Architecture: - -1. **Search Slice**: Retrieving evidence from multiple sources (web, PubMed, ClinicalTrials.gov, Europe PMC, RAG) based on query analysis -2. **Judge Slice**: Evaluating evidence quality using LLMs -3. **Orchestrator Slice**: Managing the research loop and UI - -The system supports three main research patterns: - -- **Iterative Research**: Single research loop with search-judge-synthesize cycles -- **Deep Research**: Multi-section parallel research with planning and synthesis -- **Research Team**: Multi-agent coordination using Magentic framework - -Learn more about the [Architecture](overview/architecture.md). 
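All three patterns expose the same streaming contract: the orchestrator's `run()` method is an async generator of `AgentEvent` objects, which is what the Gradio UI renders in real time. A minimal sketch of driving a query programmatically — the `create_orchestrator` keyword arguments are an assumption; the event fields (`type`, `data`) follow the examples elsewhere in these docs:

```python
import asyncio

from src.agent_factory.judges import create_judge_handler
from src.orchestrator_factory import create_orchestrator
from src.tools.search_handler import SearchHandler


async def main() -> None:
    # Wire the search and judge slices together; the factory picks the
    # orchestration pattern based on settings (keyword names assumed).
    orchestrator = create_orchestrator(
        search_handler=SearchHandler(),
        judge_handler=create_judge_handler(),
    )

    # The orchestrator yields AgentEvent objects until research completes
    # or a configured limit (budget, time, iterations) is reached.
    async for event in orchestrator.run("What are the latest treatments for Alzheimer's disease?"):
        print(f"{event.type}: {event.data}")


if __name__ == "__main__":
    asyncio.run(main())
```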
- -## Documentation - -- [Overview](overview/architecture.md) - System architecture and design -- [Getting Started](getting-started/installation.md) - Installation and setup -- [Configuration](configuration/index.md) - Configuration guide -- [API Reference](api/agents.md) - API documentation -- [Contributing](contributing/index.md) - Development guidelines - -## Links - -- [GitHub Repository](https://github.com/DeepCritical/GradioDemo) -- [HuggingFace Space](https://huggingface.co/spaces/DataQuests/DeepCritical) - diff --git a/docs/overview/architecture.md b/docs/overview/architecture.md deleted file mode 100644 index 3f9c071dda107f9722b24610aae83617ad130693..0000000000000000000000000000000000000000 --- a/docs/overview/architecture.md +++ /dev/null @@ -1,194 +0,0 @@ -# Architecture Overview - -The DETERMINATOR is a powerful generalist deep research agent system that uses iterative search-and-judge loops to comprehensively investigate any research question. It stops at nothing until finding precise answers, only stopping at configured limits (budget, time, iterations). The system automatically determines if medical knowledge sources are needed and adapts its search strategy accordingly. It supports multiple orchestration patterns, graph-based execution, parallel research workflows, and long-running task management with real-time streaming. - -## Core Architecture - -### Orchestration Patterns - -1. **Graph Orchestrator** (`src/orchestrator/graph_orchestrator.py`): - - Graph-based execution using Pydantic AI agents as nodes - - Supports both iterative and deep research patterns - - Node types: Agent, State, Decision, Parallel - - Edge types: Sequential, Conditional, Parallel - - Conditional routing based on knowledge gaps, budget, and iterations - - Parallel execution for concurrent research loops - - Event streaming via `AsyncGenerator[AgentEvent]` for real-time UI updates - - Fallback to agent chains when graph execution is disabled - -2. **Deep Research Flow** (`src/orchestrator/research_flow.py`): - - **Pattern**: Planner → Parallel Iterative Loops (one per section) → Synthesis - - Uses `PlannerAgent` to break query into report sections - - Runs `IterativeResearchFlow` instances in parallel per section via `WorkflowManager` - - Synthesizes results using `LongWriterAgent` or `ProofreaderAgent` - - Supports both graph execution (`use_graph=True`) and agent chains (`use_graph=False`) - - Budget tracking per section and globally - - State synchronization across parallel loops - -3. **Iterative Research Flow** (`src/orchestrator/research_flow.py`): - - **Pattern**: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete - - Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent` - - `JudgeHandler` assesses evidence sufficiency - - Iterates until research complete or constraints met (iterations, time, tokens) - - Supports graph execution and agent chains - -4. **Magentic Orchestrator** (`src/orchestrator_magentic.py`): - - Multi-agent coordination using `agent-framework-core` - - ChatAgent pattern with internal LLMs per agent - - Uses `MagenticBuilder` with participants: searcher, hypothesizer, judge, reporter - - Manager orchestrates agents via `OpenAIChatClient` - - Requires OpenAI API key (function calling support) - - Event-driven: converts Magentic events to `AgentEvent` for UI streaming - - Supports long-running workflows with max rounds and stall/reset handling - -5. 
**Hierarchical Orchestrator** (`src/orchestrator_hierarchical.py`): - - Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge` - - Adapts Magentic ChatAgent to `SubIterationTeam` protocol - - Event-driven via `asyncio.Queue` for coordination - - Supports sub-iteration patterns for complex research tasks - -6. **Legacy Simple Mode** (`src/legacy_orchestrator.py`): - - Linear search-judge-synthesize loop - - Uses `SearchHandlerProtocol` and `JudgeHandlerProtocol` - - Generator-based design yielding `AgentEvent` objects - - Backward compatibility for simple use cases - -## Long-Running Task Support - -The system is designed for long-running research tasks with comprehensive state management and streaming: - -1. **Event Streaming**: - - All orchestrators yield `AgentEvent` objects via `AsyncGenerator` - - Real-time UI updates through Gradio chat interface - - Event types: `started`, `searching`, `search_complete`, `judging`, `judge_complete`, `looping`, `synthesizing`, `hypothesizing`, `complete`, `error` - - Metadata includes iteration numbers, tool names, result counts, durations - -2. **Budget Tracking** (`src/middleware/budget_tracker.py`): - - Per-loop and global budget management - - Tracks: tokens, time (seconds), iterations - - Budget enforcement at decision nodes - - Token estimation (~4 chars per token) - - Early termination when budgets exceeded - - Budget summaries for monitoring - -3. **Workflow Manager** (`src/middleware/workflow_manager.py`): - - Coordinates parallel research loops - - Tracks loop status: `pending`, `running`, `completed`, `failed`, `cancelled` - - Synchronizes evidence between loops and global state - - Handles errors per loop (doesn't fail all if one fails) - - Supports loop cancellation and timeout handling - - Evidence deduplication across parallel loops - -4. **State Management** (`src/middleware/state_machine.py`): - - Thread-safe isolation using `ContextVar` for concurrent requests - - `WorkflowState` tracks: evidence, conversation history, embedding service - - Evidence deduplication by URL - - Semantic search via embedding service - - State persistence across long-running workflows - - Supports both iterative and deep research patterns - -5. **Gradio UI** (`src/app.py`): - - Real-time streaming of research progress - - Accordion-based UI for pending/done operations - - OAuth integration (HuggingFace) - - Multiple backend support (API keys, free tier) - - Handles long-running tasks with progress indicators - - Event accumulation for pending operations - -## Graph Architecture - -The graph orchestrator (`src/orchestrator/graph_orchestrator.py`) implements a flexible graph-based execution model: - -**Node Types**: - -- **Agent Nodes**: Execute Pydantic AI agents (e.g., `KnowledgeGapAgent`, `ToolSelectorAgent`) -- **State Nodes**: Update or read workflow state (evidence, conversation) -- **Decision Nodes**: Make routing decisions (research complete?, budget exceeded?) -- **Parallel Nodes**: Execute multiple nodes concurrently (parallel research loops) - -**Edge Types**: - -- **Sequential Edges**: Always traversed (no condition) -- **Conditional Edges**: Traversed based on condition (e.g., if research complete → writer, else → tool selector) -- **Parallel Edges**: Used for parallel execution branches - -**Graph Patterns**: - -- **Iterative Graph**: `[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?] 
→ [Tool Selector] or [Writer]` -- **Deep Research Graph**: `[Input] → [Planner] → [Parallel Iterative Loops] → [Synthesizer]` - -**Execution Flow**: - -1. Graph construction from nodes and edges -2. Graph validation (no cycles, all nodes reachable) -3. Graph execution from entry node -4. Node execution based on type -5. Edge evaluation for next node(s) -6. Parallel execution via `asyncio.gather()` -7. State updates at state nodes -8. Event streaming for UI - -## Key Components - -- **Orchestrators**: Multiple orchestration patterns (`src/orchestrator/`, `src/orchestrator_*.py`) -- **Research Flows**: Iterative and deep research patterns (`src/orchestrator/research_flow.py`) -- **Graph Builder**: Graph construction utilities (`src/agent_factory/graph_builder.py`) -- **Agents**: Pydantic AI agents (`src/agents/`, `src/agent_factory/agents.py`) -- **Search Tools**: Neo4j knowledge graph, PubMed, ClinicalTrials.gov, Europe PMC, Web search, RAG (`src/tools/`) -- **Judge Handler**: LLM-based evidence assessment (`src/agent_factory/judges.py`) -- **Embeddings**: Semantic search & deduplication (`src/services/embeddings.py`) -- **Statistical Analyzer**: Modal sandbox execution (`src/services/statistical_analyzer.py`) -- **Multimodal Processing**: Image OCR and audio STT/TTS services (`src/services/multimodal_processing.py`, `src/services/audio_processing.py`) -- **Middleware**: State management, budget tracking, workflow coordination (`src/middleware/`) -- **MCP Tools**: Claude Desktop integration (`src/mcp_tools.py`) -- **Gradio UI**: Web interface with MCP server and streaming (`src/app.py`) - -## Research Team & Parallel Execution - -The system supports complex research workflows through: - -1. **WorkflowManager**: Coordinates multiple parallel research loops - - Creates and tracks `ResearchLoop` instances - - Runs loops in parallel via `asyncio.gather()` - - Synchronizes evidence to global state - - Handles loop failures gracefully - -2. **Deep Research Pattern**: Breaks complex queries into sections - - Planner creates report outline with sections - - Each section runs as independent iterative research loop - - Loops execute in parallel - - Evidence shared across loops via global state - - Final synthesis combines all section results - -3. 
**State Synchronization**: Thread-safe evidence sharing - - Evidence deduplication by URL - - Global state accessible to all loops - - Semantic search across all collected evidence - - Conversation history tracking per iteration - -## Configuration & Modes - -- **Orchestrator Factory** (`src/orchestrator_factory.py`): - - Auto-detects mode: "advanced" if OpenAI key available, else "simple" - - Supports explicit mode selection: "simple", "magentic" (alias for "advanced"), "advanced", "iterative", "deep", "auto" - - Lazy imports for optional dependencies - -- **Orchestrator Modes** (selected in UI or via factory): - - `simple`: Legacy linear search-judge loop (Free Tier) - - `advanced` or `magentic`: Multi-agent coordination using Microsoft Agent Framework (requires OpenAI API key) - - `iterative`: Knowledge-gap-driven research with single loop (Free Tier) - - `deep`: Parallel section-based research with planning (Free Tier) - - `auto`: Intelligent mode detection based on query complexity (Free Tier) - -- **Graph Research Modes** (used within graph orchestrator, separate from orchestrator mode): - - `iterative`: Single research loop pattern - - `deep`: Multi-section parallel research pattern - - `auto`: Auto-detect pattern based on query complexity - -- **Execution Modes**: - - `use_graph=True`: Graph-based execution (parallel, conditional routing) - - `use_graph=False`: Agent chains (sequential, backward compatible) - -**Note**: The UI provides separate controls for orchestrator mode and graph research mode. When using graph-based orchestrators (iterative/deep/auto), the graph research mode determines the specific pattern used within the graph execution. - - diff --git a/docs/overview/features.md b/docs/overview/features.md deleted file mode 100644 index 0afb21e6317f24d447fd3c6cf1103d5a9684104b..0000000000000000000000000000000000000000 --- a/docs/overview/features.md +++ /dev/null @@ -1,169 +0,0 @@ -# Features - -The DETERMINATOR provides a comprehensive set of features for AI-assisted research: - -## Core Features - -### Multi-Source Search - -- **General Web Search**: Search general knowledge sources for any domain -- **Neo4j Knowledge Graph**: Search structured knowledge graph for papers and disease relationships -- **PubMed**: Search peer-reviewed biomedical literature via NCBI E-utilities (automatically used when medical knowledge needed) -- **ClinicalTrials.gov**: Search interventional clinical trials (automatically used when medical knowledge needed) -- **Europe PMC**: Search preprints and peer-reviewed articles (includes bioRxiv/medRxiv) -- **RAG**: Semantic search within collected evidence using LlamaIndex -- **Automatic Source Selection**: Automatically determines which sources are needed based on query analysis - -### MCP Integration - -- **Model Context Protocol**: Expose search tools via MCP server -- **Claude Desktop**: Use The DETERMINATOR tools directly from Claude Desktop -- **MCP Clients**: Compatible with any MCP-compatible client - -### Authentication - -- **REQUIRED**: Authentication is mandatory before using the application -- **HuggingFace OAuth**: Sign in with HuggingFace account for automatic API token usage (recommended) -- **Manual API Keys**: Support for HuggingFace API keys via environment variables (`HF_TOKEN` or `HUGGINGFACE_API_KEY`) -- **Free Tier Support**: Automatic fallback to HuggingFace Inference API (public models) when no API key is available -- **Authentication Check**: The application will display an error message if authentication is not provided - 
-### Secure Code Execution - -- **Modal Sandbox**: Secure execution of AI-generated statistical code -- **Isolated Environment**: Network isolation and package version pinning -- **Safe Execution**: Prevents malicious code execution - -### Semantic Search & RAG - -- **LlamaIndex Integration**: Advanced RAG capabilities -- **Vector Storage**: ChromaDB for embedding storage -- **Semantic Deduplication**: Automatic detection of similar evidence -- **Embedding Service**: Local sentence-transformers (no API key required) - -### Orchestration Patterns - -- **Graph-Based Execution**: Flexible graph orchestration with conditional routing -- **Parallel Research Loops**: Run multiple research tasks concurrently -- **Iterative Research**: Single-loop research with search-judge-synthesize cycles that continues until precise answers are found -- **Deep Research**: Multi-section parallel research with planning and synthesis -- **Magentic Orchestration**: Multi-agent coordination using Microsoft Agent Framework (alias: "advanced" mode) -- **Stops at Nothing**: Only stops at configured limits (budget, time, iterations), otherwise continues until finding precise answers - -**Orchestrator Modes**: -- `simple`: Legacy linear search-judge loop -- `advanced` (or `magentic`): Multi-agent coordination (requires OpenAI API key) -- `iterative`: Knowledge-gap-driven research with single loop -- `deep`: Parallel section-based research with planning -- `auto`: Intelligent mode detection based on query complexity - -**Graph Research Modes** (used within graph orchestrator): -- `iterative`: Single research loop pattern -- `deep`: Multi-section parallel research pattern -- `auto`: Auto-detect pattern based on query complexity - -**Execution Modes**: -- `use_graph=True`: Graph-based execution with parallel and conditional routing -- `use_graph=False`: Agent chains with sequential execution (backward compatible) - -### Real-Time Streaming - -- **Event Streaming**: Real-time updates via `AsyncGenerator[AgentEvent]` -- **Progress Tracking**: Monitor research progress with detailed event metadata -- **UI Integration**: Seamless integration with Gradio chat interface - -### Budget Management - -- **Token Budget**: Track and limit LLM token usage -- **Time Budget**: Enforce time limits per research loop -- **Iteration Budget**: Limit maximum iterations -- **Per-Loop Budgets**: Independent budgets for parallel research loops - -### State Management - -- **Thread-Safe Isolation**: ContextVar-based state management -- **Evidence Deduplication**: Automatic URL-based deduplication -- **Conversation History**: Track iteration history and agent interactions -- **State Synchronization**: Share evidence across parallel loops - -### Multimodal Input & Output - -- **Image Input (OCR)**: Upload images and extract text using optical character recognition -- **Audio Input (STT)**: Record or upload audio files and transcribe to text using speech-to-text -- **Audio Output (TTS)**: Generate audio responses with text-to-speech synthesis -- **Configurable Settings**: Enable/disable multimodal features via sidebar settings -- **Voice Selection**: Choose from multiple TTS voices (American English: af_*, am_*) -- **Speech Speed Control**: Adjust TTS speech speed (0.5x to 2.0x) -- **Multimodal Processing Service**: Integrated service for processing images and audio files - -## Advanced Features - -### Agent System - -- **Pydantic AI Agents**: Type-safe agent implementation -- **Structured Output**: Pydantic models for agent responses -- **Agent 
Factory**: Centralized agent creation with fallback support -- **Specialized Agents**: Knowledge gap, tool selector, writer, proofreader, and more - -### Search Tools - -- **Rate Limiting**: Built-in rate limiting for external APIs -- **Retry Logic**: Automatic retry with exponential backoff -- **Query Preprocessing**: Automatic query enhancement and synonym expansion -- **Evidence Conversion**: Automatic conversion to structured Evidence objects - -### Error Handling - -- **Custom Exceptions**: Hierarchical exception system -- **Error Chaining**: Preserve exception context -- **Structured Logging**: Comprehensive logging with structlog -- **Graceful Degradation**: Fallback handlers for missing dependencies - -### Configuration - -- **Pydantic Settings**: Type-safe configuration management -- **Environment Variables**: Support for `.env` files -- **Validation**: Automatic configuration validation -- **Flexible Providers**: Support for multiple LLM and embedding providers - -### Testing - -- **Unit Tests**: Comprehensive unit test coverage -- **Integration Tests**: Real API integration tests -- **Mock Support**: Extensive mocking utilities -- **Coverage Reports**: Code coverage tracking - -## UI Features - -### Gradio Interface - -- **Real-Time Chat**: Interactive chat interface with multimodal support -- **Streaming Updates**: Live progress updates -- **Accordion UI**: Organized display of pending/done operations -- **OAuth Integration**: Seamless HuggingFace authentication -- **Multimodal Input**: Support for text, images, and audio input in the same interface -- **Sidebar Settings**: Configuration accordions for research, multimodal, and audio settings - -### MCP Server - -- **RESTful API**: HTTP-based MCP server -- **Tool Discovery**: Automatic tool registration -- **Request Handling**: Async request processing -- **Error Responses**: Structured error responses - -## Development Features - -### Code Quality - -- **Type Safety**: Full type hints with mypy strict mode -- **Linting**: Ruff for code quality -- **Formatting**: Automatic code formatting -- **Pre-commit Hooks**: Automated quality checks - -### Documentation - -- **Comprehensive Docs**: Detailed documentation for all components -- **Code Examples**: Extensive code examples -- **Architecture Diagrams**: Visual architecture documentation -- **API Reference**: Complete API documentation - diff --git a/docs/overview/quick-start.md b/docs/overview/quick-start.md deleted file mode 100644 index 8b36d4864c1284f5ad97e330093d6ba9760313ad..0000000000000000000000000000000000000000 --- a/docs/overview/quick-start.md +++ /dev/null @@ -1,103 +0,0 @@ -# Quick Start - -Get started with DeepCritical in minutes. - -## Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync dependencies -uv sync -``` - -## Run the UI - -```bash -# Start the Gradio app -uv run gradio run src/app.py -``` - -Open your browser to `http://localhost:7860`. - -## Basic Usage - -### 1. Authentication (REQUIRED) - -**Authentication is mandatory** - you must authenticate before using the application. The app will display an error message if you try to use it without authentication. 
- -**HuggingFace OAuth Login** (Recommended): -- Click the "Sign in with HuggingFace" button at the top of the app -- Your HuggingFace API token will be automatically used for AI inference -- No need to manually enter API keys when logged in - -**Manual API Key** (Alternative): -- Set environment variable `HF_TOKEN` or `HUGGINGFACE_API_KEY` before starting the app -- The app will automatically use these tokens if OAuth login is not available -- Supports HuggingFace API keys only (OpenAI/Anthropic keys are not used in the current implementation) - -### 2. Start a Research Query - -1. Enter your research question in the chat interface - - **Text Input**: Type your question directly - - **Image Input**: Click the 📷 icon to upload images (OCR will extract text) - - **Audio Input**: Click the 🎤 icon to record or upload audio (STT will transcribe to text) -2. Click "Submit" or press Enter -3. Watch the real-time progress as the system: - - Generates observations - - Identifies knowledge gaps - - Searches multiple sources - - Evaluates evidence - - Synthesizes findings -4. Review the final research report - - **Audio Output**: If enabled, the final response will include audio synthesis (TTS) - -**Multimodal Features**: -- Configure image/audio input and output in the sidebar settings -- Image OCR and audio STT/TTS can be enabled/disabled independently -- TTS voice and speed can be customized in the Audio Output settings - -### 3. MCP Integration (Optional) - -Connect DeepCritical to Claude Desktop: - -1. Add to your `claude_desktop_config.json`: -```json -{ - "mcpServers": { - "deepcritical": { - "url": "http://localhost:7860/gradio_api/mcp/" - } - } -} -``` - -2. Restart Claude Desktop -3. Use DeepCritical tools directly from Claude Desktop - -## Available Tools - -- `search_pubmed`: Search peer-reviewed biomedical literature -- `search_clinical_trials`: Search ClinicalTrials.gov -- `search_biorxiv`: Search bioRxiv/medRxiv preprints -- `search_neo4j`: Search Neo4j knowledge graph for papers and disease relationships -- `search_all`: Search all sources simultaneously -- `analyze_hypothesis`: Secure statistical analysis using Modal sandboxes - -**Note**: The application automatically uses all available search tools (Neo4j, PubMed, ClinicalTrials.gov, Europe PMC, Web search, RAG) based on query analysis. Neo4j knowledge graph search is included by default for biomedical queries. - -## Next Steps - -- Read the [Installation Guide](../getting-started/installation.md) for detailed setup -- Learn about [Configuration](../configuration/index.md) -- Explore the [Architecture](../architecture/graph_orchestration.md) -- Check out [Examples](../getting-started/examples.md) - diff --git a/docs/team.md b/docs/team.md deleted file mode 100644 index 6e9be5c763245d2caec553acc314c1532b335950..0000000000000000000000000000000000000000 --- a/docs/team.md +++ /dev/null @@ -1,44 +0,0 @@ -# Team - -DeepCritical is developed by a team of researchers and developers working on AI-assisted research. 
- -## Team Members - -### ZJ - -- 💼 [LinkedIn](https://www.linkedin.com/in//) - -### Mario Aderman - -- 🤗 [HuggingFace](https://huggingface.co/SeasonalFall84) -- 💼 [LinkedIn](https://www.linkedin.com/in/mario-aderman/) -- 𝕏 [X](https://x.com/marioaderman) - -### Joseph Pollack - -- 🤗 [HuggingFace](https://huggingface.co/Tonic) -- 💼 [LinkedIn](https://www.linkedin.com/in/josephpollack/) -- 𝕏 [X](https://x.com/josephpollack) - -### Virat Chauran - -- 𝕏 [X](https://x.com/viratzzs/) -- 💼 [LinkedIn](https://www.linkedin.com/in/viratchauhan/) -- 🤗 [HuggingFace](https://huggingface.co/ViratChauhan) - -### Anna Bossler - -- 💼 [LinkedIn](https://www.linkedin.com/in/ana-bossler-07304717) - -## About - -The DeepCritical team met online in the Alzheimer's Critical Literature Review Group in the Hugging Science initiative. We're building the agent framework we want to use for AI-assisted research to turn the vast amounts of clinical data into cures. - -## Contributing - -We welcome contributions! See the [Contributing Guide](contributing/index.md) for details. - -## Links - -- [GitHub Repository](https://github.com/DeepCritical/GradioDemo) -- [HuggingFace Space](https://huggingface.co/spaces/DataQuests/DeepCritical) diff --git a/mkdocs.yml b/mkdocs.yml deleted file mode 100644 index cd3cee71d85f1dc3660ccd047e1feddbbcceb6ee..0000000000000000000000000000000000000000 --- a/mkdocs.yml +++ /dev/null @@ -1,166 +0,0 @@ -site_name: The DETERMINATOR -site_description: Generalist Deep Research Agent that Stops at Nothing -site_author: The DeepCritical Team -site_url: https://deepcritical.github.io/GradioDemo/ - -repo_name: DeepCritical/GradioDemo -repo_url: https://github.com/DeepCritical/GradioDemo -edit_uri: edit/dev/docs/ - -# Ensure all files are included even if not in nav -# strict: false - -theme: - name: material - palette: - # Light mode - - scheme: default - primary: orange - accent: red - toggle: - icon: material/brightness-7 - name: Switch to dark mode - # Dark mode - - scheme: slate - primary: orange - accent: red - toggle: - icon: material/brightness-4 - name: Switch to light mode - features: - # Navigation features - - navigation.tabs - - navigation.sections - - navigation.expand - - navigation.top - - navigation.indexes - - navigation.instant - - navigation.tracking - - navigation.smooth - # Search features - - search.suggest - - search.highlight - # Content features - - content.code.annotate - - content.code.copy - - content.tabs.link - - content.tooltips - - toc.integrate - icon: - repo: fontawesome/brands/github - language: en - -plugins: - - search: - lang: - - en - separator: '[\s\-,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|&' - - mermaid2 - - codeinclude - - git-revision-date-localized: - enable_creation_date: true - enable_git_follow: false # Disable follow to avoid timestamp ordering issues - strict: false # Bypass warnings about timestamp ordering issues - type: timeago # Shows "2 days ago" format - fallback_to_build_date: true - - minify: - minify_html: true - minify_js: true - minify_css: true - -markdown_extensions: - - dev.docs_plugins: - base_path: "." 
- - pymdownx.highlight: - anchor_linenums: true - line_spans: __span # Allow line spans for highlighting - pygments_lang_class: true # Add language class to code blocks - use_pygments: true - noclasses: false # Use CSS classes for better theming - - pymdownx.inlinehilite - - pymdownx.superfences: - custom_fences: - - name: mermaid - class: mermaid - format: !!python/name:pymdownx.superfences.fence_code_format - preserve_tabs: true - - pymdownx.tabbed: - alternate_style: true - combine_header_slug: true # Better tab linking - - pymdownx.tasklist: - custom_checkbox: true - - pymdownx.emoji: - emoji_generator: !!python/name:pymdownx.emoji.to_svg - emoji_index: !!python/name:pymdownx.emoji.twemoji - - pymdownx.snippets - - admonition - - pymdownx.details - - attr_list - - md_in_html - - tables - - meta # Frontmatter support for tags, categories, etc. - - toc: - permalink: true - permalink_title: "Anchor link to this section" - baselevel: 1 - toc_depth: 3 - slugify: !!python/object/apply:pymdownx.slugs.slugify - kwds: - case: lower - -nav: - - Home: index.md - - Overview: - - overview/architecture.md - - overview/features.md - - Getting Started: - - getting-started/installation.md - - getting-started/quick-start.md - - getting-started/mcp-integration.md - - getting-started/examples.md - - Configuration: - - configuration/index.md - - Architecture: - - "Graph Orchestration": architecture/graph_orchestration.md - - "Workflow Diagrams": architecture/workflow-diagrams.md - - "Agents": architecture/agents.md - - "Orchestrators": architecture/orchestrators.md - - "Tools": architecture/tools.md - - "Middleware": architecture/middleware.md - - "Services": architecture/services.md - - API Reference: - - api/agents.md - - api/tools.md - - api/orchestrators.md - - api/services.md - - api/models.md - - Contributing: - - contributing/index.md - - contributing/code-quality.md - - contributing/code-style.md - - contributing/error-handling.md - - contributing/implementation-patterns.md - - contributing/prompt-engineering.md - - contributing/testing.md - - License: LICENSE.md - - Team: team.md - -extra: - social: - - icon: fontawesome/brands/github - link: https://github.com/DeepCritical/GradioDemo - name: GitHub - - icon: fontawesome/brands/twitter - link: https://twitter.com/josephpollack - name: Twitter - - icon: material/web - link: https://huggingface.co/spaces/DataQuests/DeepCritical - name: Live Demo - - icon: fontawesome/brands/discord - link: https://discord.gg/n8ytYeh25n - name: Discord - generator: - enabled: false # Hide generator meta tag - -copyright: Copyright © 2024 DeepCritical Team - diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 6f59cac5edf5e957ee88a6fec36ad45db3040bd0..0000000000000000000000000000000000000000 --- a/pyproject.toml +++ /dev/null @@ -1,205 +0,0 @@ -[project] -name = "determinator" -version = "0.1.0" -description = "The DETERMINATOR - the Deep Research Agent that Stops at Nothing" -readme = "README.md" -requires-python = ">=3.11" -dependencies = [ - "pydantic>=2.7", - "pydantic-settings>=2.2", # For BaseSettings (config) - "pydantic-ai>=0.0.16", # Agent framework - "openai>=1.0.0", - "anthropic>=0.18.0", - "httpx>=0.27", # Async HTTP client (PubMed) - "beautifulsoup4>=4.12", # HTML parsing - "xmltodict>=0.13", # PubMed XML -> dict - "huggingface-hub>=0.20.0", # Hugging Face Inference API - "gradio[mcp,oauth]>=6.0.0", # Chat interface with MCP server support (6.0 required for css in launch()) - "python-dotenv>=1.0", # .env loading - "tenacity>=8.2", # 
Retry logic - "structlog>=24.1", # Structured logging - "requests>=2.32.5", # ClinicalTrials.gov (httpx blocked by WAF) - "pydantic-graph>=1.22.0", - "limits>=3.0", # Web search - "llama-index-llms-huggingface>=0.6.1", - "llama-index-llms-huggingface-api>=0.6.1", - "llama-index-vector-stores-chroma>=0.5.3", - "llama-index>=0.14.8", - "gradio-client>=1.0.0", # For STT/OCR API calls - "soundfile>=0.12.0", # For audio file I/O - "pillow>=10.0.0", # For image processing - "torch>=2.0.0", # Required by Kokoro TTS - "transformers>=4.57.2", # Required by Kokoro TTS - "modal>=0.63.0", # Required for TTS GPU execution - "tokenizers>=0.22.0,<=0.23.0", - "rpds-py>=0.29.0", - "pydantic-ai-slim[huggingface]>=0.0.18", - "agent-framework-core>=1.0.0b251120,<2.0.0", - "chromadb>=0.4.0", - "sentence-transformers>=2.2.0", - "numpy<2.0", - "llama-index-llms-openai>=0.6.9", - "llama-index-embeddings-openai>=0.5.1", - "ddgs>=9.9.2", - "aiohttp>=3.13.2", - "lxml>=6.0.2", - "fake-useragent==2.2.0", - "socksio==1.0.0", - "neo4j>=6.0.3", - "md2pdf>=1.0.1", -] - -[project.optional-dependencies] -dev = [ - # Testing - "pytest>=8.0", - "pytest-asyncio>=0.23", - "pytest-sugar>=1.0", - "pytest-cov>=5.0", - "pytest-mock>=3.12", - "respx>=0.21", # Mock httpx requests - "typer>=0.9.0", # Gradio CLI dependency for smoke tests - - # Quality - "ruff>=0.4.0", - "mypy>=1.10", - "pre-commit>=3.7", - - # Documentation - "mkdocs>=1.6.0", - "mkdocs-material>=9.0.0", - "mkdocs-mermaid2-plugin>=1.1.0", - "mkdocs-codeinclude-plugin>=0.2.0", - "mkdocs-git-revision-date-localized-plugin>=1.2.0", - "mkdocs-minify-plugin>=0.8.0", - "pymdown-extensions>=10.17.2", -] -magentic = [ - "agent-framework-core>=1.0.0b251120,<2.0.0", # Microsoft Agent Framework (PyPI) -] -embeddings = [ - "chromadb>=0.4.0", - "sentence-transformers>=2.2.0", - "numpy<2.0", # chromadb compatibility: uses np.float_ removed in NumPy 2.0 -] -modal = [ - # Mario's Modal code execution + LlamaIndex RAG - # Note: modal>=0.63.0 is now in main dependencies for TTS support - "llama-index>=0.11.0", - "llama-index-llms-openai>=0.6.9", - "llama-index-embeddings-openai>=0.5.1", - "llama-index-vector-stores-chroma", - "chromadb>=0.4.0", - "numpy<2.0", # chromadb compatibility: uses np.float_ removed in NumPy 2.0 -] - -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[tool.hatch.build.targets.wheel] -packages = ["src"] - -# ============== RUFF CONFIG ============== -[tool.ruff] -line-length = 100 -target-version = "py311" -src = ["src"] -exclude = [ - "tests/", - "examples/", - "reference_repos/", - "folder/", -] - -[tool.ruff.lint] -select = [ - "E", # pycodestyle errors - "F", # pyflakes - "B", # flake8-bugbear - "I", # isort - "N", # pep8-naming - "UP", # pyupgrade - "PL", # pylint - "RUF", # ruff-specific -] -ignore = [ - "PLR0913", # Too many arguments (agents need many params) - "PLR0912", # Too many branches (complex orchestrator logic) - "PLR0911", # Too many return statements (complex agent logic) - "PLR0915", # Too many statements (Gradio UI setup functions) - "PLR2004", # Magic values (statistical constants like p-values) - "PLW0603", # Global statement (singleton pattern for Modal) - "PLC0415", # Lazy imports for optional dependencies - "E402", # Module level import not at top (needed for pytest.importorskip) - "E501", # Line too long (ignore line length violations) - "RUF100", # Unused noqa (version differences between local/CI) -] - -[tool.ruff.lint.isort] -known-first-party = ["src"] - -# ============== MYPY CONFIG ============== 
-[tool.mypy] -python_version = "3.11" -strict = true -ignore_missing_imports = true -disallow_untyped_defs = true -warn_return_any = true -warn_unused_ignores = false -explicit_package_bases = true -mypy_path = "." -exclude = [ - "^reference_repos/", - "^examples/", - "^folder/", - "^src/app.py", -] - -# ============== PYTEST CONFIG ============== -[tool.pytest.ini_options] -testpaths = ["tests"] -asyncio_mode = "auto" -addopts = [ - "-v", - "--tb=short", - "--strict-markers", - "-p", - "no:logfire", -] -markers = [ - "unit: Unit tests (mocked)", - "integration: Integration tests (real APIs)", - "slow: Slow tests", - "openai: Tests that require OpenAI API key", - "huggingface: Tests that require HuggingFace API key or use HuggingFace models", - "embedding_provider: Tests that require API-based embedding providers (OpenAI, etc.)", - "local_embeddings: Tests that use local embeddings (sentence-transformers, ChromaDB)", -] - -# ============== COVERAGE CONFIG ============== -[tool.coverage.run] -source = ["src"] -omit = ["*/__init__.py"] - -[tool.coverage.report] -exclude_lines = [ - "pragma: no cover", - "if TYPE_CHECKING:", - "raise NotImplementedError", -] - -[dependency-groups] -dev = [ - "mkdocs>=1.6.1", - "mkdocs-codeinclude-plugin>=0.2.1", - "mkdocs-material>=9.7.0", - "mkdocs-mermaid2-plugin>=1.2.3", - "mkdocs-git-revision-date-localized-plugin>=1.2.0", - "mkdocs-minify-plugin>=0.8.0", - "structlog>=25.5.0", - "ty>=0.0.1a28", -] - -# Note: agent-framework-core is optional for magentic mode (multi-agent orchestration) -# Version pinned to 1.0.0b* to avoid breaking changes. CI skips tests via pytest.importorskip diff --git a/requirements.txt b/requirements.txt index cf1b66bb960423122615ba3340799ef6e9d92435..3dfd393b3eb853eff9cee0d4e8ad62f73ba27ff0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,92 +1 @@ -########################## -# DO NOT USE THIS FILE -# FOR GRADIO DEMO ONLY -########################## - - -#Core dependencies for HuggingFace Spaces -pydantic>=2.7 -pydantic-settings>=2.2 -pydantic-ai>=0.0.16 - -# OPTIONAL AI Providers -openai>=1.0.0 -anthropic>=0.18.0 - -# HTTP & Parsing -httpx>=0.27 -aiohttp>=3.13.2 # Required for website crawling -beautifulsoup4>=4.12 -lxml>=6.0.2 # Required for BeautifulSoup lxml parser (faster than html.parser) -xmltodict>=0.13 - -# HuggingFace Hub -huggingface-hub>=0.20.0 - -# UI (Gradio with MCP server support) -gradio[mcp,oauth]>=6.0.0 - -# Utils -python-dotenv>=1.0 -tenacity>=8.2 -structlog>=24.1 -requests>=2.32.5 -limits>=3.0 # Rate limiting -pydantic-graph>=1.22.0 - -# Web search -ddgs>=9.9.2 # duckduckgo-search has been renamed to ddgs -fake-useragent==2.2.0 -socksio==1.0.0 -# LlamaIndex RAG -llama-index-llms-huggingface>=0.6.1 -llama-index-llms-huggingface-api>=0.6.1 -llama-index-vector-stores-chroma>=0.5.3 -llama-index>=0.14.8 - -# Audio/Image processing -gradio-client>=1.0.0 # For STT/OCR API calls -soundfile>=0.12.0 # For audio file I/O -pillow>=10.0.0 # For image processing - -# TTS dependencies (for Modal GPU TTS) -torch>=2.0.0 # Required by Kokoro TTS -transformers>=4.57.2 # Required by Kokoro TTS -modal>=0.63.0 # Required for TTS GPU execution -# Note: Kokoro is installed in Modal image from: git+https://github.com/hexgrad/kokoro.git - -# Embeddings & Vector Store -tokenizers>=0.22.0,<=0.23.0 -rpds-py>=0.29.0 # Python implementation of rpds (required by chromadb on Windows) -chromadb>=0.4.0 -sentence-transformers>=2.2.0 -numpy<2.0 # chromadb compatibility: uses np.float_ removed in NumPy 2.0 -neo4j>=6.0.3 - -### DOCUMENT 
STUFF - -cssselect2==0.8.0 -docopt==0.6.2 -fonttools==4.61.0 -markdown2==2.5.4 -md2pdf==1.0.1 -pydyf==0.11.0 -pyphen==0.17.2 -tinycss2==1.5.1 -tinyhtml5==2.0.0 -weasyprint==66.0 -webencodings==0.5.1 -zopfli==0.4.0 - -# Optional: Modal for code execution -modal>=0.63.0 - -# Pydantic AI with HuggingFace support -pydantic-ai-slim[huggingface]>=0.0.18 - -# Multi-agent orchestration (Advanced mode) -agent-framework-core>=1.0.0b251120,<2.0.0 - -# LlamaIndex RAG - OpenAI -llama-index-llms-openai>=0.6.9 -llama-index-embeddings-openai>=0.5.1 +deepcritical \ No newline at end of file diff --git a/site/404.html b/site/404.html deleted file mode 100644 index e27584cf925588e1877fa35980693a314efa832c..0000000000000000000000000000000000000000 --- a/site/404.html +++ /dev/null @@ -1 +0,0 @@ - The DETERMINATOR

404 - Not found

\ No newline at end of file diff --git a/site/api/agents/index.html b/site/api/agents/index.html deleted file mode 100644 index 47398e7e76b2e53e625f3576fa4685056935bbe8..0000000000000000000000000000000000000000 --- a/site/api/agents/index.html +++ /dev/null @@ -1 +0,0 @@ - Agents API Reference - The DETERMINATOR

Agents API Reference

This page documents the API for DeepCritical agents.

KnowledgeGapAgent

Module: src.agents.knowledge_gap

Purpose: Evaluates research state and identifies knowledge gaps.

Methods

evaluate

Evaluates research completeness and identifies outstanding knowledge gaps.

Parameters:
- query: Research query string
- background_context: Background context for the query (default: "")
- conversation_history: History of actions, findings, and thoughts as string (default: "")
- iteration: Current iteration number (default: 0)
- time_elapsed_minutes: Elapsed time in minutes (default: 0.0)
- max_time_minutes: Maximum time limit in minutes (default: 10)

Returns: KnowledgeGapOutput with: - research_complete: Boolean indicating if research is complete - outstanding_gaps: List of remaining knowledge gaps
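
A minimal usage sketch, assuming a factory named create_knowledge_gap_agent (only the create_agent_name naming pattern is documented under Factory Functions below); the keyword arguments mirror the parameters listed above:

```python
import asyncio

# Assumed factory name following the documented create_<agent_name> pattern.
from src.agent_factory.agents import create_knowledge_gap_agent


async def main() -> None:
    agent = create_knowledge_gap_agent()  # model=None resolves the default model from settings
    result = await agent.evaluate(
        query="What is the efficacy of drug X in early Alzheimer's disease?",
        background_context="",
        conversation_history="",
        iteration=1,
        time_elapsed_minutes=2.5,
        max_time_minutes=10,
    )
    print(result.research_complete, result.outstanding_gaps)  # KnowledgeGapOutput fields


asyncio.run(main())
```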

ToolSelectorAgent

Module: src.agents.tool_selector

Purpose: Selects appropriate tools for addressing knowledge gaps.

Methods

select_tools

Selects tools for addressing a knowledge gap.

Parameters: - gap: The knowledge gap to address - query: Research query string - background_context: Optional background context (default: "") - conversation_history: History of actions, findings, and thoughts as string (default: "")

Returns: AgentSelectionPlan with list of AgentTask objects.

WriterAgent

Module: src.agents.writer

Purpose: Generates final reports from research findings.

Methods

write_report

Generates a markdown report from research findings.

Parameters: - query: Research query string - findings: Research findings to include in report - output_length: Optional description of desired output length (default: "") - output_instructions: Optional additional instructions for report generation (default: "")

Returns: Markdown string with numbered citations.
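
A usage sketch for the writer, again assuming a create_writer_agent factory (the factory name is an assumption; the write_report arguments follow the parameters above):

```python
import asyncio

from src.agent_factory.agents import create_writer_agent  # assumed factory name


async def main() -> None:
    writer = create_writer_agent()
    report_md = await writer.write_report(
        query="Summarise the evidence on statin use and dementia risk",
        findings="[1] Cohort study A reported ...\n[2] Meta-analysis B found ...",
        output_length="about 500 words",
        output_instructions="Use numbered citations.",
    )
    print(report_md)  # markdown report with numbered citations


asyncio.run(main())
```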

LongWriterAgent

Module: src.agents.long_writer

Purpose: Long-form report generation with section-by-section writing.

Methods

write_next_section

Writes the next section of a long-form report.

Parameters: - original_query: The original research query - report_draft: Current report draft as string (all sections written so far) - next_section_title: Title of the section to write - next_section_draft: Draft content for the next section

Returns: LongWriterOutput with formatted section and references.

write_report

Generates final report from draft.

Parameters: - query: Research query string - report_title: Title of the report - report_draft: Complete report draft

Returns: Final markdown report string.

ProofreaderAgent

Module: src.agents.proofreader

Purpose: Proofreads and polishes report drafts.

Methods

proofread

Proofreads and polishes a report draft.

Parameters: - query: Research query string - report_title: Title of the report - report_draft: Report draft to proofread

Returns: Polished markdown string.

ThinkingAgent

Module: src.agents.thinking

Purpose: Generates observations from conversation history.

Methods

generate_observations

Generates observations from conversation history.

Parameters: - query: Research query string - background_context: Optional background context (default: "") - conversation_history: History of actions, findings, and thoughts as string (default: "") - iteration: Current iteration number (default: 1)

Returns: Observation string.

InputParserAgent

Module: src.agents.input_parser

Purpose: Parses and improves user queries, detects research mode.

Methods

parse

Parses and improves a user query.

Parameters: - query: Original query string

Returns: ParsedQuery with: - original_query: Original query string - improved_query: Refined query string - research_mode: "iterative" or "deep" - key_entities: List of key entities - research_questions: List of research questions

Factory Functions

All agents have factory functions in src.agent_factory.agents:

Parameters: - model: Optional Pydantic AI model. If None, uses get_model() from settings. - oauth_token: Optional OAuth token from HuggingFace login (takes priority over env vars)

Returns: Agent instance.
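
As a sketch of the shared factory signature (the concrete factory name below is an assumption; only the parameters above are documented):

```python
from src.agent_factory.agents import create_input_parser_agent  # assumed factory name

# Default: model resolved from settings
parser = create_input_parser_agent()

# Passing an OAuth token from a HuggingFace login (takes priority over env vars)
parser = create_input_parser_agent(model=None, oauth_token="hf_xxx")  # hf_xxx is a placeholder
```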

See Also

\ No newline at end of file diff --git a/site/api/models/index.html b/site/api/models/index.html deleted file mode 100644 index 06a0e6dbadc4385037c571d9624f3d5766b1709f..0000000000000000000000000000000000000000 --- a/site/api/models/index.html +++ /dev/null @@ -1 +0,0 @@ - Models API Reference - The DETERMINATOR

Models API Reference

This page documents the Pydantic models used throughout DeepCritical.

Evidence

Module: src.utils.models

Purpose: Represents evidence from search results.

Fields: - citation: Citation information (title, URL, date, authors) - content: Evidence text content - relevance: Relevance score (0.0-1.0) - metadata: Additional metadata dictionary

Citation

Module: src.utils.models

Purpose: Citation information for evidence.

Fields: - source: Source name (e.g., "pubmed", "clinicaltrials", "europepmc", "web", "rag") - title: Article/trial title - url: Source URL - date: Publication date (YYYY-MM-DD or "Unknown") - authors: List of authors (optional)
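
A minimal construction sketch using the fields listed above (class names and fields are as documented; the values are illustrative):

```python
from src.utils.models import Citation, Evidence

citation = Citation(
    source="pubmed",
    title="A randomized trial of ...",
    url="https://pubmed.ncbi.nlm.nih.gov/12345678/",  # illustrative URL
    date="2023-05-01",
    authors=["Smith J", "Doe A"],
)

evidence = Evidence(
    citation=citation,
    content="The trial reported a 12% relative risk reduction in ...",
    relevance=0.8,
    metadata={"pmid": "12345678"},
)
```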

KnowledgeGapOutput

Module: src.utils.models

Purpose: Output from knowledge gap evaluation.

Fields: - research_complete: Boolean indicating if research is complete - outstanding_gaps: List of remaining knowledge gaps

AgentSelectionPlan

Module: src.utils.models

Purpose: Plan for tool/agent selection.

Fields: - tasks: List of agent tasks to execute

AgentTask

Module: src.utils.models

Purpose: Individual agent task.

Fields: - gap: The knowledge gap being addressed (optional) - agent: Name of agent to use - query: The specific query for the agent - entity_website: The website of the entity being researched, if known (optional)

ReportDraft

Module: src.utils.models

Purpose: Draft structure for long-form reports.

Fields: - sections: List of report sections

ReportSection

Module: src.utils.models

Purpose: Individual section in a report draft.

Fields: - section_title: The title of the section - section_content: The content of the section

ParsedQuery

Module: src.utils.models

Purpose: Parsed and improved query.

Fields: - original_query: Original query string - improved_query: Refined query string - research_mode: Research mode ("iterative" or "deep") - key_entities: List of key entities - research_questions: List of research questions

Conversation

Module: src.utils.models

Purpose: Conversation history with iterations.

Fields: - history: List of iteration data

IterationData

Module: src.utils.models

Purpose: Data for a single iteration.

Fields: - gap: The gap addressed in the iteration - tool_calls: The tool calls made - findings: The findings collected from tool calls - thought: The thinking done to reflect on the success of the iteration and next steps

AgentEvent

Module: src.utils.models

Purpose: Event emitted during research execution.

Fields: - type: Event type (e.g., "started", "search_complete", "complete") - iteration: Iteration number (optional) - data: Event data dictionary

BudgetStatus

Module: src.utils.models

Purpose: Current budget status.

Fields:
- tokens_used: Total tokens used
- tokens_limit: Token budget limit
- time_elapsed_seconds: Time elapsed in seconds
- time_limit_seconds: Time budget limit (default: 600.0 seconds / 10 minutes)
- iterations: Number of iterations completed
- iterations_limit: Maximum iterations (default: 10)
- iteration_tokens: Tokens used per iteration (iteration number -> token count)

See Also

\ No newline at end of file diff --git a/site/api/orchestrators/index.html b/site/api/orchestrators/index.html deleted file mode 100644 index ac74f5543f764e9d4dba1b4b6c1cb6bfd7d013e5..0000000000000000000000000000000000000000 --- a/site/api/orchestrators/index.html +++ /dev/null @@ -1 +0,0 @@ - Orchestrators API Reference - The DETERMINATOR

Orchestrators API Reference

This page documents the API for DeepCritical orchestrators.

IterativeResearchFlow

Module: src.orchestrator.research_flow

Purpose: Single-loop research with search-judge-synthesize cycles.

Methods

run

Runs iterative research flow.

Parameters: - query: Research query string - background_context: Background context (default: "") - output_length: Optional description of desired output length (default: "") - output_instructions: Optional additional instructions for report generation (default: "")

Returns: Final report string.

Note: max_iterations, max_time_minutes, and token_budget are constructor parameters, not run() parameters.
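
A usage sketch based on the note above: limits go to the constructor, the query goes to run(). Anything beyond the three constructor parameters named in the note is an assumption:

```python
import asyncio

from src.orchestrator.research_flow import IterativeResearchFlow


async def main() -> None:
    flow = IterativeResearchFlow(
        max_iterations=5,        # constructor parameters per the note above
        max_time_minutes=10,
        token_budget=50_000,
    )
    report = await flow.run(
        query="What mechanisms link sleep loss to impaired amyloid clearance?",
        background_context="",
    )
    print(report)  # final markdown report


asyncio.run(main())
```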

DeepResearchFlow

Module: src.orchestrator.research_flow

Purpose: Multi-section parallel research with planning and synthesis.

Methods

run

Runs deep research flow.

Parameters: - query: Research query string

Returns: Final report string.

Note: max_iterations_per_section, max_time_minutes, and token_budget are constructor parameters, not run() parameters.

GraphOrchestrator

Module: src.orchestrator.graph_orchestrator

Purpose: Graph-based execution using Pydantic AI agents as nodes.

Methods

run

Runs graph-based research orchestration.

Parameters: - query: Research query string

Yields: AgentEvent objects during graph execution.

Note: research_mode and use_graph are constructor parameters, not run() parameters.
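
A sketch of consuming the event stream; research_mode and use_graph follow the note above, while other construction details are assumptions:

```python
import asyncio

from src.orchestrator.graph_orchestrator import GraphOrchestrator


async def main() -> None:
    orchestrator = GraphOrchestrator(research_mode="iterative", use_graph=True)
    async for event in orchestrator.run("How does metformin affect AMPK signalling?"):
        # Each event is an AgentEvent: type, optional iteration, and a data dict
        print(event.type, event.iteration, event.data)


asyncio.run(main())
```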

Orchestrator Factory

Module: src.orchestrator_factory

Purpose: Factory for creating orchestrators.

Functions

create_orchestrator

Creates an orchestrator instance.

Parameters:
- search_handler: Search handler protocol implementation (optional, required for simple mode)
- judge_handler: Judge handler protocol implementation (optional, required for simple mode)
- config: Configuration object (optional)
- mode: Orchestrator mode ("simple", "advanced", "magentic", "iterative", "deep", "auto", or None for auto-detect)
- oauth_token: Optional OAuth token from HuggingFace login (takes priority over env vars)

Returns: Orchestrator instance.

Raises: - ValueError: If requirements not met

Modes: - "simple": Legacy orchestrator - "advanced" or "magentic": Magentic orchestrator (requires OpenAI API key) - None: Auto-detect based on API key availability

MagenticOrchestrator

Module: src.orchestrator_magentic

Purpose: Multi-agent coordination using Microsoft Agent Framework.

Methods

run

Runs Magentic orchestration.

Parameters: - query: Research query string

Yields: AgentEvent objects converted from Magentic events.

Note: max_rounds and max_stalls are constructor parameters, not run() parameters.

Requirements: - agent-framework-core package - OpenAI API key

See Also

\ No newline at end of file diff --git a/site/api/services/index.html b/site/api/services/index.html deleted file mode 100644 index b9d99cd369ec61c46fd4414a7b88c680e0481759..0000000000000000000000000000000000000000 --- a/site/api/services/index.html +++ /dev/null @@ -1,49 +0,0 @@ - Services API Reference - The DETERMINATOR

Services API Reference

This page documents the API for DeepCritical services.

EmbeddingService

Module: src.services.embeddings

Purpose: Local sentence-transformers for semantic search and deduplication.

Methods

embed

Generates embedding for a text string.

Parameters: - text: Text to embed

Returns: Embedding vector as list of floats.

embed_batch

async def embed_batch(self, texts: list[str]) -> list[list[float]]
-

Generates embeddings for multiple texts.

Parameters: - texts: List of texts to embed

Returns: List of embedding vectors.

similarity

async def similarity(self, text1: str, text2: str) -> float
-

Calculates similarity between two texts.

Parameters: - text1: First text - text2: Second text

Returns: Similarity score (0.0-1.0).

find_duplicates

async def find_duplicates(
-    self,
-    texts: list[str],
-    threshold: float = 0.85
-) -> list[tuple[int, int]]
-

Finds duplicate texts based on similarity threshold.

Parameters: - texts: List of texts to check - threshold: Similarity threshold (default: 0.85)

Returns: List of (index1, index2) tuples for duplicate pairs.

add_evidence

async def add_evidence(
-    self,
-    evidence_id: str,
-    content: str,
-    metadata: dict[str, Any]
-) -> None
-

Adds evidence to vector store for semantic search.

Parameters: - evidence_id: Unique identifier for the evidence - content: Evidence text content - metadata: Additional metadata dictionary

search_similar

async def search_similar(
-    self,
-    query: str,
-    n_results: int = 5
-) -> list[dict[str, Any]]
-

Finds semantically similar evidence.

Parameters: - query: Search query string - n_results: Number of results to return (default: 5)

Returns: List of dictionaries with id, content, metadata, and distance keys.

deduplicate

async def deduplicate(
-    self,
-    new_evidence: list[Evidence],
-    threshold: float = 0.9
-) -> list[Evidence]
-

Removes semantically duplicate evidence.

Parameters: - new_evidence: List of evidence items to deduplicate - threshold: Similarity threshold (default: 0.9; evidence that is at least 90% similar to existing items is treated as a duplicate)

Returns: List of unique evidence items (not already in vector store).

Factory Function

get_embedding_service

@lru_cache(maxsize=1)
-def get_embedding_service() -> EmbeddingService
-

Returns singleton EmbeddingService instance.
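
A usage sketch combining the cached factory with the async methods documented above:

```python
import asyncio

from src.services.embeddings import get_embedding_service


async def main() -> None:
    service = get_embedding_service()  # cached singleton

    vectors = await service.embed_batch(["amyloid beta clearance", "tau pathology"])
    score = await service.similarity("sleep deprivation", "sleep loss")
    pairs = await service.find_duplicates(
        ["Statins reduce LDL.", "LDL is reduced by statins.", "Exercise improves mood."],
        threshold=0.85,
    )
    print(len(vectors), score, pairs)


asyncio.run(main())
```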

LlamaIndexRAGService

Module: src.services.rag

Purpose: Retrieval-Augmented Generation using LlamaIndex.

Methods

ingest_evidence

Ingests evidence into RAG service.

Parameters: - evidence_list: List of Evidence objects to ingest

Note: Supports multiple embedding providers (OpenAI, local sentence-transformers, Hugging Face).

retrieve

def retrieve(
-    self,
-    query: str,
-    top_k: int | None = None
-) -> list[dict[str, Any]]
-

Retrieves relevant documents for a query.

Parameters: - query: Search query string - top_k: Number of top results to return (defaults to similarity_top_k from constructor)

Returns: List of dictionaries with text, score, and metadata keys.

query

def query(
-    self,
-    query_str: str,
-    top_k: int | None = None
-) -> str
-

Queries RAG service and returns synthesized response.

Parameters: - query_str: Query string - top_k: Number of results to use (defaults to similarity_top_k from constructor)

Returns: Synthesized response string.

Raises: - ConfigurationError: If no LLM API key is available for query synthesis

ingest_documents

def ingest_documents(self, documents: list[Any]) -> None
-

Ingests raw LlamaIndex Documents.

Parameters: - documents: List of LlamaIndex Document objects

clear_collection

def clear_collection(self) -> None
-

Clears all documents from the collection.

Factory Function

get_rag_service

def get_rag_service(
-    collection_name: str = "deepcritical_evidence",
-    oauth_token: str | None = None,
-    **kwargs: Any
-) -> LlamaIndexRAGService
-

Get or create a RAG service instance.

Parameters: - collection_name: Name of the ChromaDB collection (default: "deepcritical_evidence") - oauth_token: Optional OAuth token from HuggingFace login (takes priority over env vars) - **kwargs: Additional arguments for LlamaIndexRAGService (e.g., use_openai_embeddings=False)

Returns: Configured LlamaIndexRAGService instance.

Note: By default, uses local embeddings (sentence-transformers) which require no API keys.
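
A sketch of ingesting collected Evidence and retrieving from it; ingest_evidence is shown as a synchronous call to match the other documented methods (an assumption), and the Evidence values are illustrative:

```python
from src.services.rag import get_rag_service
from src.utils.models import Citation, Evidence

rag = get_rag_service()  # local sentence-transformers embeddings by default

rag.ingest_evidence([
    Evidence(
        citation=Citation(
            source="europepmc",
            title="Preprint on tau propagation",
            url="https://doi.org/10.1101/example",  # illustrative URL
            date="2024-01-15",
        ),
        content="Tau spreads trans-synaptically in mouse models ...",
        relevance=0.7,
        metadata={},
    )
])

hits = rag.retrieve("How does tau propagate between neurons?", top_k=3)
for hit in hits:
    print(hit["score"], hit["text"][:80])
```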

StatisticalAnalyzer

Module: src.services.statistical_analyzer

Purpose: Secure execution of AI-generated statistical code.

Methods

analyze

async def analyze(
-    self,
-    query: str,
-    evidence: list[Evidence],
-    hypothesis: dict[str, Any] | None = None
-) -> AnalysisResult
-

Analyzes a research question using statistical methods.

Parameters: - query: The research question - evidence: List of Evidence objects to analyze - hypothesis: Optional hypothesis dict with drug, target, pathway, effect, confidence keys

Returns: AnalysisResult with:
- verdict: SUPPORTED, REFUTED, or INCONCLUSIVE
- confidence: Confidence in verdict (0.0-1.0)
- statistical_evidence: Summary of statistical findings
- code_generated: Python code that was executed
- execution_output: Output from code execution
- key_takeaways: Key takeaways from analysis
- limitations: List of limitations

Note: Requires Modal credentials for sandbox execution.
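
A usage sketch; the constructor is shown without arguments, which is an assumption, and Modal credentials must be configured as the note says:

```python
from src.services.statistical_analyzer import StatisticalAnalyzer
from src.utils.models import Evidence


async def run_analysis(evidence: list[Evidence]) -> None:
    analyzer = StatisticalAnalyzer()  # constructor arguments, if any, are not documented here
    result = await analyzer.analyze(
        query="Does drug X lower systolic blood pressure versus placebo?",
        evidence=evidence,
        hypothesis={
            "drug": "drug X",
            "target": "ACE",
            "pathway": "RAAS",
            "effect": "lowers systolic blood pressure",
            "confidence": 0.6,
        },
    )
    print(result.verdict, result.confidence)
    print(result.key_takeaways)
```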

See Also

\ No newline at end of file diff --git a/site/api/tools/index.html b/site/api/tools/index.html deleted file mode 100644 index d5798e8b58ccf5ee1834bcb8cca0aa319e6a1f1a..0000000000000000000000000000000000000000 --- a/site/api/tools/index.html +++ /dev/null @@ -1,51 +0,0 @@ - Tools API Reference - The DETERMINATOR

Tools API Reference

This page documents the API for DeepCritical search tools.

SearchTool Protocol

All tools implement the SearchTool protocol:

class SearchTool(Protocol):
-    @property
-    def name(self) -> str: ...
-    
-    async def search(
-        self, 
-        query: str, 
-        max_results: int = 10
-    ) -> list[Evidence]: ...
-

PubMedTool

Module: src.tools.pubmed

Purpose: Search peer-reviewed biomedical literature from PubMed.

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "pubmed"

Methods

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches PubMed for articles.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects with PubMed articles.

Raises: - SearchError: If search fails (timeout, HTTP error, XML parsing error) - RateLimitError: If rate limit is exceeded (429 status code)

Note: Uses NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Handles single vs. multiple articles.
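
A usage sketch; the constructor is shown without arguments, which is an assumption:

```python
import asyncio

from src.tools.pubmed import PubMedTool


async def main() -> None:
    tool = PubMedTool()
    results = await tool.search("semaglutide weight loss randomized trial", max_results=5)
    for evidence in results:
        print(evidence.citation.title, evidence.citation.url)


asyncio.run(main())
```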

ClinicalTrialsTool

Module: src.tools.clinicaltrials

Purpose: Search ClinicalTrials.gov for interventional studies.

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "clinicaltrials"

Methods

search

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches ClinicalTrials.gov for trials.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects with clinical trials.

Note: Only returns interventional studies with status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION. Uses requests library (NOT httpx - WAF blocks httpx). Runs in thread pool for async compatibility.

Raises: - SearchError: If search fails (HTTP error, request exception)

EuropePMCTool

Module: src.tools.europepmc

Purpose: Search Europe PMC for preprints and peer-reviewed articles.

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "europepmc"

Methods

search

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches Europe PMC for articles and preprints.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects with articles/preprints.

Note: Includes both preprints and peer-reviewed articles; preprints are marked with [PREPRINT - Not peer-reviewed]. Builds article URLs from DOI or PMID.

Raises: - SearchError: If search fails (HTTP error, connection error)

RAGTool

Module: src.tools.rag_tool

Purpose: Semantic search within collected evidence.

Initialization

def __init__(
-    self,
-    rag_service: LlamaIndexRAGService | None = None,
-    oauth_token: str | None = None
-) -> None
-

Parameters: - rag_service: Optional RAG service instance. If None, will be lazy-initialized. - oauth_token: Optional OAuth token from HuggingFace login (for RAG LLM)

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "rag"

Methods

search

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches collected evidence using semantic similarity.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects from collected evidence.

Raises: - ConfigurationError: If RAG service is unavailable

Note: Requires evidence to be ingested into RAG service first. Wraps LlamaIndexRAGService. Returns Evidence from RAG results.

SearchHandler

Module: src.tools.search_handler

Purpose: Orchestrates parallel searches across multiple tools.

Initialization

def __init__(
-    self,
-    tools: list[SearchTool],
-    timeout: float = 30.0,
-    include_rag: bool = False,
-    auto_ingest_to_rag: bool = True,
-    oauth_token: str | None = None
-) -> None
-

Parameters:
- tools: List of search tools to use
- timeout: Timeout for each search in seconds (default: 30.0)
- include_rag: Whether to include RAG tool in searches (default: False)
- auto_ingest_to_rag: Whether to automatically ingest results into RAG (default: True)
- oauth_token: Optional OAuth token from HuggingFace login (for RAG LLM)

Methods

execute

Searches multiple tools in parallel.

Parameters: - query: Search query string - max_results_per_tool: Maximum results per tool (default: 10)

Returns: SearchResult with:
- query: The search query
- evidence: Aggregated list of evidence
- sources_searched: List of source names searched
- total_found: Total number of results
- errors: List of error messages from failed tools

Raises: - SearchError: If search times out

Note: Uses asyncio.gather() for parallel execution. Handles tool failures gracefully (returns errors in SearchResult.errors). Automatically ingests evidence into RAG if enabled.
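
A sketch of running parallel searches across two documented tools (tool constructors are shown without arguments, which is an assumption):

```python
import asyncio

from src.tools.europepmc import EuropePMCTool
from src.tools.pubmed import PubMedTool
from src.tools.search_handler import SearchHandler


async def main() -> None:
    handler = SearchHandler(
        tools=[PubMedTool(), EuropePMCTool()],
        timeout=30.0,
        include_rag=False,
    )
    result = await handler.execute(
        "GLP-1 agonists and cardiovascular outcomes",
        max_results_per_tool=10,
    )
    print(result.total_found, result.sources_searched)
    for error in result.errors:
        print("tool error:", error)


asyncio.run(main())
```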

See Also

\ No newline at end of file diff --git a/site/architecture/agents/index.html b/site/architecture/agents/index.html deleted file mode 100644 index 00b7543c6b33ccde185a2eb1af4da8d64d41f12d..0000000000000000000000000000000000000000 --- a/site/architecture/agents/index.html +++ /dev/null @@ -1 +0,0 @@ - Agents - The DETERMINATOR

Agents Architecture

DeepCritical uses Pydantic AI agents for all AI-powered operations. All agents follow a consistent pattern and use structured output types.

Agent Pattern

Pydantic AI Agents

Pydantic AI agents use the Agent class with the following structure:

  • System Prompt: Module-level constant with date injection
  • Agent Class: __init__(model: Any | None = None)
  • Main Method: Async method (e.g., async def evaluate(), async def write_report())
  • Factory Function: def create_agent_name(model: Any | None = None, oauth_token: str | None = None) -> AgentName

Note: Factory functions accept an optional oauth_token parameter for HuggingFace authentication, which takes priority over environment variables.
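
A condensed sketch of that structure (prompt text, class and factory names are illustrative, and the Agent keyword names assume a recent pydantic-ai release where structured output is configured via output_type):

```python
from datetime import datetime
from typing import Any

from pydantic_ai import Agent

from src.utils.models import KnowledgeGapOutput

# Module-level system prompt with date injection
SYSTEM_PROMPT = (
    "You are a research assistant. Today is "
    f"{datetime.now().strftime('%Y-%m-%d')}."
)


class ExampleGapAgent:
    """Illustrative agent following the documented structure."""

    def __init__(self, model: Any | None = None) -> None:
        self._agent = Agent(
            model or "openai:gpt-4o",  # illustrative default; the project resolves this via get_model()
            output_type=KnowledgeGapOutput,
            system_prompt=SYSTEM_PROMPT,
            retries=3,
        )

    async def evaluate(self, query: str) -> KnowledgeGapOutput:
        result = await self._agent.run(query)
        return result.output


def create_example_gap_agent(
    model: Any | None = None, oauth_token: str | None = None
) -> ExampleGapAgent:
    # oauth_token would feed model resolution when no model is supplied
    return ExampleGapAgent(model=model)
```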

Model Initialization

Agents use get_model() from src/agent_factory/judges.py if no model is provided. This supports:

  • OpenAI models
  • Anthropic models
  • HuggingFace Inference API models

The model selection is based on the configured LLM_PROVIDER in settings.

Error Handling

Agents return fallback values on failure rather than raising exceptions:

  • KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])
  • Empty strings for text outputs
  • Default structured outputs

All errors are logged with context using structlog.
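A sketch of this fallback style; the wrapper function and the gap message are illustrative:

import structlog

from src.utils.models import KnowledgeGapOutput

logger = structlog.get_logger()

async def evaluate_safely(agent, query: str) -> KnowledgeGapOutput:
    try:
        return await agent.evaluate(query)
    except Exception as e:
        # Log with context, then return a conservative fallback instead of raising
        logger.error("Knowledge gap evaluation failed", error=str(e), query=query[:100])
        return KnowledgeGapOutput(
            research_complete=False,
            outstanding_gaps=["Evaluation failed; re-run the gap analysis"],
        )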

Input Validation

All agents validate inputs:

  • Check that queries/inputs are not empty
  • Truncate very long inputs with warnings
  • Handle None values gracefully

Output Types

Agents use structured output types from src/utils/models.py:

  • KnowledgeGapOutput: Research completeness evaluation
  • AgentSelectionPlan: Tool selection plan
  • ReportDraft: Long-form report structure
  • ParsedQuery: Query parsing and mode detection

For text output (writer agents), agents return str directly.

Agent Types

Knowledge Gap Agent

File: src/agents/knowledge_gap.py

Purpose: Evaluates research state and identifies knowledge gaps.

Output: KnowledgeGapOutput with: - research_complete: Boolean indicating if research is complete - outstanding_gaps: List of remaining knowledge gaps

Methods: - async def evaluate(query, background_context, conversation_history, iteration, time_elapsed_minutes, max_time_minutes) -> KnowledgeGapOutput

Tool Selector Agent

File: src/agents/tool_selector.py

Purpose: Selects appropriate tools for addressing knowledge gaps.

Output: AgentSelectionPlan with list of AgentTask objects.

Available Agents: - WebSearchAgent: General web search for fresh information - SiteCrawlerAgent: Research specific entities/companies - RAGAgent: Semantic search within collected evidence

Writer Agent

File: src/agents/writer.py

Purpose: Generates final reports from research findings.

Output: Markdown string with numbered citations.

Methods: - async def write_report(query, findings, output_length, output_instructions) -> str

Features: - Validates inputs - Truncates very long findings (max 50000 chars) with warning - Retry logic for transient failures (3 retries) - Citation validation before returning

Long Writer Agent

File: src/agents/long_writer.py

Purpose: Long-form report generation with section-by-section writing.

Input/Output: Uses ReportDraft models.

Methods: - async def write_next_section(query, draft, section_title, section_content) -> LongWriterOutput - async def write_report(query, report_title, report_draft) -> str

Features: - Writes sections iteratively - Aggregates references across sections - Reformats section headings and references - Deduplicates and renumbers references

Proofreader Agent

File: src/agents/proofreader.py

Purpose: Proofreads and polishes report drafts.

Input: ReportDraft
Output: Polished markdown string

Methods: - async def proofread(query, report_title, report_draft) -> str

Features: - Removes duplicate content across sections - Adds executive summary if multiple sections - Preserves all references and citations - Improves flow and readability

Thinking Agent

File: src/agents/thinking.py

Purpose: Generates observations from conversation history.

Output: Observation string

Methods: - async def generate_observations(query, background_context, conversation_history) -> str

Input Parser Agent

File: src/agents/input_parser.py

Purpose: Parses and improves user queries, detects research mode.

Output: ParsedQuery with:

  • original_query: Original query string
  • improved_query: Refined query string
  • research_mode: "iterative" or "deep"
  • key_entities: List of key entities
  • research_questions: List of research questions

Magentic Agents

The following agents use the BaseAgent pattern from agent-framework and are used exclusively with MagenticOrchestrator:

Hypothesis Agent

File: src/agents/hypothesis_agent.py

Purpose: Generates mechanistic hypotheses based on evidence.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Uses internal Pydantic AI Agent with HypothesisAssessment output type - Accesses shared evidence_store for evidence - Uses embedding service for diverse evidence selection (MMR algorithm) - Stores hypotheses in shared context

Search Agent

File: src/agents/search_agent.py

Purpose: Wraps SearchHandler as an agent for Magentic orchestrator.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Executes searches via SearchHandlerProtocol - Deduplicates evidence using embedding service - Searches for semantically related evidence - Updates shared evidence store

Analysis Agent

File: src/agents/analysis_agent.py

Purpose: Performs statistical analysis using Modal sandbox.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Wraps StatisticalAnalyzer service - Analyzes evidence and hypotheses - Returns verdict (SUPPORTED/REFUTED/INCONCLUSIVE) - Stores analysis results in shared context

Report Agent (Magentic)

File: src/agents/report_agent.py

Purpose: Generates structured scientific reports from evidence and hypotheses.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Uses internal Pydantic AI Agent with ResearchReport output type - Accesses shared evidence store and hypotheses - Validates citations before returning - Formats report as markdown

Judge Agent

File: src/agents/judge_agent.py

Purpose: Evaluates evidence quality and determines if sufficient for synthesis.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse - async def run_stream(messages, thread, **kwargs) -> AsyncIterable[AgentRunResponseUpdate]

Features: - Wraps JudgeHandlerProtocol - Accesses shared evidence store - Returns JudgeAssessment with sufficient flag, confidence, and recommendation

Agent Patterns

DeepCritical uses two distinct agent patterns:

1. Pydantic AI Agents (Traditional Pattern)

These agents use the Pydantic AI Agent class directly and are used in iterative and deep research flows:

  • Pattern: Agent(model, output_type, system_prompt)
  • Initialization: __init__(model: Any | None = None)
  • Methods: Agent-specific async methods (e.g., async def evaluate(), async def write_report())
  • Examples: KnowledgeGapAgent, ToolSelectorAgent, WriterAgent, LongWriterAgent, ProofreaderAgent, ThinkingAgent, InputParserAgent

2. Magentic Agents (Agent-Framework Pattern)

These agents use the BaseAgent class from agent-framework and are used in Magentic orchestrator:

  • Pattern: BaseAgent from agent-framework with async def run() method
  • Initialization: __init__(evidence_store, embedding_service, ...)
  • Methods: async def run(messages, thread, **kwargs) -> AgentRunResponse
  • Examples: HypothesisAgent, SearchAgent, AnalysisAgent, ReportAgent, JudgeAgent

Note: Magentic agents are used exclusively with the MagenticOrchestrator and follow the agent-framework protocol for multi-agent coordination.

Factory Functions

All agents have factory functions in src/agent_factory/agents.py:

Factory functions:

  • Use get_model() if no model is provided
  • Accept an oauth_token parameter for HuggingFace authentication
  • Raise ConfigurationError if creation fails
  • Log agent creation
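A sketch of that shape for one agent; the error wording and logging fields are illustrative:

from typing import Any

import structlog

from src.agent_factory.judges import get_model
from src.agents.writer import WriterAgent
from src.utils.exceptions import ConfigurationError

logger = structlog.get_logger()

def create_writer_agent(model: Any | None = None, oauth_token: str | None = None) -> WriterAgent:
    try:
        # oauth_token would be forwarded to the model provider; omitted in this sketch
        agent = WriterAgent(model=model or get_model())
        logger.info("Agent created", agent="writer")
        return agent
    except Exception as e:
        raise ConfigurationError("Failed to create writer agent") from e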

See Also

\ No newline at end of file diff --git a/site/architecture/graph_orchestration/index.html b/site/architecture/graph_orchestration/index.html deleted file mode 100644 index 6f35dca9170f12c7f670b5de3ef547337f1ead61..0000000000000000000000000000000000000000 --- a/site/architecture/graph_orchestration/index.html +++ /dev/null @@ -1,75 +0,0 @@ - Graph Orchestration - The DETERMINATOR

Graph Orchestration Architecture

Overview

DeepCritical implements a graph-based orchestration system for research workflows using Pydantic AI agents as nodes. This enables better parallel execution, conditional routing, and state management compared to simple agent chains.

Graph Patterns

Iterative Research Graph

The iterative research graph follows this pattern:

[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?]
-                                              ↓ No          ↓ Yes
-                                    [Tool Selector]    [Writer]
-                                              ↓
-                                    [Execute Tools] → [Loop Back]
-

Node IDs: thinking → knowledge_gap → continue_decision → tool_selector / writer → execute_tools → (loop back to thinking)

Special Node Handling: - execute_tools: State node that uses search_handler to execute searches and add evidence to workflow state - continue_decision: Decision node that routes based on research_complete flag from KnowledgeGapOutput

Deep Research Graph

The deep research graph follows this pattern:

[Input] → [Planner] → [Store Plan] → [Parallel Loops] → [Collect Drafts] → [Synthesizer]
-                                        ↓         ↓         ↓
-                                     [Loop1]  [Loop2]  [Loop3]
-

Node IDs: planner → store_plan → parallel_loops → collect_drafts → synthesizer

Special Node Handling:

  • planner: Agent node that creates ReportPlan with the report outline
  • store_plan: State node that stores ReportPlan in context for the parallel loops
  • parallel_loops: Parallel node that executes IterativeResearchFlow instances for each section
  • collect_drafts: State node that collects section drafts from the parallel loops
  • synthesizer: Agent node that calls LongWriterAgent.write_report() directly with ReportDraft

Deep Research


-sequenceDiagram
-    actor User
-    participant GraphOrchestrator
-    participant InputParser
-    participant GraphBuilder
-    participant GraphExecutor
-    participant Agent
-    participant BudgetTracker
-    participant WorkflowState
-
-    User->>GraphOrchestrator: run(query)
-    GraphOrchestrator->>InputParser: detect_research_mode(query)
-    InputParser-->>GraphOrchestrator: mode (iterative/deep)
-    GraphOrchestrator->>GraphBuilder: build_graph(mode)
-    GraphBuilder-->>GraphOrchestrator: ResearchGraph
-    GraphOrchestrator->>WorkflowState: init_workflow_state()
-    GraphOrchestrator->>BudgetTracker: create_budget()
-    GraphOrchestrator->>GraphExecutor: _execute_graph(graph)
-    
-    loop For each node in graph
-        GraphExecutor->>Agent: execute_node(agent_node)
-        Agent->>Agent: process_input
-        Agent-->>GraphExecutor: result
-        GraphExecutor->>WorkflowState: update_state(result)
-        GraphExecutor->>BudgetTracker: add_tokens(used)
-        GraphExecutor->>BudgetTracker: check_budget()
-        alt Budget exceeded
-            GraphExecutor->>GraphOrchestrator: emit(error_event)
-        else Continue
-            GraphExecutor->>GraphOrchestrator: emit(progress_event)
-        end
-    end
-    
-    GraphOrchestrator->>User: AsyncGenerator[AgentEvent]
-

Iterative Research

sequenceDiagram
-    participant IterativeFlow
-    participant ThinkingAgent
-    participant KnowledgeGapAgent
-    participant ToolSelector
-    participant ToolExecutor
-    participant JudgeHandler
-    participant WriterAgent
-
-    IterativeFlow->>IterativeFlow: run(query)
-    
-    loop Until complete or max_iterations
-        IterativeFlow->>ThinkingAgent: generate_observations()
-        ThinkingAgent-->>IterativeFlow: observations
-        
-        IterativeFlow->>KnowledgeGapAgent: evaluate_gaps()
-        KnowledgeGapAgent-->>IterativeFlow: KnowledgeGapOutput
-        
-        alt Research complete
-            IterativeFlow->>WriterAgent: create_final_report()
-            WriterAgent-->>IterativeFlow: final_report
-        else Gaps remain
-            IterativeFlow->>ToolSelector: select_agents(gap)
-            ToolSelector-->>IterativeFlow: AgentSelectionPlan
-            
-            IterativeFlow->>ToolExecutor: execute_tool_tasks()
-            ToolExecutor-->>IterativeFlow: ToolAgentOutput[]
-            
-            IterativeFlow->>JudgeHandler: assess_evidence()
-            JudgeHandler-->>IterativeFlow: should_continue
-        end
-    end

Graph Structure

Nodes

Graph nodes represent different stages in the research workflow:

  1. Agent Nodes: Execute Pydantic AI agents
     • Input: Prompt/query
     • Output: Structured or unstructured response
     • Examples: KnowledgeGapAgent, ToolSelectorAgent, ThinkingAgent

  2. State Nodes: Update or read workflow state
     • Input: Current state
     • Output: Updated state
     • Examples: Update evidence, update conversation history

  3. Decision Nodes: Make routing decisions based on conditions
     • Input: Current state/results
     • Output: Next node ID
     • Examples: Continue research vs. complete research

  4. Parallel Nodes: Execute multiple nodes concurrently
     • Input: List of node IDs
     • Output: Aggregated results
     • Examples: Parallel iterative research loops

Edges

Edges define transitions between nodes:

  1. Sequential Edges: Always traversed (no condition)
     • From: Source node
     • To: Target node
     • Condition: None (always True)

  2. Conditional Edges: Traversed based on condition
     • From: Source node
     • To: Target node
     • Condition: Callable that returns bool
     • Example: If research complete → go to writer, else → continue loop

  3. Parallel Edges: Used for parallel execution branches
     • From: Parallel node
     • To: Multiple target nodes
     • Execution: All targets run concurrently
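To make the node and edge types above concrete, here is a minimal, illustrative representation; the dataclasses and field names are assumptions for explanation, not the project's actual classes:

from dataclasses import dataclass
from typing import Any, Callable

@dataclass
class Node:
    node_id: str
    kind: str        # "agent" | "state" | "decision" | "parallel"
    payload: Any     # agent instance, state updater, decision function, or list of child node IDs

@dataclass
class Edge:
    source: str
    target: str
    condition: Callable[[Any], bool] | None = None  # None = sequential edge (always traversed)

def next_targets(edges: list[Edge], current: str, context: Any) -> list[str]:
    # Follow every outgoing edge whose condition is absent or evaluates to True
    return [
        e.target
        for e in edges
        if e.source == current and (e.condition is None or e.condition(context))
    ]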

State Management

State is managed via WorkflowState using ContextVar for thread-safe isolation:

  • Evidence: Collected evidence from searches
  • Conversation: Iteration history (gaps, tool calls, findings, thoughts)
  • Embedding Service: For semantic search

State transitions occur at state nodes, which update the global workflow state.

Execution Flow

  1. Graph Construction: Build graph from nodes and edges using create_iterative_graph() or create_deep_graph()
  2. Graph Validation: Ensure graph is valid (no cycles, all nodes reachable) via ResearchGraph.validate_structure()
  3. Graph Execution: Traverse graph from entry node using GraphOrchestrator._execute_graph()
  4. Node Execution: Execute each node based on type (see the sketch after this list):
     • Agent Nodes: Call agent.run() with transformed input
     • State Nodes: Update workflow state via state_updater function
     • Decision Nodes: Evaluate decision_function to get next node ID
     • Parallel Nodes: Execute all parallel nodes concurrently via asyncio.gather()
  5. Edge Evaluation: Determine next node(s) based on edges and conditions
  6. Parallel Execution: Use asyncio.gather() for parallel nodes
  7. State Updates: Update state at state nodes via GraphExecutionContext.update_state()
  8. Event Streaming: Yield AgentEvent objects during execution for UI
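A schematic of the per-node dispatch in step 4, reusing the illustrative Node shape from the earlier sketch; the method names on the execution context follow this page, everything else is simplified:

import asyncio

async def execute_node(node, context, graph):
    # Dispatch on the node kind described above; `graph.nodes` as a dict is an assumption
    if node.kind == "agent":
        result = await node.payload.run(context.state)               # agent.run() with transformed input
    elif node.kind == "state":
        result = context.update_state(node.payload, context.state)   # apply the state_updater function
    elif node.kind == "decision":
        result = node.payload(context.state)                         # returns the next node ID
    else:  # "parallel"
        children = [graph.nodes[nid] for nid in node.payload]
        result = await asyncio.gather(*(execute_node(c, context, graph) for c in children))
    context.set_node_result(node.node_id, result)
    context.mark_visited(node.node_id)
    return result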

GraphExecutionContext

The GraphExecutionContext class manages execution state during graph traversal:

  • State: Current WorkflowState instance
  • Budget Tracker: BudgetTracker instance for budget enforcement
  • Node Results: Dictionary storing results from each node execution
  • Visited Nodes: Set of node IDs that have been executed
  • Current Node: ID of the node currently being executed

Methods:

  • set_node_result(node_id, result): Store result from node execution
  • get_node_result(node_id): Retrieve stored result
  • has_visited(node_id): Check if node was visited
  • mark_visited(node_id): Mark node as visited
  • update_state(updater, data): Update workflow state

Conditional Routing

Decision nodes evaluate conditions and return next node IDs:

  • Knowledge Gap Decision: If research_complete → writer, else → tool selector
  • Budget Decision: If budget exceeded → exit, else → continue
  • Iteration Decision: If max iterations → exit, else → continue
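The first of these decisions can be as small as a function mapping KnowledgeGapOutput to a node ID; the exact shape used by the project is not shown on this page, so treat this as an assumption:

from src.utils.models import KnowledgeGapOutput

def continue_decision(gap_output: KnowledgeGapOutput) -> str:
    # Completed research routes to the writer; otherwise keep selecting tools
    return "writer" if gap_output.research_complete else "tool_selector"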

Parallel Execution

Parallel nodes execute multiple nodes concurrently:

  • Each parallel branch runs independently
  • Results are aggregated after all branches complete
  • State is synchronized after parallel execution
  • Errors in one branch don't stop other branches

Budget Enforcement

Budget constraints are enforced at decision nodes:

  • Token Budget: Track LLM token usage
  • Time Budget: Track elapsed time
  • Iteration Budget: Track iteration count

If any budget is exceeded, execution routes to exit node.

Error Handling

Errors are handled at multiple levels:

  1. Node Level: Catch errors in individual node execution
  2. Graph Level: Handle errors during graph traversal
  3. State Level: Rollback state changes on error

Errors are logged and yield error events for UI.

Backward Compatibility

Graph execution is optional via feature flag:

  • USE_GRAPH_EXECUTION=true: Use graph-based execution
  • USE_GRAPH_EXECUTION=false: Use agent chain execution (existing)

This allows gradual migration and fallback if needed.
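How the flag is read is not shown on this page; a plausible sketch, assuming a plain environment variable and the documented use_graph parameter on the research flows:

import os

from src.orchestrator.research_flow import IterativeResearchFlow

# Graph execution is opt-in; fall back to the existing agent-chain path otherwise
use_graph = os.getenv("USE_GRAPH_EXECUTION", "false").lower() == "true"
flow = IterativeResearchFlow(use_graph=use_graph)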

See Also

\ No newline at end of file diff --git a/site/architecture/middleware/index.html b/site/architecture/middleware/index.html deleted file mode 100644 index 54435ced90617eb287b0b1dd8ef87da616890ec8..0000000000000000000000000000000000000000 --- a/site/architecture/middleware/index.html +++ /dev/null @@ -1,40 +0,0 @@ - Middleware - The DETERMINATOR

Middleware Architecture

DeepCritical uses middleware for state management, budget tracking, and workflow coordination.

State Management

WorkflowState

File: src/middleware/state_machine.py

Purpose: Thread-safe state management for research workflows

Implementation: Uses ContextVar for thread-safe isolation

State Components:

  • evidence: list[Evidence]: Collected evidence from searches
  • conversation: Conversation: Iteration history (gaps, tool calls, findings, thoughts)
  • embedding_service: Any: Embedding service for semantic search

Methods:

  • add_evidence(new_evidence: list[Evidence]) -> int: Adds evidence with URL-based deduplication. Returns the number of new items added (excluding duplicates).
  • async search_related(query: str, n_results: int = 5) -> list[Evidence]: Semantic search for related evidence using the embedding service

Initialization:

Access:
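Both snippets were lost in this page. A sketch of the ContextVar pattern they describe; init_workflow_state() is named in the graph-orchestration sequence diagram, while the accessor name and constructor arguments are assumptions:

from contextvars import ContextVar

# WorkflowState itself is defined in src/middleware/state_machine.py
_workflow_state: ContextVar["WorkflowState | None"] = ContextVar("workflow_state", default=None)

def init_workflow_state(embedding_service=None) -> "WorkflowState":
    # Each request / asyncio task sees its own isolated state
    state = WorkflowState(evidence=[], embedding_service=embedding_service)
    _workflow_state.set(state)
    return state

def get_workflow_state() -> "WorkflowState":
    state = _workflow_state.get()
    if state is None:
        raise RuntimeError("init_workflow_state() must be called first")
    return state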

Workflow Manager

File: src/middleware/workflow_manager.py

Purpose: Coordinates parallel research loops

Methods:

  • async add_loop(loop_id: str, query: str) -> ResearchLoop: Add a new research loop to manage
  • async run_loops_parallel(loop_configs: list[dict], loop_func: Callable, judge_handler: Any | None = None, budget_tracker: Any | None = None) -> list[Any]: Run multiple research loops in parallel. Takes configuration dicts and a loop function.
  • async update_loop_status(loop_id: str, status: LoopStatus, error: str | None = None): Update loop status
  • async sync_loop_evidence_to_state(loop_id: str): Synchronize evidence from a specific loop to global state

Features: - Uses asyncio.gather() for parallel execution - Handles errors per loop (doesn't fail all if one fails) - Tracks loop status: pending, running, completed, failed, cancelled - Evidence deduplication across parallel loops

Usage:

from src.middleware.workflow_manager import WorkflowManager
-
-manager = WorkflowManager()
-await manager.add_loop("loop1", "Research query 1")
-await manager.add_loop("loop2", "Research query 2")
-
-async def run_research(config: dict) -> str:
-    loop_id = config["loop_id"]
-    query = config["query"]
-    # ... research logic ...
-    return "report"
-
-results = await manager.run_loops_parallel(
-    loop_configs=[
-        {"loop_id": "loop1", "query": "Research query 1"},
-        {"loop_id": "loop2", "query": "Research query 2"},
-    ],
-    loop_func=run_research,
-)
-

Budget Tracker

File: src/middleware/budget_tracker.py

Purpose: Tracks and enforces resource limits

Budget Components: - Tokens: LLM token usage - Time: Elapsed time in seconds - Iterations: Number of iterations

Methods:

  • create_budget(loop_id: str, tokens_limit: int = 100000, time_limit_seconds: float = 600.0, iterations_limit: int = 10) -> BudgetStatus: Create a budget for a specific loop
  • add_tokens(loop_id: str, tokens: int): Add token usage to a loop's budget
  • start_timer(loop_id: str): Start time tracking for a loop
  • update_timer(loop_id: str): Update elapsed time for a loop
  • increment_iteration(loop_id: str): Increment iteration count for a loop
  • check_budget(loop_id: str) -> tuple[bool, str]: Check if a loop's budget has been exceeded. Returns (exceeded: bool, reason: str)
  • can_continue(loop_id: str) -> bool: Check if a loop can continue based on budget

Token Estimation: - estimate_tokens(text: str) -> int: ~4 chars per token - estimate_llm_call_tokens(prompt: str, response: str) -> int: Estimate LLM call tokens

Usage:

from src.middleware.budget_tracker import BudgetTracker
-
-tracker = BudgetTracker()
-budget = tracker.create_budget(
-    loop_id="research_loop",
-    tokens_limit=100000,
-    time_limit_seconds=600,
-    iterations_limit=10
-)
-tracker.start_timer("research_loop")
-# ... research operations ...
-tracker.add_tokens("research_loop", 5000)
-tracker.update_timer("research_loop")
-exceeded, reason = tracker.check_budget("research_loop")
-if exceeded:
-    # Budget exceeded, stop research
-    pass
-if not tracker.can_continue("research_loop"):
-    # Budget exceeded, stop research
-    pass
-

Models

All middleware models are defined in src/utils/models.py:

  • IterationData: Data for a single iteration
  • Conversation: Conversation history with iterations
  • ResearchLoop: Research loop state and configuration
  • BudgetStatus: Current budget status

Thread Safety

All middleware components use ContextVar for thread-safe isolation:

  • Each request/thread has its own workflow state
  • No global mutable state
  • Safe for concurrent requests

See Also

\ No newline at end of file diff --git a/site/architecture/orchestrators/index.html b/site/architecture/orchestrators/index.html deleted file mode 100644 index fcd6738c81d1096bf2112ab7f4f30c80b18b00e4..0000000000000000000000000000000000000000 --- a/site/architecture/orchestrators/index.html +++ /dev/null @@ -1 +0,0 @@ - Orchestrators - The DETERMINATOR

Orchestrators Architecture

DeepCritical supports multiple orchestration patterns for research workflows.

Research Flows

IterativeResearchFlow

File: src/orchestrator/research_flow.py

Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete

Agents Used:

  • KnowledgeGapAgent: Evaluates research completeness
  • ToolSelectorAgent: Selects tools for addressing gaps
  • ThinkingAgent: Generates observations
  • WriterAgent: Creates final report
  • JudgeHandler: Assesses evidence sufficiency

Features: - Tracks iterations, time, budget - Supports graph execution (use_graph=True) and agent chains (use_graph=False) - Iterates until research complete or constraints met

Usage:
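The usage snippet for this flow was not preserved in this page. A minimal sketch, assuming the constructor accepts the documented use_graph flag and that run() returns the final report:

from src.orchestrator.research_flow import IterativeResearchFlow

async def run_iterative(query: str) -> str:
    flow = IterativeResearchFlow(use_graph=False)
    # Iterates (think → evaluate gaps → select tools → execute → judge) until complete or limits are hit
    return await flow.run(query)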

DeepResearchFlow

File: src/orchestrator/research_flow.py

Pattern: Planner → Parallel iterative loops per section → Synthesizer

Agents Used: - PlannerAgent: Breaks query into report sections - IterativeResearchFlow: Per-section research (parallel) - LongWriterAgent or ProofreaderAgent: Final synthesis

Features: - Uses WorkflowManager for parallel execution - Budget tracking per section and globally - State synchronization across parallel loops - Supports graph execution and agent chains

Usage:

Graph Orchestrator

File: src/orchestrator/graph_orchestrator.py

Purpose: Graph-based execution using Pydantic AI agents as nodes

Features: - Uses graph execution (use_graph=True) or agent chains (use_graph=False) as fallback - Routes based on research mode (iterative/deep/auto) - Streams AgentEvent objects for UI - Uses GraphExecutionContext to manage execution state

Node Types: - Agent Nodes: Execute Pydantic AI agents - State Nodes: Update or read workflow state - Decision Nodes: Make routing decisions - Parallel Nodes: Execute multiple nodes concurrently

Edge Types: - Sequential Edges: Always traversed - Conditional Edges: Traversed based on condition - Parallel Edges: Used for parallel execution branches

Special Node Handling:

The GraphOrchestrator has special handling for certain nodes:

  • execute_tools node: State node that uses search_handler to execute searches and add evidence to workflow state
  • parallel_loops node: Parallel node that executes IterativeResearchFlow instances for each section in deep research mode
  • synthesizer node: Agent node that calls LongWriterAgent.write_report() directly with ReportDraft instead of using agent.run()
  • writer node: Agent node that calls WriterAgent.write_report() directly with findings instead of using agent.run()

GraphExecutionContext:

The orchestrator uses GraphExecutionContext to manage execution state: - Tracks current node, visited nodes, and node results - Manages workflow state and budget tracker - Provides methods to store and retrieve node execution results

Orchestrator Factory

File: src/orchestrator_factory.py

Purpose: Factory for creating orchestrators

Modes: - Simple: Legacy orchestrator (backward compatible) - Advanced: Magentic orchestrator (requires OpenAI API key) - Auto-detect: Chooses based on API key availability

Usage:

Magentic Orchestrator

File: src/orchestrator_magentic.py

Purpose: Multi-agent coordination using Microsoft Agent Framework

Features:

  • Uses agent-framework-core
  • ChatAgent pattern with internal LLMs per agent
  • MagenticBuilder with participants:
    • searcher: SearchAgent (wraps SearchHandler)
    • hypothesizer: HypothesisAgent (generates hypotheses)
    • judge: JudgeAgent (evaluates evidence)
    • reporter: ReportAgent (generates final report)
  • Manager orchestrates agents via chat client (OpenAI or HuggingFace)
  • Event-driven: converts Magentic events to AgentEvent for UI streaming via the _process_event() method
  • Supports max rounds, stall detection, and reset handling

Event Processing:

The orchestrator processes Magentic events and converts them to AgentEvent:

  • MagenticOrchestratorMessageEvent → AgentEvent with type based on message content
  • MagenticAgentMessageEvent → AgentEvent with type based on agent name
  • MagenticAgentDeltaEvent → AgentEvent for streaming updates
  • MagenticFinalResultEvent → AgentEvent with type "complete"

Requirements: - agent-framework-core package - OpenAI API key or HuggingFace authentication

Hierarchical Orchestrator

File: src/orchestrator_hierarchical.py

Purpose: Hierarchical orchestrator using middleware and sub-teams

Features: - Uses SubIterationMiddleware with ResearchTeam and LLMSubIterationJudge - Adapts Magentic ChatAgent to SubIterationTeam protocol - Event-driven via asyncio.Queue for coordination - Supports sub-iteration patterns for complex research tasks

Legacy Simple Mode

File: src/legacy_orchestrator.py

Purpose: Linear search-judge-synthesize loop

Features: - Uses SearchHandlerProtocol and JudgeHandlerProtocol - Generator-based design yielding AgentEvent objects - Backward compatibility for simple use cases

State Initialization

All orchestrators must initialize workflow state:
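The code block was not preserved here. A sketch of the call it most likely contained, based on init_workflow_state() appearing in the graph-orchestration sequence diagram (the embedding-service argument is an assumption):

from src.middleware.state_machine import init_workflow_state
from src.services.embeddings import get_embedding_service

# Must run before any agents execute so evidence and conversation history have somewhere to live
state = init_workflow_state(embedding_service=get_embedding_service())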

Event Streaming

All orchestrators yield AgentEvent objects:

Event Types:

  • started: Research started
  • searching: Search in progress
  • search_complete: Search completed
  • judging: Evidence evaluation in progress
  • judge_complete: Evidence evaluation completed
  • looping: Iteration in progress
  • hypothesizing: Generating hypotheses
  • analyzing: Statistical analysis in progress
  • analysis_complete: Statistical analysis completed
  • synthesizing: Synthesizing results
  • complete: Research completed
  • error: Error occurred
  • streaming: Streaming update (delta events)

Event Structure:
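The structure block was not preserved. A plausible shape, inferred from the event types above; all field names are assumptions:

from typing import Any

from pydantic import BaseModel, Field

class AgentEvent(BaseModel):
    type: str                                            # one of the event types listed above, e.g. "searching"
    message: str = ""                                    # human-readable status for the UI
    data: dict[str, Any] = Field(default_factory=dict)   # optional payload (evidence counts, report text, ...)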

See Also

\ No newline at end of file diff --git a/site/architecture/services/index.html b/site/architecture/services/index.html deleted file mode 100644 index 100a2966b8d7b3403b5cf31d9d1a7b1402f52cf1..0000000000000000000000000000000000000000 --- a/site/architecture/services/index.html +++ /dev/null @@ -1,30 +0,0 @@ - Services - The DETERMINATOR

Services Architecture

DeepCritical provides several services for embeddings, RAG, and statistical analysis.

Embedding Service

File: src/services/embeddings.py

Purpose: Local sentence-transformers for semantic search and deduplication

Features:

  • No API Key Required: Uses local sentence-transformers models
  • Async-Safe: All operations use run_in_executor() to avoid blocking the event loop
  • ChromaDB Storage: In-memory vector storage for embeddings
  • Deduplication: 0.9 similarity threshold by default (90% similarity = duplicate, configurable)

Model: Configurable via settings.local_embedding_model (default: all-MiniLM-L6-v2)

Methods:

  • async def embed(text: str) -> list[float]: Generate embeddings (async-safe via run_in_executor())
  • async def embed_batch(texts: list[str]) -> list[list[float]]: Batch embedding (more efficient)
  • async def add_evidence(evidence_id: str, content: str, metadata: dict[str, Any]) -> None: Add evidence to the vector store
  • async def search_similar(query: str, n_results: int = 5) -> list[dict[str, Any]]: Find semantically similar evidence
  • async def deduplicate(new_evidence: list[Evidence], threshold: float = 0.9) -> list[Evidence]: Remove semantically duplicate evidence

Usage:

from src.services.embeddings import get_embedding_service
-
-service = get_embedding_service()
-embedding = await service.embed("text to embed")
-

LlamaIndex RAG Service

File: src/services/llamaindex_rag.py

Purpose: Retrieval-Augmented Generation using LlamaIndex

Features:

  • Multiple Embedding Providers: OpenAI embeddings (requires OPENAI_API_KEY) or local sentence-transformers (no API key)
  • Multiple LLM Providers: HuggingFace LLM (preferred) or OpenAI LLM (fallback) for query synthesis
  • ChromaDB Storage: Vector database for document storage (supports in-memory mode)
  • Metadata Preservation: Preserves source, title, URL, date, authors
  • Lazy Initialization: Graceful fallback if dependencies are not available

Initialization Parameters:

  • use_openai_embeddings: bool | None: Force OpenAI embeddings (None = auto-detect)
  • use_in_memory: bool: Use in-memory ChromaDB client (useful for tests)
  • oauth_token: str | None: Optional OAuth token from HuggingFace login (takes priority over env vars)

Methods:

  • async def ingest_evidence(evidence: list[Evidence]) -> None: Ingest evidence into RAG
  • async def retrieve(query: str, top_k: int = 5) -> list[Document]: Retrieve relevant documents
  • async def query(query: str, top_k: int = 5) -> str: Query with RAG

Usage:

from src.services.llamaindex_rag import get_rag_service
-
-service = get_rag_service(
-    use_openai_embeddings=False,  # Use local embeddings
-    use_in_memory=True,  # Use in-memory ChromaDB
-    oauth_token=token  # Optional HuggingFace token
-)
-if service:
-    documents = await service.retrieve("query", top_k=5)
-

Statistical Analyzer

File: src/services/statistical_analyzer.py

Purpose: Secure execution of AI-generated statistical code

Features: - Modal Sandbox: Secure, isolated execution environment - Code Generation: Generates Python code via LLM - Library Pinning: Version-pinned libraries in SANDBOX_LIBRARIES - Network Isolation: block_network=True by default

Libraries Available: - pandas, numpy, scipy - matplotlib, scikit-learn - statsmodels

Output: AnalysisResult with: - verdict: SUPPORTED, REFUTED, or INCONCLUSIVE - code: Generated analysis code - output: Execution output - error: Error message if execution failed

Usage:

from src.services.statistical_analyzer import StatisticalAnalyzer
-
-analyzer = StatisticalAnalyzer()
-result = await analyzer.analyze(
-    hypothesis="Metformin reduces cancer risk",
-    evidence=evidence_list
-)
-

Singleton Pattern

Services use singleton patterns for lazy initialization:

EmbeddingService: Uses a global variable pattern:

LlamaIndexRAGService: Direct instantiation (no caching):
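The two snippets were lost in extraction; a sketch of the patterns they describe, with signatures assumed from the usage examples above. EmbeddingService and LlamaIndexRAGService are the classes defined in src/services/embeddings.py and src/services/llamaindex_rag.py, so no extra imports are shown:

_embedding_service = None  # module-level singleton

def get_embedding_service():
    # Global-variable pattern: create once per process, reuse afterwards
    global _embedding_service
    if _embedding_service is None:
        _embedding_service = EmbeddingService()
    return _embedding_service

def get_rag_service(use_openai_embeddings=None, use_in_memory=False, oauth_token=None):
    # Direct instantiation: a fresh LlamaIndexRAGService per call, or None if dependencies are missing
    try:
        return LlamaIndexRAGService(
            use_openai_embeddings=use_openai_embeddings,
            use_in_memory=use_in_memory,
            oauth_token=oauth_token,
        )
    except ImportError:
        return None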

This ensures: - Single instance per process - Lazy initialization - No dependencies required at import time

Service Availability

Services check availability before use:

from src.utils.config import settings
-
-if settings.modal_available:
-    # Use Modal sandbox
-    pass
-
-if settings.has_openai_key:
-    # Use OpenAI embeddings for RAG
-    pass
-

See Also

\ No newline at end of file diff --git a/site/architecture/tools/index.html b/site/architecture/tools/index.html deleted file mode 100644 index 0efb987e90752c5f16b1ebc32a23cee8cc3add39..0000000000000000000000000000000000000000 --- a/site/architecture/tools/index.html +++ /dev/null @@ -1,19 +0,0 @@ - Tools - The DETERMINATOR

Tools Architecture

DeepCritical implements a protocol-based search tool system for retrieving evidence from multiple sources.

SearchTool Protocol

All tools implement the SearchTool protocol from src/tools/base.py:
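The protocol block itself was lost in this page; reconstructed from the tool reference (the max_results default is taken from the tool signatures documented there):

from typing import Protocol

from src.utils.models import Evidence

class SearchTool(Protocol):
    @property
    def name(self) -> str:
        ...

    async def search(self, query: str, max_results: int = 10) -> list[Evidence]:
        ...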

Rate Limiting

All tools use the @retry decorator from tenacity:

Tools with API rate limits implement _rate_limit() method and use shared rate limiters from src/tools/rate_limiter.py.
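A sketch of how the decorator is typically applied; the wait parameters are illustrative, not the project's exact values:

from tenacity import retry, stop_after_attempt, wait_exponential

class ExampleTool:
    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
    async def search(self, query: str, max_results: int = 10) -> list:
        # Transient failures (timeouts, HTTP 429/500) are retried with exponential backoff
        ...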

Error Handling

Tools raise custom exceptions:

  • SearchError: General search failures
  • RateLimitError: Rate limit exceeded

Tools handle HTTP errors (429, 500, timeout) and return empty lists on non-critical errors (with warning logs).

Query Preprocessing

Tools use preprocess_query() from src/tools/query_utils.py to:

  • Remove noise from queries
  • Expand synonyms
  • Normalize query format

Evidence Conversion

All tools convert API responses to Evidence objects with:

  • Citation: Title, URL, date, authors
  • content: Evidence text
  • relevance_score: 0.0-1.0 relevance score
  • metadata: Additional metadata

Missing fields are handled gracefully with defaults.
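A sketch of this conversion step; the Evidence and Citation keyword names follow the field list above but are assumptions, and the input dict is illustrative:

from src.utils.models import Citation, Evidence

def to_evidence(item: dict) -> Evidence:
    citation = Citation(
        title=item.get("title", "Untitled"),
        url=item.get("url", ""),
        date=item.get("date"),
        authors=item.get("authors", []),
    )
    return Evidence(
        citation=citation,
        content=item.get("abstract", ""),        # evidence text
        relevance_score=item.get("score", 0.5),  # neutral default when the source gives no score
        metadata={"source": item.get("source", "unknown")},
    )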

Tool Implementations

PubMed Tool

File: src/tools/pubmed.py

API: NCBI E-utilities (ESearch → EFetch)

Rate Limiting: - 0.34s between requests (3 req/sec without API key) - 0.1s between requests (10 req/sec with NCBI API key)

Features: - XML parsing with xmltodict - Handles single vs. multiple articles - Query preprocessing - Evidence conversion with metadata extraction

ClinicalTrials Tool

File: src/tools/clinicaltrials.py

API: ClinicalTrials.gov API v2

Important: Uses requests library (NOT httpx) because WAF blocks httpx TLS fingerprint.

Execution: Runs in thread pool: await asyncio.to_thread(requests.get, ...)
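A compact sketch of that call pattern; the endpoint and query parameters are illustrative:

import asyncio

import requests

async def fetch_studies(query: str) -> dict:
    # requests (not httpx) avoids the WAF's TLS-fingerprint block; run it off the event loop
    response = await asyncio.to_thread(
        requests.get,
        "https://clinicaltrials.gov/api/v2/studies",
        params={"query.term": query, "pageSize": 10},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()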

Filtering: - Only interventional studies - Status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION

Features: - Parses nested JSON structure - Extracts trial metadata - Evidence conversion

Europe PMC Tool

File: src/tools/europepmc.py

API: Europe PMC REST API

Features: - Handles preprint markers: [PREPRINT - Not peer-reviewed] - Builds URLs from DOI or PMID - Checks pubTypeList for preprint detection - Includes both preprints and peer-reviewed articles

RAG Tool

File: src/tools/rag_tool.py

Purpose: Semantic search within collected evidence

Implementation: Wraps LlamaIndexRAGService

Features: - Returns Evidence from RAG results - Handles evidence ingestion - Semantic similarity search - Metadata preservation

Search Handler

File: src/tools/search_handler.py

Purpose: Orchestrates parallel searches across multiple tools

Initialization Parameters:

  • tools: list[SearchTool]: List of search tools to use
  • timeout: float = 30.0: Timeout for each search in seconds
  • include_rag: bool = False: Whether to include the RAG tool in searches
  • auto_ingest_to_rag: bool = True: Whether to automatically ingest results into RAG
  • oauth_token: str | None = None: Optional OAuth token from HuggingFace login (for the RAG LLM)

Methods: - async def execute(query: str, max_results_per_tool: int = 10) -> SearchResult: Execute search across all tools in parallel

Features:

  • Uses asyncio.gather() with return_exceptions=True for parallel execution
  • Aggregates results into SearchResult with evidence and metadata
  • Handles tool failures gracefully (continues with other tools)
  • Deduplicates results by URL
  • Automatically ingests results into RAG if auto_ingest_to_rag=True
  • Can add the RAG tool dynamically via the add_rag_tool() method

Tool Registration

Tools are registered in the search handler:

from src.tools.pubmed import PubMedTool
-from src.tools.clinicaltrials import ClinicalTrialsTool
-from src.tools.europepmc import EuropePMCTool
-from src.tools.search_handler import SearchHandler
-
-search_handler = SearchHandler(
-    tools=[
-        PubMedTool(),
-        ClinicalTrialsTool(),
-        EuropePMCTool(),
-    ],
-    include_rag=True,  # Include RAG tool for semantic search
-    auto_ingest_to_rag=True,  # Automatically ingest results into RAG
-    oauth_token=token  # Optional HuggingFace token for RAG LLM
-)
-
-# Execute search
-result = await search_handler.execute("query", max_results_per_tool=10)
-

See Also

\ No newline at end of file diff --git a/site/architecture/workflow-diagrams/index.html b/site/architecture/workflow-diagrams/index.html deleted file mode 100644 index 418df6422b20c0fbf9fcfb6434093667a2793c4d..0000000000000000000000000000000000000000 --- a/site/architecture/workflow-diagrams/index.html +++ /dev/null @@ -1,488 +0,0 @@ - Workflow Diagrams - The DETERMINATOR

DeepCritical Workflow - Simplified Magentic Architecture

Architecture Pattern: Microsoft Magentic Orchestration
Design Philosophy: Simple, dynamic, manager-driven coordination
Key Innovation: Intelligent manager replaces rigid sequential phases


1. High-Level Magentic Workflow

flowchart TD
-    Start([User Query]) --> Manager[Magentic Manager<br/>Plan • Select • Assess • Adapt]
-
-    Manager -->|Plans| Task1[Task Decomposition]
-    Task1 --> Manager
-
-    Manager -->|Selects & Executes| HypAgent[Hypothesis Agent]
-    Manager -->|Selects & Executes| SearchAgent[Search Agent]
-    Manager -->|Selects & Executes| AnalysisAgent[Analysis Agent]
-    Manager -->|Selects & Executes| ReportAgent[Report Agent]
-
-    HypAgent -->|Results| Manager
-    SearchAgent -->|Results| Manager
-    AnalysisAgent -->|Results| Manager
-    ReportAgent -->|Results| Manager
-
-    Manager -->|Assesses Quality| Decision{Good Enough?}
-    Decision -->|No - Refine| Manager
-    Decision -->|No - Different Agent| Manager
-    Decision -->|No - Stalled| Replan[Reset Plan]
-    Replan --> Manager
-
-    Decision -->|Yes| Synthesis[Synthesize Final Result]
-    Synthesis --> Output([Research Report])
-
-    style Start fill:#e1f5e1
-    style Manager fill:#ffe6e6
-    style HypAgent fill:#fff4e6
-    style SearchAgent fill:#fff4e6
-    style AnalysisAgent fill:#fff4e6
-    style ReportAgent fill:#fff4e6
-    style Decision fill:#ffd6d6
-    style Synthesis fill:#d4edda
-    style Output fill:#e1f5e1

2. Magentic Manager: The 6-Phase Cycle

flowchart LR
-    P1[1. Planning<br/>Analyze task<br/>Create strategy] --> P2[2. Agent Selection<br/>Pick best agent<br/>for subtask]
-    P2 --> P3[3. Execution<br/>Run selected<br/>agent with tools]
-    P3 --> P4[4. Assessment<br/>Evaluate quality<br/>Check progress]
-    P4 --> Decision{Quality OK?<br/>Progress made?}
-    Decision -->|Yes| P6[6. Synthesis<br/>Combine results<br/>Generate report]
-    Decision -->|No| P5[5. Iteration<br/>Adjust plan<br/>Try again]
-    P5 --> P2
-    P6 --> Done([Complete])
-
-    style P1 fill:#fff4e6
-    style P2 fill:#ffe6e6
-    style P3 fill:#e6f3ff
-    style P4 fill:#ffd6d6
-    style P5 fill:#fff3cd
-    style P6 fill:#d4edda
-    style Done fill:#e1f5e1

3. Simplified Agent Architecture

graph TB
-    subgraph "Orchestration Layer"
-        Manager[Magentic Manager<br/>• Plans workflow<br/>• Selects agents<br/>• Assesses quality<br/>• Adapts strategy]
-        SharedContext[(Shared Context<br/>• Hypotheses<br/>• Search Results<br/>• Analysis<br/>• Progress)]
-        Manager <--> SharedContext
-    end
-
-    subgraph "Specialist Agents"
-        HypAgent[Hypothesis Agent<br/>• Domain understanding<br/>• Hypothesis generation<br/>• Testability refinement]
-        SearchAgent[Search Agent<br/>• Multi-source search<br/>• RAG retrieval<br/>• Result ranking]
-        AnalysisAgent[Analysis Agent<br/>• Evidence extraction<br/>• Statistical analysis<br/>• Code execution]
-        ReportAgent[Report Agent<br/>• Report assembly<br/>• Visualization<br/>• Citation formatting]
-    end
-
-    subgraph "MCP Tools"
-        WebSearch[Web Search<br/>PubMed • arXiv • bioRxiv]
-        CodeExec[Code Execution<br/>Sandboxed Python]
-        RAG[RAG Retrieval<br/>Vector DB • Embeddings]
-        Viz[Visualization<br/>Charts • Graphs]
-    end
-
-    Manager -->|Selects & Directs| HypAgent
-    Manager -->|Selects & Directs| SearchAgent
-    Manager -->|Selects & Directs| AnalysisAgent
-    Manager -->|Selects & Directs| ReportAgent
-
-    HypAgent --> SharedContext
-    SearchAgent --> SharedContext
-    AnalysisAgent --> SharedContext
-    ReportAgent --> SharedContext
-
-    SearchAgent --> WebSearch
-    SearchAgent --> RAG
-    AnalysisAgent --> CodeExec
-    ReportAgent --> CodeExec
-    ReportAgent --> Viz
-
-    style Manager fill:#ffe6e6
-    style SharedContext fill:#ffe6f0
-    style HypAgent fill:#fff4e6
-    style SearchAgent fill:#fff4e6
-    style AnalysisAgent fill:#fff4e6
-    style ReportAgent fill:#fff4e6
-    style WebSearch fill:#e6f3ff
-    style CodeExec fill:#e6f3ff
-    style RAG fill:#e6f3ff
-    style Viz fill:#e6f3ff

4. Dynamic Workflow Example

sequenceDiagram
-    participant User
-    participant Manager
-    participant HypAgent
-    participant SearchAgent
-    participant AnalysisAgent
-    participant ReportAgent
-
-    User->>Manager: "Research protein folding in Alzheimer's"
-
-    Note over Manager: PLAN: Generate hypotheses → Search → Analyze → Report
-
-    Manager->>HypAgent: Generate 3 hypotheses
-    HypAgent-->>Manager: Returns 3 hypotheses
-    Note over Manager: ASSESS: Good quality, proceed
-
-    Manager->>SearchAgent: Search literature for hypothesis 1
-    SearchAgent-->>Manager: Returns 15 papers
-    Note over Manager: ASSESS: Good results, continue
-
-    Manager->>SearchAgent: Search for hypothesis 2
-    SearchAgent-->>Manager: Only 2 papers found
-    Note over Manager: ASSESS: Insufficient, refine search
-
-    Manager->>SearchAgent: Refined query for hypothesis 2
-    SearchAgent-->>Manager: Returns 12 papers
-    Note over Manager: ASSESS: Better, proceed
-
-    Manager->>AnalysisAgent: Analyze evidence for all hypotheses
-    AnalysisAgent-->>Manager: Returns analysis with code
-    Note over Manager: ASSESS: Complete, generate report
-
-    Manager->>ReportAgent: Create comprehensive report
-    ReportAgent-->>Manager: Returns formatted report
-    Note over Manager: SYNTHESIZE: Combine all results
-
-    Manager->>User: Final Research Report

5. Manager Decision Logic

flowchart TD
-    Start([Manager Receives Task]) --> Plan[Create Initial Plan]
-
-    Plan --> Select[Select Agent for Next Subtask]
-    Select --> Execute[Execute Agent]
-    Execute --> Collect[Collect Results]
-
-    Collect --> Assess[Assess Quality & Progress]
-
-    Assess --> Q1{Quality Sufficient?}
-    Q1 -->|No| Q2{Same Agent Can Fix?}
-    Q2 -->|Yes| Feedback[Provide Specific Feedback]
-    Feedback --> Execute
-    Q2 -->|No| Different[Try Different Agent]
-    Different --> Select
-
-    Q1 -->|Yes| Q3{Task Complete?}
-    Q3 -->|No| Q4{Making Progress?}
-    Q4 -->|Yes| Select
-    Q4 -->|No - Stalled| Replan[Reset Plan & Approach]
-    Replan --> Plan
-
-    Q3 -->|Yes| Synth[Synthesize Final Result]
-    Synth --> Done([Return Report])
-
-    style Start fill:#e1f5e1
-    style Plan fill:#fff4e6
-    style Select fill:#ffe6e6
-    style Execute fill:#e6f3ff
-    style Assess fill:#ffd6d6
-    style Q1 fill:#ffe6e6
-    style Q2 fill:#ffe6e6
-    style Q3 fill:#ffe6e6
-    style Q4 fill:#ffe6e6
-    style Synth fill:#d4edda
-    style Done fill:#e1f5e1

6. Hypothesis Agent Workflow

flowchart LR
-    Input[Research Query] --> Domain[Identify Domain<br/>& Key Concepts]
-    Domain --> Context[Retrieve Background<br/>Knowledge]
-    Context --> Generate[Generate 3-5<br/>Initial Hypotheses]
-    Generate --> Refine[Refine for<br/>Testability]
-    Refine --> Rank[Rank by<br/>Quality Score]
-    Rank --> Output[Return Top<br/>Hypotheses]
-
-    Output --> Struct[Hypothesis Structure:<br/>• Statement<br/>• Rationale<br/>• Testability Score<br/>• Data Requirements<br/>• Expected Outcomes]
-
-    style Input fill:#e1f5e1
-    style Output fill:#fff4e6
-    style Struct fill:#e6f3ff

7. Search Agent Workflow

flowchart TD
-    Input[Hypotheses] --> Strategy[Formulate Search<br/>Strategy per Hypothesis]
-
-    Strategy --> Multi[Multi-Source Search]
-
-    Multi --> PubMed[PubMed Search<br/>via MCP]
-    Multi --> ArXiv[arXiv Search<br/>via MCP]
-    Multi --> BioRxiv[bioRxiv Search<br/>via MCP]
-
-    PubMed --> Aggregate[Aggregate Results]
-    ArXiv --> Aggregate
-    BioRxiv --> Aggregate
-
-    Aggregate --> Filter[Filter & Rank<br/>by Relevance]
-    Filter --> Dedup[Deduplicate<br/>Cross-Reference]
-    Dedup --> Embed[Embed Documents<br/>via MCP]
-    Embed --> Vector[(Vector DB)]
-    Vector --> RAGRetrieval[RAG Retrieval<br/>Top-K per Hypothesis]
-    RAGRetrieval --> Output[Return Contextualized<br/>Search Results]
-
-    style Input fill:#fff4e6
-    style Multi fill:#ffe6e6
-    style Vector fill:#ffe6f0
-    style Output fill:#e6f3ff

8. Analysis Agent Workflow

flowchart TD
-    Input1[Hypotheses] --> Extract
-    Input2[Search Results] --> Extract[Extract Evidence<br/>per Hypothesis]
-
-    Extract --> Methods[Determine Analysis<br/>Methods Needed]
-
-    Methods --> Branch{Requires<br/>Computation?}
-    Branch -->|Yes| GenCode[Generate Python<br/>Analysis Code]
-    Branch -->|No| Qual[Qualitative<br/>Synthesis]
-
-    GenCode --> Execute[Execute Code<br/>via MCP Sandbox]
-    Execute --> Interpret1[Interpret<br/>Results]
-    Qual --> Interpret2[Interpret<br/>Findings]
-
-    Interpret1 --> Synthesize[Synthesize Evidence<br/>Across Sources]
-    Interpret2 --> Synthesize
-
-    Synthesize --> Verdict[Determine Verdict<br/>per Hypothesis]
-    Verdict --> Support[• Supported<br/>• Refuted<br/>• Inconclusive]
-    Support --> Gaps[Identify Knowledge<br/>Gaps & Limitations]
-    Gaps --> Output[Return Analysis<br/>Report]
-
-    style Input1 fill:#fff4e6
-    style Input2 fill:#e6f3ff
-    style Execute fill:#ffe6e6
-    style Output fill:#e6ffe6

9. Report Agent Workflow

flowchart TD
-    Input1[Query] --> Assemble
-    Input2[Hypotheses] --> Assemble
-    Input3[Search Results] --> Assemble
-    Input4[Analysis] --> Assemble[Assemble Report<br/>Sections]
-
-    Assemble --> Exec[Executive Summary]
-    Assemble --> Intro[Introduction]
-    Assemble --> Methods[Methods]
-    Assemble --> Results[Results per<br/>Hypothesis]
-    Assemble --> Discussion[Discussion]
-    Assemble --> Future[Future Directions]
-    Assemble --> Refs[References]
-
-    Results --> VizCheck{Needs<br/>Visualization?}
-    VizCheck -->|Yes| GenViz[Generate Viz Code]
-    GenViz --> ExecViz[Execute via MCP<br/>Create Charts]
-    ExecViz --> Combine
-    VizCheck -->|No| Combine[Combine All<br/>Sections]
-
-    Exec --> Combine
-    Intro --> Combine
-    Methods --> Combine
-    Discussion --> Combine
-    Future --> Combine
-    Refs --> Combine
-
-    Combine --> Format[Format Output]
-    Format --> MD[Markdown]
-    Format --> PDF[PDF]
-    Format --> JSON[JSON]
-
-    MD --> Output[Return Final<br/>Report]
-    PDF --> Output
-    JSON --> Output
-
-    style Input1 fill:#e1f5e1
-    style Input2 fill:#fff4e6
-    style Input3 fill:#e6f3ff
-    style Input4 fill:#e6ffe6
-    style Output fill:#d4edda

10. Data Flow & Event Streaming

flowchart TD
-    User[👤 User] -->|Research Query| UI[Gradio UI]
-    UI -->|Submit| Manager[Magentic Manager]
-
-    Manager -->|Event: Planning| UI
-    Manager -->|Select Agent| HypAgent[Hypothesis Agent]
-    HypAgent -->|Event: Delta/Message| UI
-    HypAgent -->|Hypotheses| Context[(Shared Context)]
-
-    Context -->|Retrieved by| Manager
-    Manager -->|Select Agent| SearchAgent[Search Agent]
-    SearchAgent -->|MCP Request| WebSearch[Web Search Tool]
-    WebSearch -->|Results| SearchAgent
-    SearchAgent -->|Event: Delta/Message| UI
-    SearchAgent -->|Documents| Context
-    SearchAgent -->|Embeddings| VectorDB[(Vector DB)]
-
-    Context -->|Retrieved by| Manager
-    Manager -->|Select Agent| AnalysisAgent[Analysis Agent]
-    AnalysisAgent -->|MCP Request| CodeExec[Code Execution Tool]
-    CodeExec -->|Results| AnalysisAgent
-    AnalysisAgent -->|Event: Delta/Message| UI
-    AnalysisAgent -->|Analysis| Context
-
-    Context -->|Retrieved by| Manager
-    Manager -->|Select Agent| ReportAgent[Report Agent]
-    ReportAgent -->|MCP Request| CodeExec
-    ReportAgent -->|Event: Delta/Message| UI
-    ReportAgent -->|Report| Context
-
-    Manager -->|Event: Final Result| UI
-    UI -->|Display| User
-
-    style User fill:#e1f5e1
-    style UI fill:#e6f3ff
-    style Manager fill:#ffe6e6
-    style Context fill:#ffe6f0
-    style VectorDB fill:#ffe6f0
-    style WebSearch fill:#f0f0f0
-    style CodeExec fill:#f0f0f0

11. MCP Tool Architecture

graph TB
-    subgraph "Agent Layer"
-        Manager[Magentic Manager]
-        HypAgent[Hypothesis Agent]
-        SearchAgent[Search Agent]
-        AnalysisAgent[Analysis Agent]
-        ReportAgent[Report Agent]
-    end
-
-    subgraph "MCP Protocol Layer"
-        Registry[MCP Tool Registry<br/>• Discovers tools<br/>• Routes requests<br/>• Manages connections]
-    end
-
-    subgraph "MCP Servers"
-        Server1[Web Search Server<br/>localhost:8001<br/>• PubMed<br/>• arXiv<br/>• bioRxiv]
-        Server2[Code Execution Server<br/>localhost:8002<br/>• Sandboxed Python<br/>• Package management]
-        Server3[RAG Server<br/>localhost:8003<br/>• Vector embeddings<br/>• Similarity search]
-        Server4[Visualization Server<br/>localhost:8004<br/>• Chart generation<br/>• Plot rendering]
-    end
-
-    subgraph "External Services"
-        PubMed[PubMed API]
-        ArXiv[arXiv API]
-        BioRxiv[bioRxiv API]
-        Modal[Modal Sandbox]
-        ChromaDB[(ChromaDB)]
-    end
-
-    SearchAgent -->|Request| Registry
-    AnalysisAgent -->|Request| Registry
-    ReportAgent -->|Request| Registry
-
-    Registry --> Server1
-    Registry --> Server2
-    Registry --> Server3
-    Registry --> Server4
-
-    Server1 --> PubMed
-    Server1 --> ArXiv
-    Server1 --> BioRxiv
-    Server2 --> Modal
-    Server3 --> ChromaDB
-
-    style Manager fill:#ffe6e6
-    style Registry fill:#fff4e6
-    style Server1 fill:#e6f3ff
-    style Server2 fill:#e6f3ff
-    style Server3 fill:#e6f3ff
-    style Server4 fill:#e6f3ff

12. Progress Tracking & Stall Detection

stateDiagram-v2
-    [*] --> Initialization: User Query
-
-    Initialization --> Planning: Manager starts
-
-    Planning --> AgentExecution: Select agent
-
-    AgentExecution --> Assessment: Collect results
-
-    Assessment --> QualityCheck: Evaluate output
-
-    QualityCheck --> AgentExecution: Poor quality<br/>(retry < max_rounds)
-    QualityCheck --> Planning: Poor quality<br/>(try different agent)
-    QualityCheck --> NextAgent: Good quality<br/>(task incomplete)
-    QualityCheck --> Synthesis: Good quality<br/>(task complete)
-
-    NextAgent --> AgentExecution: Select next agent
-
-    state StallDetection <<choice>>
-    Assessment --> StallDetection: Check progress
-    StallDetection --> Planning: No progress<br/>(stall count < max)
-    StallDetection --> ErrorRecovery: No progress<br/>(max stalls reached)
-
-    ErrorRecovery --> PartialReport: Generate partial results
-    PartialReport --> [*]
-
-    Synthesis --> FinalReport: Combine all outputs
-    FinalReport --> [*]
-
-    note right of QualityCheck
-        Manager assesses:
-        • Output completeness
-        • Quality metrics
-        • Progress made
-    end note
-
-    note right of StallDetection
-        Stall = no new progress
-        after agent execution
-        Triggers plan reset
-    end note

13. Gradio UI Integration

graph TD
-    App[Gradio App<br/>DeepCritical Research Agent]
-
-    App --> Input[Input Section]
-    App --> Status[Status Section]
-    App --> Output[Output Section]
-
-    Input --> Query[Research Question<br/>Text Area]
-    Input --> Controls[Controls]
-    Controls --> MaxHyp[Max Hypotheses: 1-10]
-    Controls --> MaxRounds[Max Rounds: 5-20]
-    Controls --> Submit[Start Research Button]
-
-    Status --> Log[Real-time Event Log<br/>• Manager planning<br/>• Agent selection<br/>• Execution updates<br/>• Quality assessment]
-    Status --> Progress[Progress Tracker<br/>• Current agent<br/>• Round count<br/>• Stall count]
-
-    Output --> Tabs[Tabbed Results]
-    Tabs --> Tab1[Hypotheses Tab<br/>Generated hypotheses with scores]
-    Tabs --> Tab2[Search Results Tab<br/>Papers & sources found]
-    Tabs --> Tab3[Analysis Tab<br/>Evidence & verdicts]
-    Tabs --> Tab4[Report Tab<br/>Final research report]
-    Tab4 --> Download[Download Report<br/>MD / PDF / JSON]
-
-    Submit -.->|Triggers| Workflow[Magentic Workflow]
-    Workflow -.->|MagenticOrchestratorMessageEvent| Log
-    Workflow -.->|MagenticAgentDeltaEvent| Log
-    Workflow -.->|MagenticAgentMessageEvent| Log
-    Workflow -.->|MagenticFinalResultEvent| Tab4
-
-    style App fill:#e1f5e1
-    style Input fill:#fff4e6
-    style Status fill:#e6f3ff
-    style Output fill:#e6ffe6
-    style Workflow fill:#ffe6e6

14. Complete System Context

graph LR
-    User[👤 Researcher<br/>Asks research questions] -->|Submits query| DC[DeepCritical<br/>Magentic Workflow]
-
-    DC -->|Literature search| PubMed[PubMed API<br/>Medical papers]
-    DC -->|Preprint search| ArXiv[arXiv API<br/>Scientific preprints]
-    DC -->|Biology search| BioRxiv[bioRxiv API<br/>Biology preprints]
-    DC -->|Agent reasoning| Claude[Claude API<br/>Sonnet 4 / Opus]
-    DC -->|Code execution| Modal[Modal Sandbox<br/>Safe Python env]
-    DC -->|Vector storage| Chroma[ChromaDB<br/>Embeddings & RAG]
-
-    DC -->|Deployed on| HF[HuggingFace Spaces<br/>Gradio 6.0]
-
-    PubMed -->|Results| DC
-    ArXiv -->|Results| DC
-    BioRxiv -->|Results| DC
-    Claude -->|Responses| DC
-    Modal -->|Output| DC
-    Chroma -->|Context| DC
-
-    DC -->|Research report| User
-
-    style User fill:#e1f5e1
-    style DC fill:#ffe6e6
-    style PubMed fill:#e6f3ff
-    style ArXiv fill:#e6f3ff
-    style BioRxiv fill:#e6f3ff
-    style Claude fill:#ffd6d6
-    style Modal fill:#f0f0f0
-    style Chroma fill:#ffe6f0
-    style HF fill:#d4edda

15. Workflow Timeline (Simplified)

gantt
-    title DeepCritical Magentic Workflow - Typical Execution
-    dateFormat mm:ss
-    axisFormat %M:%S
-
-    section Manager Planning
-    Initial planning         :p1, 00:00, 10s
-
-    section Hypothesis Agent
-    Generate hypotheses      :h1, after p1, 30s
-    Manager assessment       :h2, after h1, 5s
-
-    section Search Agent
-    Search hypothesis 1      :s1, after h2, 20s
-    Search hypothesis 2      :s2, after s1, 20s
-    Search hypothesis 3      :s3, after s2, 20s
-    RAG processing          :s4, after s3, 15s
-    Manager assessment      :s5, after s4, 5s
-
-    section Analysis Agent
-    Evidence extraction     :a1, after s5, 15s
-    Code generation        :a2, after a1, 20s
-    Code execution         :a3, after a2, 25s
-    Synthesis              :a4, after a3, 20s
-    Manager assessment     :a5, after a4, 5s
-
-    section Report Agent
-    Report assembly        :r1, after a5, 30s
-    Visualization          :r2, after r1, 15s
-    Formatting             :r3, after r2, 10s
-
-    section Manager Synthesis
-    Final synthesis        :f1, after r3, 10s

Key Differences from Original Design

| Aspect | Original (Judge-in-Loop) | New (Magentic) |
| --- | --- | --- |
| Control Flow | Fixed sequential phases | Dynamic agent selection |
| Quality Control | Separate Judge Agent | Manager assessment built-in |
| Retry Logic | Phase-level with feedback | Agent-level with adaptation |
| Flexibility | Rigid 4-phase pipeline | Adaptive workflow |
| Complexity | 5 agents (including Judge) | 4 agents (no Judge) |
| Progress Tracking | Manual state management | Built-in round/stall detection |
| Agent Coordination | Sequential handoff | Manager-driven dynamic selection |
| Error Recovery | Retry same phase | Try different agent or replan |

Simplified Design Principles

  1. Manager is Intelligent: LLM-powered manager handles planning, selection, and quality assessment
  2. No Separate Judge: Manager's assessment phase replaces dedicated Judge Agent
  3. Dynamic Workflow: Agents can be called multiple times in any order based on need
  4. Built-in Safety: max_round_count (15) and max_stall_count (3) prevent infinite loops
  5. Event-Driven UI: Real-time streaming updates to Gradio interface
  6. MCP-Powered Tools: All external capabilities via Model Context Protocol
  7. Shared Context: Centralized state accessible to all agents
  8. Progress Awareness: Manager tracks what's been done and what's needed

Legend

  • 🔴 Red/Pink: Manager, orchestration, decision-making
  • 🟡 Yellow/Orange: Specialist agents, processing
  • 🔵 Blue: Data, tools, MCP services
  • 🟣 Purple/Pink: Storage, databases, state
  • 🟢 Green: User interactions, final outputs
  • Gray: External services, APIs

Implementation Highlights

Simple 4-Agent Setup:

Manager handles quality assessment in its instructions:

  • Checks hypothesis quality (testable, novel, clear)
  • Validates search results (relevant, authoritative, recent)
  • Assesses analysis soundness (methodology, evidence, conclusions)
  • Ensures report completeness (all sections, proper citations)

No separate Judge Agent needed - manager does it all!
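
A minimal sketch of how that checklist could be folded into the manager's instructions; the prompt text and build_manager_instructions are illustrative assumptions, not the project's actual prompt:

```python
# Hypothetical checklist embedded in the manager's instructions (no separate Judge Agent).
QUALITY_CHECKLIST = """
Before accepting a specialist's output, assess it yourself:
- Hypotheses: testable, novel, clearly stated.
- Search results: relevant, authoritative, recent.
- Analysis: sound methodology, grounded in evidence, justified conclusions.
- Report: all sections present, every claim properly cited.
If an output fails a check, re-invoke the responsible agent with feedback or replan.
"""


def build_manager_instructions(agent_names: list[str]) -> str:
    """Compose manager instructions that include planning, selection, and quality checks."""
    roster = ", ".join(agent_names)
    return (
        f"You coordinate these specialist agents: {roster}. "
        "Plan the research, pick the next agent to run, and track progress."
        + QUALITY_CHECKLIST
    )


if __name__ == "__main__":
    print(build_manager_instructions(["hypothesis", "search", "analysis", "report"]))
```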


Document Version: 2.0 (Magentic Simplified)
Last Updated: 2025-11-24
Architecture: Microsoft Magentic Orchestration Pattern
Agents: 4 (Hypothesis, Search, Analysis, Report) + 1 Manager
License: MIT

See Also

\ No newline at end of file diff --git a/site/assets/images/favicon.png b/site/assets/images/favicon.png deleted file mode 100644 index 1cf13b9f9d978896599290a74f77d5dbe7d1655c..0000000000000000000000000000000000000000 Binary files a/site/assets/images/favicon.png and /dev/null differ diff --git a/site/assets/javascripts/bundle.e71a0d61.min.js b/site/assets/javascripts/bundle.e71a0d61.min.js deleted file mode 100644 index c76b3b2b18a0e8a097ad2690dd51fa8adc12d0be..0000000000000000000000000000000000000000 --- a/site/assets/javascripts/bundle.e71a0d61.min.js +++ /dev/null @@ -1,16 +0,0 @@
a=i.target.closest("a");if(a&&!a.target&&n.has(a.href)){let c=a.href;return!i.target.closest(".md-version")&&n.get(c)===s?y:(i.preventDefault(),$(new URL(c)))}}return y}),b(i=>kt(i).pipe(m(s=>{var a;return(a=wi({selectedVersionSitemap:s,selectedVersionBaseURL:i,currentLocation:we(),currentBaseURL:t.base}))!=null?a:i})))))).subscribe(n=>st(n,!0)),z([r,o]).subscribe(([n,i])=>{j(".md-header__topic").appendChild(Wn(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var a;let i=new URL(t.base),s=__md_get("__outdated",sessionStorage,i);if(s===null){s=!0;let c=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(c)||(c=[c]);e:for(let p of c)for(let l of n.aliases.concat(n.version))if(new RegExp(p,"i").test(l)){s=!1;break e}__md_set("__outdated",s,sessionStorage,i)}if(s)for(let c of me("outdated"))c.hidden=!1})}function ws(e,{worker$:t}){let{searchParams:r}=we();r.has("q")&&(at("search",!0),e.value=r.get("q"),e.focus(),Je("search").pipe(Re(i=>!i)).subscribe(()=>{let i=we();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=Ye(e),n=L(t.pipe(Re(zt)),h(e,"keyup"),o).pipe(m(()=>e.value),Y());return z([n,o]).pipe(m(([i,s])=>({value:i,focus:s})),Z(1))}function Si(e,{worker$:t}){let r=new T,o=r.pipe(oe(),ae(!0));z([t.pipe(Re(zt)),r],(i,s)=>s).pipe(ne("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ne("focus")).subscribe(({focus:i})=>{i&&at("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=j("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ws(e,{worker$:t}).pipe(O(i=>r.next(i)),A(()=>r.complete()),m(i=>P({ref:e},i)),Z(1))}function Oi(e,{worker$:t,query$:r}){let o=new T,n=un(e.parentElement).pipe(g(Boolean)),i=e.parentElement,s=j(":scope > :first-child",e),a=j(":scope > :last-child",e);Je("search").subscribe(l=>{a.setAttribute("role",l?"list":"presentation"),a.hidden=!l}),o.pipe(te(r),Gr(t.pipe(Re(zt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:s.textContent=f.length?Me("search.result.none"):Me("search.result.placeholder");break;case 1:s.textContent=Me("search.result.one");break;default:let u=br(l.length);s.textContent=Me("search.result.other",u)}});let c=o.pipe(O(()=>a.innerHTML=""),b(({items:l})=>L($(...l.slice(0,10)),$(...l.slice(10)).pipe(ot(4),Xr(n),b(([f])=>f)))),m(Fn),le());return c.subscribe(l=>a.appendChild(l)),c.pipe(J(l=>{let f=ue("details",l);return typeof f=="undefined"?y:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(g(Sr),m(({data:l})=>l)).pipe(O(l=>o.next(l)),A(()=>o.complete()),m(l=>P({ref:e},l)))}function Ts(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=we();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function Li(e,t){let r=new T,o=r.pipe(oe(),ae(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),Ts(e,t).pipe(O(n=>r.next(n)),A(()=>r.complete()),m(n=>P({ref:e},n)))}function Mi(e,{worker$:t,keyboard$:r}){let o=new T,n=Ce("search-query"),i=L(h(n,"keydown"),h(n,"focus")).pipe(xe(pe),m(()=>n.value),Y());return o.pipe(Pe(i),m(([{suggest:a},c])=>{let p=c.split(/([\s-]+)/);if(a!=null&&a.length&&p[p.length-1]){let l=a[a.length-1];l.startsWith(p[p.length-1])&&(p[p.length-1]=l)}else p.length=0;return p})).subscribe(a=>e.innerHTML=a.join("").replace(/\s/g," 
")),r.pipe(g(({mode:a})=>a==="search")).subscribe(a=>{switch(a.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(g(Sr),m(({data:a})=>a)).pipe(O(a=>o.next(a)),A(()=>o.complete()),m(()=>({ref:e})))}function _i(e,{index$:t,keyboard$:r}){let o=Te();try{let n=Ei(o.search,t),i=Ce("search-query",e),s=Ce("search-result",e);h(e,"click").pipe(g(({target:c})=>c instanceof Element&&!!c.closest("a"))).subscribe(()=>at("search",!1)),r.pipe(g(({mode:c})=>c==="search")).subscribe(c=>{let p=Ne();switch(c.type){case"Enter":if(p===i){let l=new Map;for(let f of M(":first-child [href]",s)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}c.claim()}break;case"Escape":case"Tab":at("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof p=="undefined")i.focus();else{let l=[i,...M(":not(details) > [href], summary, details[open] [href]",s)],f=Math.max(0,(Math.max(0,l.indexOf(p))+l.length+(c.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}c.claim();break;default:i!==Ne()&&i.focus()}}),r.pipe(g(({mode:c})=>c==="global")).subscribe(c=>{switch(c.type){case"f":case"s":case"/":i.focus(),i.select(),c.claim();break}});let a=Si(i,{worker$:n});return L(a,Oi(s,{worker$:n,query$:a})).pipe(Ve(...me("search-share",e).map(c=>Li(c,{query$:a})),...me("search-suggest",e).map(c=>Mi(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,tt}}function Ai(e,{index$:t,location$:r}){return z([t,r.pipe(Q(we()),g(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>xi(o.config)(n.searchParams.get("h"))),m(o=>{var s;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=x("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function Ss(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),Y((i,s)=>i.height===s.height&&i.locked===s.locked))}function lo(e,o){var n=o,{header$:t}=n,r=vo(n,["header$"]);let i=j(".md-sidebar__scrollwrap",e),{y:s}=Be(i);return H(()=>{let a=new T,c=a.pipe(oe(),ae(!0)),p=a.pipe($e(0,ye));return p.pipe(te(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe(Re()).subscribe(()=>{for(let l of M(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=de(f);f.scrollTo({top:u-d/2})}}}),fe(M("label[tabindex]",e)).pipe(J(l=>h(l,"click").pipe(xe(pe),m(()=>l),W(c)))).subscribe(l=>{let f=j(`[id="${l.htmlFor}"]`);j(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),V("content.tooltips")&&fe(M("abbr[title]",e)).pipe(J(l=>Xe(l,{viewport$})),W(c)).subscribe(),Ss(e,r).pipe(O(l=>a.next(l)),A(()=>a.complete()),m(l=>P({ref:e},l)))})}function Ci(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return rt(ze(`${r}/releases/latest`).pipe(ve(()=>y),m(o=>({version:o.tag_name})),Qe({})),ze(r).pipe(ve(()=>y),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Qe({}))).pipe(m(([o,n])=>P(P({},o),n)))}else{let 
r=`https://api.github.com/users/${e}`;return ze(r).pipe(m(o=>({repositories:o.public_repos})),Qe({}))}}function ki(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return rt(ze(`${r}/releases/permalink/latest`).pipe(ve(()=>y),m(({tag_name:o})=>({version:o})),Qe({})),ze(r).pipe(ve(()=>y),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Qe({}))).pipe(m(([o,n])=>P(P({},o),n)))}function Hi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return Ci(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ki(r,o)}return y}var Os;function Ls(e){return Os||(Os=H(()=>{let t=__md_get("__source",sessionStorage);if(t)return $(t);if(me("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return y}return Hi(e.href).pipe(O(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>y),g(t=>Object.keys(t).length>0),m(t=>({facts:t})),Z(1)))}function $i(e){let t=j(":scope > :last-child",e);return H(()=>{let r=new T;return r.subscribe(({facts:o})=>{t.appendChild(jn(o)),t.classList.add("md-source__repository--active")}),Ls(e).pipe(O(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}function Ms(e,{viewport$:t,header$:r}){return Le(document.body).pipe(b(()=>Er(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ne("hidden"))}function Pi(e,t){return H(()=>{let r=new T;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(V("navigation.tabs.sticky")?$({hidden:!1}):Ms(e,t)).pipe(O(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}function _s(e,{viewport$:t,header$:r}){let o=new Map,n=M(".md-nav__link",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=ue(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(ne("height"),m(({height:a})=>{let c=Ce("main"),p=j(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),le());return Le(document.body).pipe(ne("height"),b(a=>H(()=>{let c=[];return $([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new Map([...c].sort(([,p],[,l])=>p-l))),Pe(i),b(([c,p])=>t.pipe(Ut(([l,f],{offset:{y:u},size:d})=>{let v=u+d.height>=Math.floor(a.height);for(;f.length;){let[,S]=f[0];if(S-p=u&&!v)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),Y((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),Q({prev:[],next:[]}),ot(2,1),m(([a,c])=>a.prev.length{let i=new T,s=i.pipe(oe(),ae(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),V("toc.follow")){let a=L(t.pipe(Ae(1),m(()=>{})),t.pipe(Ae(250),m(()=>"smooth")));i.pipe(g(({prev:c})=>c.length>0),Pe(o.pipe(xe(pe))),te(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=vr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=de(f);f.scrollTo({top:u-d/2,behavior:p})}}})}return V("navigation.tracking")&&t.pipe(W(s),ne("offset"),Ae(250),Ie(1),W(n.pipe(Ie(1))),vt({delay:250}),te(i)).subscribe(([,{prev:a}])=>{let c=we(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else 
c.hash="",history.replaceState({},"",`${c}`)}),_s(e,{viewport$:t,header$:r}).pipe(O(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))})}function As(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),ot(2,1),m(([s,a])=>s>a&&a>0),Y()),i=r.pipe(m(({active:s})=>s));return z([i,n]).pipe(m(([s,a])=>!(s&&a)),Y(),W(o.pipe(Ie(1))),ae(!0),vt({delay:250}),m(s=>({hidden:s})))}function Ii(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new T,s=i.pipe(oe(),ae(!0));return i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(s),ne("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),h(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),As(e,{viewport$:t,main$:o,target$:n}).pipe(O(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))}function Fi({document$:e,viewport$:t}){e.pipe(b(()=>M(".md-ellipsis")),J(r=>mt(r).pipe(W(e.pipe(Ie(1))),g(o=>o),m(()=>r),Ee(1))),g(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,V("content.tooltips")?Xe(n,{viewport$:t}).pipe(W(e.pipe(Ie(1))),A(()=>n.removeAttribute("title"))):y})).subscribe(),V("content.tooltips")&&e.pipe(b(()=>M(".md-status")),J(r=>Xe(r,{viewport$:t}))).subscribe()}function ji({document$:e,tablet$:t}){e.pipe(b(()=>M(".md-toggle--indeterminate")),O(r=>{r.indeterminate=!0,r.checked=!1}),J(r=>h(r,"change").pipe(Jr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),te(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function Cs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ui({document$:e}){e.pipe(b(()=>M("[data-md-scrollfix]")),O(t=>t.removeAttribute("data-md-scrollfix")),g(Cs),J(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Wi({viewport$:e,tablet$:t}){z([Je("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>$(r).pipe(nt(r?400:100))),te(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ks(){return location.protocol==="file:"?_t(`${new URL("search/search_index.js",Or.base)}`).pipe(m(()=>__index),Z(1)):ze(new URL("search/search_index.json",Or.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ct=an(),Kt=bn(),Ht=yn(Kt),mo=hn(),ke=Ln(),Lr=Wt("(min-width: 60em)"),Vi=Wt("(min-width: 
76.25em)"),Ni=xn(),Or=Te(),zi=document.forms.namedItem("search")?ks():tt,fo=new T;di({alert$:fo});ui({document$:ct});var uo=new T,qi=kt(Or.base);V("navigation.instant")&&gi({sitemap$:qi,location$:Kt,viewport$:ke,progress$:uo}).subscribe(ct);var Di;((Di=Or.version)==null?void 0:Di.provider)==="mike"&&Ti({document$:ct});L(Kt,Ht).pipe(nt(125)).subscribe(()=>{at("drawer",!1),at("search",!1)});mo.pipe(g(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ue("link[rel=prev]");typeof t!="undefined"&&st(t);break;case"n":case".":let r=ue("link[rel=next]");typeof r!="undefined"&&st(r);break;case"Enter":let o=Ne();o instanceof HTMLLabelElement&&o.click()}});Fi({viewport$:ke,document$:ct});ji({document$:ct,tablet$:Lr});Ui({document$:ct});Wi({viewport$:ke,tablet$:Lr});var ft=ai(Ce("header"),{viewport$:ke}),qt=ct.pipe(m(()=>Ce("main")),b(e=>pi(e,{viewport$:ke,header$:ft})),Z(1)),Hs=L(...me("consent").map(e=>An(e,{target$:Ht})),...me("dialog").map(e=>ni(e,{alert$:fo})),...me("palette").map(e=>li(e)),...me("progress").map(e=>mi(e,{progress$:uo})),...me("search").map(e=>_i(e,{index$:zi,keyboard$:mo})),...me("source").map(e=>$i(e))),$s=H(()=>L(...me("announce").map(e=>_n(e)),...me("content").map(e=>oi(e,{sitemap$:qi,viewport$:ke,target$:Ht,print$:Ni})),...me("content").map(e=>V("search.highlight")?Ai(e,{index$:zi,location$:Kt}):y),...me("header").map(e=>si(e,{viewport$:ke,header$:ft,main$:qt})),...me("header-title").map(e=>ci(e,{viewport$:ke,header$:ft})),...me("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?eo(Vi,()=>lo(e,{viewport$:ke,header$:ft,main$:qt})):eo(Lr,()=>lo(e,{viewport$:ke,header$:ft,main$:qt}))),...me("tabs").map(e=>Pi(e,{viewport$:ke,header$:ft})),...me("toc").map(e=>Ri(e,{viewport$:ke,header$:ft,main$:qt,target$:Ht})),...me("top").map(e=>Ii(e,{viewport$:ke,header$:ft,main$:qt,target$:Ht})))),Ki=ct.pipe(b(()=>$s),Ve(Hs),Z(1));Ki.subscribe();window.document$=ct;window.location$=Kt;window.target$=Ht;window.keyboard$=mo;window.viewport$=ke;window.tablet$=Lr;window.screen$=Vi;window.print$=Ni;window.alert$=fo;window.progress$=uo;window.component$=Ki;})(); -//# sourceMappingURL=bundle.e71a0d61.min.js.map - diff --git a/site/assets/javascripts/bundle.e71a0d61.min.js.map b/site/assets/javascripts/bundle.e71a0d61.min.js.map deleted file mode 100644 index 23451b54d11b39ef33a5f94d16d9e351dec9972c..0000000000000000000000000000000000000000 --- a/site/assets/javascripts/bundle.e71a0d61.min.js.map +++ /dev/null @@ -1,7 +0,0 @@ -{ - "version": 3, - "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", 
"node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", 
"node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinct.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/exhaustMap.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", 
"src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/link/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/alternate/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", 
"src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], - "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2025 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n fetchSitemap,\n setupAlternate,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 60em)\")\nconst screen$ = watchMedia(\"(min-width: 76.25em)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up language selector */\nsetupAlternate({ document$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up sitemap for instant navigation and previews */\nconst sitemap$ = fetchSitemap(config.base)\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ sitemap$, location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { sitemap$, viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n */\nexport class Subscription implements SubscriptionLike {\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
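// Hedged illustration (not part of this source file): the deprecated flag routed through
// errorContext()/captureError() above is what makes the following legacy pattern work.
// Shown only to clarify the behavior being phased out; prefer passing an error callback.
import { config, throwError } from 'rxjs';

config.useDeprecatedSynchronousErrorHandling = true; // deprecated; removed in v8

try {
  // With the flag on and no error handler supplied, the error is captured during the
  // subscribe call and rethrown synchronously when errorContext() unwinds, so this
  // try/catch can observe it.
  throwError(() => new Error('boom')).subscribe();
} catch (err) {
  console.error('Caught synchronously (deprecated behavior):', err);
}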
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param value The `next` value.\n */\n next(value: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param err The `error` exception.\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. 
The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as ((value: T) => void) | undefined,\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent.\n * @param subscriber The stopped subscriber.\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. 
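// Hedged illustration (not part of this source file): as ConsumerObserver and
// handleUnhandledError above show, an exception thrown inside a user `next` callback is
// not delivered to the `error` callback; it is reported as an unhandled error on a
// separate job, so a try/catch around subscribe() will not see it.
import { of } from 'rxjs';

of(1).subscribe((value) => {
  // Reported via config.onUnhandledError or the runtime's uncaught error mechanism.
  throw new Error(`failed while handling ${value}`);
});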
Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. 
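// Hedged usage sketch (not part of this source file): the standalone pipe() implemented
// just below composes plain unary functions left to right; the `normalize` helper here
// is invented purely for illustration.
import { pipe } from 'rxjs';

const normalize = pipe(
  (s: string) => s.trim(),
  (s: string) => s.toLowerCase()
);

console.log(normalize('  Hello World  ')); // "hello world"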
\n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @param subscribe The function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @param subscribe the subscriber function to be passed to the Observable constructor\n * @return A new observable.\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @param operator the operator defining the operation to take on the observable\n * @return A new observable with the Operator applied.\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. 
Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param observerOrNext Either an {@link Observer} with some or all callback methods,\n * or the `next` handler that is called for each value emitted from the subscribed Observable.\n * @param error A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param complete A handler for a terminal event resulting from successful completion.\n * @return A subscription reference to the registered handlers.\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next A handler for each value emitted by the observable.\n * @return A promise that either resolves on observable completion or\n * rejects with the handled error.\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @return This instance of the observable.\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n *\n * @return The Observable result of all the operators having been called\n * in the order they were passed in.\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
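// Hedged migration sketch (not part of this source file): the deprecation notes around
// toPromise() point at firstValueFrom/lastValueFrom as the replacements.
import { firstValueFrom, lastValueFrom, of } from 'rxjs';

async function demo() {
  const source$ = of(1, 2, 3);
  const first = await firstValueFrom(source$); // resolves with 1
  const last = await lastValueFrom(source$);   // resolves with 3, on completion
  console.log(first, last);
}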
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
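// Hedged sketch (not part of this source file): operate() and createOperatorSubscriber
// are internal helpers, but the same shape can be expressed with the public API. This
// hypothetical doubleIt operator mirrors what a lifted operator does: subscribe to the
// source and forward transformed values to the downstream subscriber.
import { Observable, OperatorFunction, of } from 'rxjs';

function doubleIt(): OperatorFunction<number, number> {
  return (source: Observable<number>) =>
    new Observable<number>((subscriber) =>
      source.subscribe({
        next: (value) => subscriber.next(value * 2),
        error: (err) => subscriber.error(err),
        complete: () => subscriber.complete(),
      })
    );
}

of(1, 2, 3).pipe(doubleIt()).subscribe((v) => console.log(v)); // 2, 4, 6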
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
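// Hedged usage sketch (not part of this source file): as the Subject documentation above
// describes, a Subject is both an Observer and an Observable, so a single next() call is
// multicast to every currently registered subscriber.
import { Subject } from 'rxjs';

const subject = new Subject<number>();

subject.subscribe((v) => console.log('observer A:', v));
subject.subscribe((v) => console.log('observer B:', v));

subject.next(1); // both observers log 1
subject.next(2); // both observers log 2
subject.complete();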
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return Observable that this Subject casts to.\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param _bufferSize The size of the buffer to replay on subscription\n * @param _windowTime The amount of time the buffered items will stay buffered\n * @param _timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
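// Hedged usage sketch (not part of this source file): per the comparison above, a
// ReplaySubject replays its buffered values to late subscribers, bounded by bufferSize
// and windowTime, whereas a BehaviorSubject only replays its single current value.
import { ReplaySubject } from 'rxjs';

const replay$ = new ReplaySubject<number>(3, 2000); // keep at most 3 values, each for 2s

replay$.next(1);
replay$.next(2);
replay$.next(3);
replay$.next(4);

// A subscriber arriving within the 2s window immediately receives 2, 3, 4
// (1 has already fallen out of the 3-item buffer).
replay$.subscribe((v) => console.log('late subscriber:', v));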
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param state Some contextual data that the `work` function uses when called by the\n * Scheduler.\n * @param delay Time to wait before executing the work, where the time unit is implicit\n * and defined by the Scheduler.\n * @return A subscription in order to be able to unsubscribe the scheduled work.\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
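Since `Scheduler` only exposes `now()` and `schedule()`, basic usage from application code looks like the following sketch (using the public `asyncScheduler`; the 200 ms delay is arbitrary):

```ts
import { asyncScheduler } from 'rxjs';

// now() is the scheduler's own clock; for asyncScheduler it defaults to Date.now().
const startedAt = asyncScheduler.now();

// schedule(work, delay, state) wraps `work` in an Action and returns a Subscription.
const subscription = asyncScheduler.schedule(
  (label?: string) => console.log(`${label} at +${asyncScheduler.now() - startedAt}ms`),
  200,
  'ran'
);

// Unsubscribing before the delay elapses cancels the underlying timer,
// so in this particular sketch nothing is ever logged.
subscription.unsubscribe();
```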
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed and stored as an Action object in a\n * queue of actions.\n *\n * @param work A function representing a task, or some unit of work to be\n * executed by the Scheduler.\n * @param delay Time to wait before executing the work, where the time unit is\n * implicit and defined by the Scheduler itself.\n * @param state Some contextual data that the `work` function uses when called\n * by the Scheduler.\n * @return A subscription in order to be able to unsubscribe the scheduled work.\n */\n public schedule<T>(work: (this: SchedulerAction<T>, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array<AsyncAction<any>> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction<any>): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" a task, that is, to perform it right after the currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * a better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. 
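Because a rescheduled action is the same `AsyncAction` instance, the `Subscription` returned by the very first `schedule()` call is enough to stop a self-rescheduling loop. A small sketch (the interval and cut-off values are arbitrary):

```ts
import { asyncScheduler } from 'rxjs';

const ticker = asyncScheduler.schedule(
  function (count?: number) {
    console.log('tick', count);
    // `this` is the currently executing action; rescheduling reuses it
    // (and, per the implementation notes above, its interval while the delay is unchanged).
    this.schedule((count ?? 0) + 1, 1000);
  },
  1000,
  0
);

// One unsubscribe on the originally returned Subscription cancels the pending
// interval and therefore ends the whole loop.
setTimeout(() => ticker.unsubscribe(), 5500);
```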
Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction<T> extends AsyncAction<T> {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instance of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules the given task synchronously - executes it right when\n * it is scheduled. 
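As a quick contrast with `asyncScheduler` before the recursion examples below: a zero-delay `queue` task runs before `schedule()` returns, while an async task always waits for a macrotask. A minimal sketch:

```ts
import { queueScheduler, asyncScheduler } from 'rxjs';

queueScheduler.schedule(() => console.log('queue task'));
console.log('after scheduling on queue');
// Logs: "queue task", then "after scheduling on queue"

asyncScheduler.schedule(() => console.log('async task'));
console.log('after scheduling on async');
// Logs: "after scheduling on async", then (on a later macrotask) "async task"
```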
However, when called recursively, that is, when a task scheduled with the queue scheduler\n * schedules another task, that inner task is not executed immediately either;\n * it is put on a queue and waits for the current one to finish.\n *\n * This means that when you execute a task with the `queue` scheduler, you can be sure it will end\n * before any other task scheduled with that scheduler starts.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In a scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction<T> extends AsyncAction<T> {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && id === scheduler._scheduled && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction<any>): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n let flushId;\n if (action) {\n flushId = action.id;\n } else {\n flushId = this._scheduled;\n this._scheduled = undefined;\n }\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure the scheduled task will happen just before the next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html: <div style=\"background: #0ff;\"></div>
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable<never>((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable<never>((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last<T>(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = (<T>(x: any): x is ArrayLike<T> => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thenable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike<any> {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessarily an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable<any> {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable<T>(obj: any): obj is AsyncIterable<T> {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable<any> {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator<T>(readableStream: ReadableStreamLike<T>): AsyncGenerator<T> {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike<T>(obj: any): obj is ReadableStreamLike<T> {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an