"""
Modal Job Submission Module

Handles submission of SMOLTRACE evaluation jobs to Modal's serverless compute platform.
"""

import os
import shlex
import sys
import uuid
from typing import Dict, List, Optional


def submit_modal_job(
    model: str,
    provider: str,
    agent_type: str,
    hardware: str,
    dataset_name: str,
    split: str = "train",
    difficulty: str = "all",
    parallel_workers: int = 1,
    hf_token: Optional[str] = None,
    hf_inference_provider: Optional[str] = None,
    search_provider: str = "duckduckgo",
    enable_tools: Optional[List[str]] = None,
    output_format: str = "hub",
    output_dir: Optional[str] = None,
    enable_otel: bool = True,
    enable_gpu_metrics: bool = True,
    private: bool = False,
    debug: bool = False,
    quiet: bool = False,
    run_id: Optional[str] = None
) -> Dict:
    """
    Submit an evaluation job to Modal

    Args:
        model: Model identifier (e.g., "openai/gpt-4")
        provider: Provider type ("litellm", "inference", "transformers")
        agent_type: Agent type ("tool", "code", "both")
        hardware: Hardware type (e.g., "auto", "gpu_a10", "gpu_h200")
        dataset_name: HuggingFace dataset for evaluation
        split: Dataset split to use
        difficulty: Difficulty filter
        parallel_workers: Number of parallel workers
        hf_token: HuggingFace token
        hf_inference_provider: HF Inference provider
        search_provider: Search provider for agents
        enable_tools: List of tools to enable
        output_format: Output format ("hub" or "json")
        output_dir: Output directory for JSON format
        enable_otel: Enable OpenTelemetry tracing
        enable_gpu_metrics: Enable GPU metrics collection
        private: Make datasets private
        debug: Enable debug mode
        quiet: Enable quiet mode
        run_id: Optional run ID (auto-generated if not provided)

    Returns:
        dict: Job submission result with job_id, status, and details
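
    Example (illustrative values; requires Modal and HuggingFace credentials,
    and the dataset name below is a placeholder):
        result = submit_modal_job(
            model="openai/gpt-4",
            provider="litellm",
            agent_type="tool",
            hardware="auto",
            dataset_name="org/eval-dataset",  # placeholder dataset name
        )
        if result["success"]:
            print(result["job_id"], result["status"])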
    """
    try:
        import modal
    except ImportError:
        return {
            "success": False,
            "error": "Modal package not installed. Install with: pip install modal",
            "job_id": None
        }

    # Validate Modal credentials
    modal_token_id = os.environ.get("MODAL_TOKEN_ID")
    modal_token_secret = os.environ.get("MODAL_TOKEN_SECRET")

    if not modal_token_id or not modal_token_secret:
        return {
            "success": False,
            "error": "Modal credentials not configured. Please set MODAL_TOKEN_ID and MODAL_TOKEN_SECRET in Settings.",
            "job_id": None
        }

    # Generate job ID
    job_id = run_id or f"job_{uuid.uuid4().hex[:8]}"

    # Map hardware to Modal GPU types
    hardware_map = {
        "auto": _auto_select_modal_hardware(provider, model),
        "cpu": None,  # CPU only
        "gpu_t4": "T4",
        "gpu_l4": "L4",
        "gpu_a10": "A10G",
        "gpu_l40s": "L40S",
        "gpu_a100": "A100",
        "gpu_a100_80gb": "A100-80GB",
        "gpu_h100": "H100",
        "gpu_h200": "H200",
        "gpu_b200": "B200"
    }

    modal_gpu = hardware_map.get(hardware, "A10G")

    # Build environment variables
    env_vars = {
        "HF_TOKEN": hf_token or os.environ.get("HF_TOKEN", ""),
    }

    # Add LLM provider API keys from environment
    llm_key_names = [
        "OPENAI_API_KEY", "ANTHROPIC_API_KEY", "GOOGLE_API_KEY",
        "GEMINI_API_KEY", "COHERE_API_KEY", "MISTRAL_API_KEY",
        "TOGETHER_API_KEY", "GROQ_API_KEY", "REPLICATE_API_TOKEN",
        "ANYSCALE_API_KEY", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY",
        "AWS_REGION", "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT",
        "LITELLM_API_KEY"
    ]

    for key_name in llm_key_names:
        value = os.environ.get(key_name)
        if value:
            env_vars[key_name] = value

    # Build the SMOLTRACE command, shell-quoting user-supplied values so the
    # invocation cannot break on spaces or special characters
    cmd_parts = ["smoltrace-eval"]
    cmd_parts.append(f"--model {shlex.quote(model)}")
    cmd_parts.append(f"--provider {shlex.quote(provider)}")
    if hf_inference_provider:
        cmd_parts.append(f"--hf-inference-provider {shlex.quote(hf_inference_provider)}")
    cmd_parts.append(f"--search-provider {shlex.quote(search_provider)}")
    if enable_tools:
        cmd_parts.append(f"--enable-tools {shlex.quote(','.join(enable_tools))}")
    cmd_parts.append(f"--agent-type {shlex.quote(agent_type)}")
    cmd_parts.append(f"--dataset-name {shlex.quote(dataset_name)}")
    cmd_parts.append(f"--split {shlex.quote(split)}")
    if difficulty != "all":
        cmd_parts.append(f"--difficulty {shlex.quote(difficulty)}")
    if parallel_workers > 1:
        cmd_parts.append(f"--parallel-workers {parallel_workers}")
    cmd_parts.append(f"--output-format {shlex.quote(output_format)}")
    if output_dir and output_format == "json":
        cmd_parts.append(f"--output-dir {shlex.quote(output_dir)}")
    if enable_otel:
        cmd_parts.append("--enable-otel")
    if not enable_gpu_metrics:
        cmd_parts.append("--disable-gpu-metrics")
    if private:
        cmd_parts.append("--private")
    if debug:
        cmd_parts.append("--debug")
    if quiet:
        cmd_parts.append("--quiet")
    cmd_parts.append(f"--run-id {shlex.quote(job_id)}")

    command = " ".join(cmd_parts)

    # Create Modal app dynamically
    try:
        app = modal.App(f"smoltrace-eval-{job_id}")

        # Detect current Python version dynamically (must match for serialized=True)
        python_version = f"{sys.version_info.major}.{sys.version_info.minor}"

        # Define the Modal function with an appropriate base image
        if modal_gpu:
            # Use GPU-optimized image with CUDA for GPU jobs (using latest stable CUDA)
            image = modal.Image.from_registry(
                "nvidia/cuda:12.6.0-cudnn-devel-ubuntu22.04",
                add_python=python_version  # Dynamically match current environment
            ).pip_install([
                "smoltrace",
                "ddgs",  # DuckDuckGo search
                "litellm",
                "transformers",
                "torch",
                "accelerate",  # Required for GPU device_map
                "bitsandbytes",  # For quantization support
                "sentencepiece",  # For some tokenizers
                "protobuf",  # For some models
                "hf_transfer",  # Fast HuggingFace downloads
                "nvidia-ml-py"  # GPU metrics collection
            ]).env({
                # Enable fast downloads and verbose logging
                "HF_HUB_ENABLE_HF_TRANSFER": "1",
                "TRANSFORMERS_VERBOSITY": "info",
                "HF_HUB_VERBOSITY": "info"
            })
        else:
            # Use lightweight image for CPU jobs
            image = modal.Image.debian_slim(python_version=python_version).pip_install([
                "smoltrace",
                "ddgs",  # DuckDuckGo search
                "litellm"
            ])

        @app.function(
            image=image,
            gpu=modal_gpu,  # None runs the job on CPU only
            secrets=[
                modal.Secret.from_dict(env_vars)
            ],
            timeout=3600,  # 1 hour timeout
            serialized=True  # Required for functions defined in local scope
        )
        def run_evaluation(command_to_run: str):
            """Run SMOLTRACE evaluation on Modal"""
            import subprocess
            import sys

            print("=" * 80)
            print("Starting SMOLTRACE evaluation on Modal")
            print(f"Command: {command_to_run}")
            print(f"Python version: {sys.version}")

            # Show GPU info if available
            try:
                import torch
                if torch.cuda.is_available():
                    print(f"GPU: {torch.cuda.get_device_name(0)}")
                    print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
            except Exception:
                # torch may be absent on CPU images; GPU info is best-effort
                pass

            print("=" * 80)
            print("\nNote: Model download may take several minutes for large models (14B = ~28GB)")
            print("Downloading and initializing model...\n")

            try:
                # Stream output in real time (capture_output=False) so progress
                # shows up in the Modal logs as it happens
                result = subprocess.run(
                    command_to_run,
                    shell=True,
                    capture_output=False
                )

                # Output was streamed, so only the return code is available here
                print("\n" + "=" * 80)
                print("EVALUATION COMPLETED")
                print(f"Return code: {result.returncode}")
                print("=" * 80)

                return {
                    "returncode": result.returncode,
                    "stdout": "Check Modal logs for full output (streaming mode)",
                    "stderr": ""
                }
            except Exception as e:
                error_msg = f"Error running evaluation: {str(e)}"
                print("\n" + "=" * 80)
                print("EVALUATION FAILED")
                print(error_msg)
                print("=" * 80)
                import traceback
                traceback.print_exc()
                return {
                    "returncode": -1,
                    "stdout": "",
                    "stderr": error_msg
                }

        # Submit the job using Modal's remote() in a background thread
        # Note: spawn() doesn't work well with dynamically created apps
        # remote() ensures the job actually executes, threading keeps UI responsive
        import threading

        # Share state with the background thread via a mutable container
        result_container = {"started": False}

        def run_job_on_modal():
            """Run the Modal job in background thread"""
            try:
                with app.run():
                    # remote() blocks until the function finishes and returns
                    # its result dict (spawn() is unreliable with dynamic apps)
                    result = run_evaluation.remote(command)
                    result_container["started"] = True
                    print(f"Modal job completed with return code: {result.get('returncode', 'unknown')}")
            except Exception as e:
                print(f"Error running Modal job: {e}")
                result_container["error"] = str(e)

        # Start the job in a background thread so we don't block the UI
        job_thread = threading.Thread(target=run_job_on_modal, daemon=True)
        job_thread.start()
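
        # Caveat: app.run() creates an ephemeral Modal app tied to this
        # process; if the host process exits before the background thread
        # finishes, the remote job may be terminated along with it.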

        # Give Modal a moment to start and surface any immediate errors
        import time
        time.sleep(2)

        if "error" in result_container:
            raise RuntimeError(result_container["error"])

        # Use job_id as the tracking ID since remote() doesn't expose a call_id
        modal_call_id = f"modal-{job_id}"

        return {
            "success": True,
            "job_id": job_id,
            "modal_call_id": modal_call_id,  # Modal's internal function call ID
            "platform": "Modal",
            "hardware": modal_gpu or "CPU",
            "command": command,
            "status": "submitted",
            "message": f"Job successfully submitted to Modal (hardware: {modal_gpu or 'CPU'})",
            "instructions": f"""
✅ Job submitted successfully!

**Job Details:**
- Run ID: {job_id}
- Modal Call ID: {modal_call_id}
- Hardware: {modal_gpu or "CPU"}
- Platform: Modal (serverless compute)

**What happens next:**
1. Job starts running on Modal infrastructure
2. For GPU jobs: Model downloads first (14B models = ~28GB, can take 10-15 min)
3. SMOLTRACE evaluates your model
4. Results are automatically pushed to HuggingFace datasets
5. They will appear in the TraceMind leaderboard when complete

**Monitoring**: Check Modal dashboard for real-time logs and progress:
https://modal.com/apps

**Expected Duration**:
- CPU jobs (API models): 2-5 minutes
- GPU jobs (local models): 15-30 minutes (includes model download)

**Cost**: Modal charges per-second usage. Estimated cost: $0.01-1.00 depending on model size and hardware.
            """.strip()
        }

    except Exception as e:
        error_msg = str(e)

        # Check for common Modal errors
        if "MODAL_TOKEN_ID" in error_msg or "authentication" in error_msg.lower():
            return {
                "success": False,
                "error": "Modal authentication failed. Please verify your MODAL_TOKEN_ID and MODAL_TOKEN_SECRET in Settings.",
                "job_id": job_id,
                "troubleshooting": """
**Steps to fix:**
1. Go to https://modal.com/settings/tokens
2. Create a new token
3. Copy Token ID (starts with 'ak-') and Token Secret (starts with 'as-')
4. Add them to Settings in TraceMind
5. Try again
                """
            }
        else:
            return {
                "success": False,
                "error": f"Failed to submit Modal job: {error_msg}",
                "job_id": job_id,
                "command": command
            }


def _auto_select_modal_hardware(provider: str, model: str) -> Optional[str]:
    """
    Automatically select Modal hardware based on model and provider.

    Memory estimation for agentic workloads:
    - Model weights (FP16): ~2GB per 1B params
    - KV cache for long contexts: ~1.5-2x model size for agentic tasks
    - Inference overhead: ~20-30% additional
    - Total: ~4-5GB per 1B params for safe agentic execution
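
    For example, a 7B model has ~14GB of FP16 weights; the KV cache grows the
    footprint to ~21-28GB, and inference overhead brings the total to roughly
    28-35GB, which fits comfortably on an L40S (48GB).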

    Args:
        provider: Provider type
        model: Model identifier

    Returns:
        str: Modal GPU type or None for CPU
    """
    # API models don't need GPU
    if provider in ["litellm", "inference"]:
        return None

    # Local models need GPU - select based on model size
    # Conservative allocation for agentic tasks (model weights + KV cache + inference overhead)
    # Memory estimation: ~4-5GB per 1B params for safe agentic execution
    model_lower = model.lower()

    # Extract the parameter count from the model name (e.g. "7b", "13.5b");
    # the trailing \b avoids false matches such as "4bit"
    import re
    size_match = re.search(r'(\d+(?:\.\d+)?)b\b', model_lower)

    if size_match:
        model_size = float(size_match.group(1))

        # Complete coverage from <1B to 100B+ with no gaps
        if model_size >= 49:
            # 49B-100B+: H200 (141GB VRAM)
            return "H200"
        elif model_size >= 13:
            # 13B-48B: A100-80GB (e.g., 13B, 14B, 22B, 27B, 34B)
            return "A100-80GB"
        elif model_size >= 6:
            # 6B-12B: L40S (48GB)
            return "L40S"
        else:
            # <6B: T4 (16GB)
            return "T4"
    else:
        # No size detected in model name - default to L40S (safe middle ground)
        return "L40S"