Steve Nguyen committed on
Commit
b50b995
·
1 Parent(s): d8d4968

rename main.py

Browse files
Files changed (1) hide show
  1. app.py +1448 -0
app.py ADDED
@@ -0,0 +1,1448 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Visual Novel Gradio App - Main application with UI and handlers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+ import urllib.parse
7
+ import numpy as np
8
+ from typing import Optional
9
+
10
+ import gradio as gr
11
+ from fastrtc import WebRTC
12
+ from fastapi import FastAPI
13
+ from fastapi.staticfiles import StaticFiles
14
+
15
+ from engine import SceneState, POSITION_OFFSETS, Choice, InputRequest
16
+ from story import build_sample_story
17
+
18
+
19
def passthrough_stream(frame):
    """Echo the incoming webcam frame back unchanged.

    Used as the FastRTC stream handler so the viewer simply sees
    their own camera feed mirrored in the overlay.
    """
    return frame
22
+
23
+
24
def camera_hint_text(show_camera: bool) -> str:
    """Return the status line shown next to the webcam widget."""
    active = "🎥 Webcam overlay is active for this scene."
    hidden = "🕹️ Webcam is hidden for this scene."
    return active if show_camera else hidden
28
+
29
+
30
def voice_hint_text(show_voice: bool) -> str:
    """Return the status line shown next to the voice-capture widget."""
    available = "🎤 Voice capture is available in this scene."
    hidden = "🔇 Voice capture is hidden for this scene."
    return available if show_voice else hidden
34
+
35
+
36
def motor_hint_text(show_motors: bool) -> str:
    """Return the status line shown next to the motor-control widget."""
    available = "🤖 Motor control is available in this scene."
    hidden = "🛑 Motor control hidden for this scene."
    return available if show_motors else hidden
40
+
41
+
42
def robot_hint_text(show_robot: bool) -> str:
    """Return the status line shown next to the robot-control widget."""
    available = "🤖 Robot control is available in this scene."
    hidden = "🔒 Robot control hidden for this scene."
    return available if show_robot else hidden
46
+
47
+
48
+ # Dynamixel control functions using Python protocol implementation
49
def dxl_build_ping_packet(motor_id: int) -> list[int]:
    """Return a Dynamixel PING packet for *motor_id* as a list of byte values."""
    # Imported lazily so the module loads even without the helper installed.
    import dynamixel

    return list(dynamixel.ping_packet(motor_id))
54
+
55
+
56
def dxl_build_torque_packet(motor_id: int, enable: bool) -> list[int]:
    """Return a Dynamixel torque enable/disable packet as a list of byte values."""
    # Imported lazily so the module loads even without the helper installed.
    import dynamixel

    return list(dynamixel.torque_enable_packet(motor_id, enable))
61
+
62
+
63
def dxl_build_goal_position_packet(motor_id: int, degrees: float) -> list[int]:
    """Return a Dynamixel goal-position packet as a list of byte values.

    *degrees* is clamped to [0, 360] and mapped linearly onto the servo's
    0-4095 tick range, truncating to a whole tick.
    """
    import dynamixel

    bounded = min(360.0, max(0.0, degrees))
    ticks = int((bounded / 360.0) * 4095)  # truncate toward zero, as before
    return list(dynamixel.goal_position_packet(motor_id, ticks))
71
+
72
+
73
def dxl_parse_response(response_bytes: list[int]) -> str:
    """Turn a raw status-packet byte list into a human-readable verdict string."""
    import dynamixel

    if not response_bytes:
        return "❌ No response received"
    ok, message = dynamixel.parse_status_packet(bytes(response_bytes))
    prefix = "✅" if ok else "❌"
    return f"{prefix} {message}"
83
+
84
+
85
def get_scene_motor_packets(story_state: dict) -> list:
    """Build a goal-position packet for every motor command in the current scene.

    Returns an empty list when the scene index is out of range.
    """
    scenes = story_state["scenes"]
    idx = story_state["index"]
    if not (0 <= idx < len(scenes)):
        return []
    return [
        dxl_build_goal_position_packet(cmd.motor_id, cmd.position)
        for cmd in scenes[idx].motor_commands
    ]
98
+
99
+
100
def get_scene_audio(story_state: dict) -> Optional[str]:
    """Return the current scene's audio file path, or None when out of range."""
    scenes = story_state["scenes"]
    idx = story_state["index"]
    if not (0 <= idx < len(scenes)):
        return None
    return scenes[idx].audio_file
108
+
109
+
110
def get_scene_robot_pose(story_state: dict) -> Optional[dict]:
    """Translate the current scene's robot pose into a Reachy target dict.

    Returns None when the scene index is out of range or the scene
    defines no pose.
    """
    scenes = story_state["scenes"]
    idx = story_state["index"]
    if not (0 <= idx < len(scenes)):
        return None
    pose = scenes[idx].robot_pose
    if not pose:
        return None
    return {
        "target_head_pose": {
            "x": pose.head_x,
            "y": pose.head_y,
            "z": pose.head_z,
            "roll": pose.head_roll,
            "pitch": pose.head_pitch,
            "yaw": pose.head_yaw,
        },
        "target_body_yaw": pose.body_yaw,
        "target_antennas": [pose.antenna_left, pose.antenna_right],
    }
130
+
131
+
132
def synthesize_tone(sample_rate: int = 16000, duration: float = 1.25) -> tuple[int, np.ndarray]:
    """Generate a short two-harmonic confirmation chime.

    Returns ``(sample_rate, samples)`` where samples are float32,
    faded in and out over 80 ms to avoid clicks.
    """
    t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)
    # Fundamental at 520 Hz plus a quieter 880 Hz partial.
    carrier = np.sin(2 * np.pi * 520 * t) + 0.4 * np.sin(2 * np.pi * 880 * t)
    fade_len = int(sample_rate * 0.08)
    envelope = np.ones_like(carrier)
    envelope[:fade_len] *= np.linspace(0.0, 1.0, fade_len)
    envelope[-fade_len:] *= np.linspace(1.0, 0.0, fade_len)
    return sample_rate, (0.18 * carrier * envelope).astype(np.float32)
142
+
143
+
144
def describe_audio_clip(audio: Optional[tuple[int, np.ndarray]]) -> str:
    """Summarise a recorded clip (duration and RMS level) for display in the UI."""
    if audio is None:
        return "No audio captured yet. Hit record to speak with the companion."
    sample_rate, samples = audio
    count = 0 if samples is None else len(samples)
    if count == 0:
        return "Audio appears empty. Please re-record."
    # `or 1` guards against a zero sample rate (avoids division by zero).
    duration = count / float(sample_rate or 1)
    rms = float(np.sqrt(np.mean(np.square(samples))))
    return f"Captured {duration:.2f}s of audio (RMS ~{rms:.3f}). Ready for the AI."
154
+
155
+
156
def process_voice_interaction(
    audio: Optional[tuple[int, np.ndarray]], prompt: str
) -> tuple[str, Optional[tuple[int, np.ndarray]], str, tuple[int, np.ndarray]]:
    """Produce the clip summary, echoed audio, AI text line, and reply tone.

    Placeholder "AI": it never inspects the audio content — it only reports
    on the clip via describe_audio_clip() and replies with a synthesized
    chime from synthesize_tone().
    """
    summary = describe_audio_clip(audio)
    user_prompt = (prompt or "React to the current scene.").strip()
    if audio is None:
        pending_line = (
            "AI response pending: record or upload an audio clip so the agent can react."
        )
        return summary, None, pending_line, synthesize_tone()
    ai_line = (
        "Imaginary AI companion: I'm using your latest microphone input "
        f"and the prompt \"{user_prompt}\" to craft a response."
    )
    return summary, audio, ai_line, synthesize_tone()
173
+
174
+
175
def render_scene(
    scene: SceneState, index: int, total: int, variables: dict
) -> tuple[str, str, str, bool, bool, bool, bool, Optional[list[Choice]], Optional[InputRequest]]:
    """Generate the HTML stage, dialogue text, and metadata for one scene.

    Args:
        scene: Scene to render.
        index: Zero-based position of the scene in the story.
        total: Total scene count (for the "n / total" metadata line).
        variables: Story variables substituted into the text as ``{name}``.

    Returns:
        ``(stage_html, dialogue_markdown, metadata, show_camera, show_voice,
        show_motors, show_robot, choices, input_request)``.

    Fix: the return annotation referenced ``List``, which is never imported
    in this module (only ``Optional`` is); it now uses the builtin ``list``,
    so tools that evaluate annotations no longer hit a NameError.
    """
    char_layers = []
    for sprite in scene.characters.values():
        if not sprite.visible:
            continue
        offset = POSITION_OFFSETS.get(sprite.position, "50%")
        # Build class names with the optional animation modifier.
        class_names = "character"
        if sprite.animation:
            class_names += f" anim-{sprite.animation}"
        # Scale goes through a CSS variable so keyframe animations can reuse it.
        char_layers.append(
            f"""
            <div class="{class_names}" style="
                left:{offset};
                background-image:url('{sprite.image_url}');
                --char-scale:{sprite.scale};
            " title="{sprite.name}"></div>
            """
        )
    dialogue_markdown = (
        "" if scene.text else ""
    )  # Avoid duplicating the speech bubble content below the stage.
    metadata = f"{scene.background_label or 'Scene'} · {index + 1} / {total}"
    bubble_html = ""
    text_content = (scene.text or "").strip()

    # Substitute variables in text (e.g., {player_name}).
    for var_name, var_value in variables.items():
        text_content = text_content.replace(f"{{{var_name}}}", str(var_value))

    if text_content:
        speaker_html = (
            f'<div class="bubble-speaker">{scene.speaker}</div>'
            if scene.speaker
            else ""
        )
        bubble_html = f"""
        <div class="speech-bubble">
            {speaker_html}
            <div class="bubble-text">{text_content}</div>
        </div>
        """

    # Apply blur filters to background and stage layers only when requested.
    bg_blur_style = f"filter: blur({scene.background_blur}px);" if scene.background_blur > 0 else ""
    stage_blur_style = f"filter: blur({scene.stage_blur}px);" if scene.stage_blur > 0 else ""

    # Optional mid-ground stage layer between the background and characters.
    stage_layer_html = ""
    if scene.stage_url:
        stage_layer_html = f'<div class="stage-layer" style="background-image:url(\'{scene.stage_url}\'); {stage_blur_style}"></div>'

    stage_html = f"""
    <div class="stage">
        <div class="stage-background" style="background-image:url('{scene.background_url}'); {bg_blur_style}"></div>
        {stage_layer_html}
        {''.join(char_layers)}
        {bubble_html}
    </div>
    """
    return (
        stage_html,
        dialogue_markdown,
        metadata,
        scene.show_camera,
        scene.show_voice,
        scene.show_motors,
        scene.show_robot,
        scene.choices,
        scene.input_request,
    )
249
+
250
+
251
def is_scene_accessible(scene: SceneState, active_paths: set) -> bool:
    """True when *scene* may be visited given the currently active paths.

    Path-less scenes belong to the main storyline and are always reachable;
    branch scenes require their path to have been unlocked by a choice.
    """
    return scene.path is None or scene.path in active_paths
258
+
259
+
260
def change_scene(
    story_state: dict, direction: int
) -> tuple[dict, str, str, str, str, dict, str, dict, str, dict, str, dict, dict, str, dict, dict, dict, dict]:
    """Move the story pointer by *direction* scenes and rebuild the UI.

    Scenes whose ``path`` is not in the active path set are skipped while
    searching in the requested direction; the pointer stays put when no
    accessible scene exists that way.  Returns the updated state followed by
    the 17 component values the UI handlers are wired to (stage HTML,
    dialogue, metadata, per-feature hint/visibility pairs, choices update,
    input prompt markdown + visibility, both nav buttons, right column).

    Bug fix: the empty-story early return previously emitted only 17 values
    (one short of the 18 the normal path and the annotation declare) because
    the input-prompt markdown slot was missing, which would desynchronize
    every Gradio output binding after it.
    """
    scenes: list[SceneState] = story_state["scenes"]
    variables = story_state.get("variables", {})
    active_paths = story_state.get("active_paths", set())

    if not scenes:
        # Degenerate story: blank stage, every optional widget hidden.
        return (
            story_state,
            "",
            "No scenes available.",
            "",
            camera_hint_text(False),
            gr.update(visible=False),
            voice_hint_text(False),
            gr.update(visible=False),
            motor_hint_text(False),
            gr.update(visible=False),
            robot_hint_text(False),
            gr.update(visible=False),
            gr.update(visible=False, choices=[]),
            "",  # input prompt markdown (was missing: 17 vs 18 outputs)
            gr.update(visible=False),
            gr.update(interactive=True),
            gr.update(interactive=True),
            gr.update(visible=False),  # right_column
        )

    total = len(scenes)
    current_index = story_state["index"]

    # Walk in `direction` until the first accessible scene; stay put if none.
    new_index = current_index
    search_index = current_index + direction
    while 0 <= search_index < total:
        if is_scene_accessible(scenes[search_index], active_paths):
            new_index = search_index
            break
        search_index += direction

    story_state["index"] = new_index
    html, dialogue, meta, show_camera, show_voice, show_motors, show_robot, choices, input_req = render_scene(
        scenes[story_state["index"]], story_state["index"], total, variables
    )

    # Navigation is locked while a choice or a text input is pending.
    nav_enabled = not bool(choices) and not bool(input_req)
    # The side column only appears when at least one feature is active.
    right_column_visible = show_camera or show_voice or show_motors or show_robot

    return (
        story_state,
        html,
        dialogue,
        meta,
        camera_hint_text(show_camera),
        gr.update(visible=show_camera),
        voice_hint_text(show_voice),
        gr.update(visible=show_voice),
        motor_hint_text(show_motors),
        gr.update(visible=show_motors),
        robot_hint_text(show_robot),
        gr.update(visible=show_robot),
        gr.update(visible=bool(choices), choices=[(c.text, i) for i, c in enumerate(choices)] if choices else [], value=None),
        f"### {input_req.prompt}" if input_req else "",
        gr.update(visible=bool(input_req)),
        gr.update(interactive=nav_enabled),
        gr.update(interactive=nav_enabled),
        gr.update(visible=right_column_visible),  # right_column
    )
332
+
333
+
334
def handle_choice(story_state: dict, choice_index: int) -> tuple[dict, str, str, str, str, dict, str, dict, str, dict, str, dict, dict, str, dict, dict, dict, dict]:
    """Jump to the scene selected by the player and unlock its branch path."""
    scenes: list[SceneState] = story_state["scenes"]
    variables = story_state.get("variables", {})
    active_paths = story_state.get("active_paths", set())
    current_scene = scenes[story_state["index"]]

    valid = bool(current_scene.choices) and 0 <= choice_index < len(current_scene.choices)
    if not valid:
        # Out-of-range or missing choice: re-render the current scene in place.
        return change_scene(story_state, 0)

    chosen = current_scene.choices[choice_index]
    story_state["index"] = chosen.next_scene_index

    # Entering a branch scene unlocks its path for later navigation.
    target_scene = scenes[chosen.next_scene_index]
    if target_scene.path:
        active_paths = set(active_paths)  # copy so shared state isn't mutated
        active_paths.add(target_scene.path)
        story_state["active_paths"] = active_paths

    html, dialogue, meta, show_camera, show_voice, show_motors, show_robot, choices, input_req = render_scene(
        scenes[story_state["index"]], story_state["index"], len(scenes), variables
    )

    nav_enabled = not bool(choices) and not bool(input_req)
    right_column_visible = show_camera or show_voice or show_motors or show_robot

    return (
        story_state,
        html,
        dialogue,
        meta,
        camera_hint_text(show_camera),
        gr.update(visible=show_camera),
        voice_hint_text(show_voice),
        gr.update(visible=show_voice),
        motor_hint_text(show_motors),
        gr.update(visible=show_motors),
        robot_hint_text(show_robot),
        gr.update(visible=show_robot),
        gr.update(visible=bool(choices), choices=[(c.text, i) for i, c in enumerate(choices)] if choices else [], value=None),
        f"### {input_req.prompt}" if input_req else "",
        gr.update(visible=bool(input_req)),
        gr.update(interactive=nav_enabled),
        gr.update(interactive=nav_enabled),
        gr.update(visible=right_column_visible),  # right_column
    )
380
+
381
+
382
def handle_input(story_state: dict, user_input: str) -> tuple[dict, str, str, str, str, dict, str, dict, str, dict, str, dict, dict, str, dict, dict, dict, dict]:
    """Save the player's typed value (if any) and advance one scene."""
    scenes: list[SceneState] = story_state["scenes"]
    variables = story_state.get("variables", {})
    current_scene = scenes[story_state["index"]]

    # Only store the value when the scene actually asked for input.
    if current_scene.input_request and user_input:
        variables[current_scene.input_request.variable_name] = user_input
        story_state["variables"] = variables

    # Advance, clamping at the final scene.
    story_state["index"] = min(story_state["index"] + 1, len(scenes) - 1)

    html, dialogue, meta, show_camera, show_voice, show_motors, show_robot, choices, input_req = render_scene(
        scenes[story_state["index"]], story_state["index"], len(scenes), variables
    )

    nav_enabled = not bool(choices) and not bool(input_req)
    right_column_visible = show_camera or show_voice or show_motors or show_robot

    return (
        story_state,
        html,
        dialogue,
        meta,
        camera_hint_text(show_camera),
        gr.update(visible=show_camera),
        voice_hint_text(show_voice),
        gr.update(visible=show_voice),
        motor_hint_text(show_motors),
        gr.update(visible=show_motors),
        robot_hint_text(show_robot),
        gr.update(visible=show_robot),
        gr.update(visible=bool(choices), choices=[(c.text, i) for i, c in enumerate(choices)] if choices else [], value=None),
        f"### {input_req.prompt}" if input_req else "",
        gr.update(visible=bool(input_req)),
        gr.update(interactive=nav_enabled),
        gr.update(interactive=nav_enabled),
        gr.update(visible=right_column_visible),  # right_column
    )
422
+
423
+
424
def load_initial_state() -> tuple[dict, str, str, str, str, dict, str, dict, str, dict, str, dict, dict, str, dict, dict, dict, dict]:
    """Build the sample story and render its opening scene at app start-up."""
    scenes = build_sample_story()
    story_state = {"scenes": scenes, "index": 0, "variables": {}, "active_paths": set()}

    if scenes:
        rendered = render_scene(scenes[0], 0, len(scenes), {})
    else:
        # Empty story fallback mirrors render_scene's output shape.
        rendered = ("", "No scenes available.", "", False, False, False, False, None, None)
    html, dialogue, meta, show_camera, show_voice, show_motors, show_robot, choices, input_req = rendered

    nav_enabled = not bool(choices) and not bool(input_req)
    right_column_visible = show_camera or show_voice or show_motors or show_robot

    return (
        story_state,
        html,
        dialogue,
        meta,
        camera_hint_text(show_camera),
        gr.update(visible=show_camera),
        voice_hint_text(show_voice),
        gr.update(visible=show_voice),
        motor_hint_text(show_motors),
        gr.update(visible=show_motors),
        robot_hint_text(show_robot),
        gr.update(visible=show_robot),
        gr.update(visible=bool(choices), choices=[(c.text, i) for i, c in enumerate(choices)] if choices else [], value=None),
        f"### {input_req.prompt}" if input_req else "",
        gr.update(visible=bool(input_req)),
        gr.update(interactive=nav_enabled),
        gr.update(interactive=nav_enabled),
        gr.update(visible=right_column_visible),  # right_column
    )
467
+
468
+
469
# CSS injected into the Gradio app: stage layout, character sprites and their
# keyframe animations, the speech bubble, camera/motor widget chrome.
CUSTOM_CSS = """
/* Override Gradio's height constraints for stage container */
#stage-container {
    height: auto !important;
    max-height: none !important;
}
#stage-container > div {
    height: auto !important;
}
.stage {
    width: 100%;
    height: 80vh;
    min-height: 600px;
    border-radius: 0;
    position: relative;
    overflow: hidden;
    box-shadow: 0 12px 32px rgba(15,23,42,0.45);
    display: flex;
    align-items: flex-end;
    justify-content: center;
}
/* Ensure background layers fill the stage */
.stage-background,
.stage-layer {
    max-height: none !important;
}
.stage-background {
    position: absolute;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background-size: contain;
    background-position: center;
    background-repeat: no-repeat;
    z-index: 0;
}
.stage-layer {
    position: absolute;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background-size: contain;
    background-position: center;
    background-repeat: no-repeat;
    z-index: 5;
}
.character {
    position: absolute;
    bottom: 0;
    width: 200px;
    height: 380px;
    background-size: contain;
    background-repeat: no-repeat;
    --char-scale: 1.0;
    transform: translateX(-50%) scale(var(--char-scale));
    transition: transform 0.4s ease;
    z-index: 10;
}
/* Character animations */
.character.anim-idle {
    animation: anim-idle 4s ease-in-out infinite;
}
.character.anim-shake {
    animation: anim-shake 0.5s ease-in-out;
}
.character.anim-bounce {
    animation: anim-bounce 0.6s ease-in-out;
}
.character.anim-pulse {
    animation: anim-pulse 1s ease-in-out infinite;
}
.speech-bubble {
    position: absolute;
    bottom: 18px;
    left: 50%;
    transform: translateX(-50%);
    min-width: 60%;
    max-width: 90%;
    padding: 20px 24px;
    border-radius: 20px;
    background: rgba(15,23,42,0.88);
    color: #f8fafc;
    font-family: "Atkinson Hyperlegible", system-ui, sans-serif;
    box-shadow: 0 10px 28px rgba(0,0,0,0.35);
    z-index: 20;
}
.speech-bubble::after {
    content: "";
    position: absolute;
    bottom: -16px;
    left: 50%;
    transform: translateX(-50%);
    border-width: 16px 12px 0 12px;
    border-style: solid;
    border-color: rgba(15,23,42,0.88) transparent transparent transparent;
}
.bubble-speaker {
    font-size: 0.85rem;
    letter-spacing: 0.08em;
    font-weight: 700;
    text-transform: uppercase;
    color: #facc15;
    margin-bottom: 6px;
}
.bubble-text {
    font-size: 1.05rem;
    line-height: 1.5;
}
.camera-column {
    position: relative;
    min-height: 360px;
    gap: 0.75rem;
}
.camera-hint {
    font-size: 0.85rem;
    color: #cbd5f5;
    margin-bottom: 0.4rem;
}
#camera-wrapper {
    width: 100%;
    max-width: 320px;
}
#camera-wrapper > div {
    border-radius: 18px;
    background: rgba(15,23,42,0.88);
    padding: 6px;
    box-shadow: 0 12px 26px rgba(15,23,42,0.55);
}
#camera-wrapper video {
    border-radius: 14px;
    object-fit: cover;
    box-shadow: 0 10px 30px rgba(0,0,0,0.4);
}
.dxl-card {
    margin-top: 0.5rem;
    padding: 1rem 1.2rem;
    border-radius: 14px;
    background: rgba(15,23,42,0.85);
    color: #e2e8f0;
    box-shadow: 0 10px 26px rgba(0,0,0,0.45);
}
.dxl-card h3 {
    margin: 0 0 0.35rem 0;
}
.dxl-row {
    display: flex;
    gap: 0.6rem;
    align-items: center;
    margin-bottom: 0.5rem;
    flex-wrap: wrap;
}
.dxl-row label {
    font-size: 0.9rem;
    color: #cbd5e1;
}
.dxl-row input[type="number"],
.dxl-row select,
.dxl-row input[type="range"] {
    flex: 1;
    min-width: 120px;
}
.dxl-btn {
    padding: 0.5rem 0.8rem;
    border-radius: 10px;
    border: 1px solid rgba(148,163,184,0.4);
    background: rgba(255,255,255,0.05);
    color: #e2e8f0;
    cursor: pointer;
    transition: transform 0.1s ease, background 0.15s ease;
}
.dxl-btn.primary {
    background: linear-gradient(120deg, #06b6d4, #2563eb);
    border-color: rgba(59,130,246,0.5);
}
.dxl-btn:disabled {
    opacity: 0.5;
    cursor: not-allowed;
}
.dxl-btn:not(:disabled):hover {
    transform: translateY(-1px);
}
.dxl-status {
    font-size: 0.9rem;
    color: #a5b4fc;
    min-height: 1.2rem;
}
.input-prompt {
    font-size: 1.1rem;
    font-weight: 600;
    color: #1e293b;
    margin-bottom: 0.5rem;
}
@keyframes anim-idle {
    0% { transform: translate(-50%, 0px) scale(var(--char-scale)); }
    50% { transform: translate(-50%, 12px) scale(var(--char-scale)); }
    100% { transform: translate(-50%, 0px) scale(var(--char-scale)); }
}
@keyframes anim-shake {
    0%, 100% { transform: translate(-50%, 0) rotate(0deg) scale(var(--char-scale)); }
    10%, 30%, 50%, 70%, 90% { transform: translate(-52%, 0) rotate(-2deg) scale(var(--char-scale)); }
    20%, 40%, 60%, 80% { transform: translate(-48%, 0) rotate(2deg) scale(var(--char-scale)); }
}
@keyframes anim-bounce {
    0%, 100% { transform: translate(-50%, 0) scale(var(--char-scale)); }
    25% { transform: translate(-50%, -30px) scale(var(--char-scale)); }
    50% { transform: translate(-50%, 0) scale(var(--char-scale)); }
    75% { transform: translate(-50%, -15px) scale(var(--char-scale)); }
}
@keyframes anim-pulse {
    0%, 100% { transform: translate(-50%, 0) scale(var(--char-scale)); }
    50% { transform: translate(-50%, 0) scale(calc(var(--char-scale) * 1.05)); }
}
"""
684
+
685
# Browser-side helper: lists available video inputs via the MediaDevices API,
# falling back to the previously known device list on failure.
ENUMERATE_CAMERAS_JS = """
async (currentDevices) => {
    if (!navigator.mediaDevices?.enumerateDevices) {
        return currentDevices || [];
    }
    try {
        const devices = await navigator.mediaDevices.enumerateDevices();
        return devices
            .filter((device) => device.kind === "videoinput")
            .map((device, index) => ({
                label: device.label || `Camera ${index + 1}`,
                deviceId: device.deviceId || null,
            }));
    } catch (error) {
        console.warn("enumerateDevices failed", error);
        return currentDevices || [];
    }
}
"""
704
+
705
def load_dxl_script_js() -> str:
    """Generate JavaScript that injects the Web Serial motor-control script.

    A timestamp query parameter is appended so the browser never serves a
    stale cached copy of ``dxl_webserial.js``.
    """
    import time

    cache_buster = int(time.time())
    return f"""
    () => {{
        const script = document.createElement('script');
        script.type = 'module';
        script.src = '/web/dxl_webserial.js?v={cache_buster}';
        script.onerror = () => console.error("[DXL] Failed to load motor control script");
        document.head.appendChild(script);
    }}
    """
718
+
719
+
720
def dxl_send_and_receive_js() -> str:
    """Return JavaScript that sends packet bytes and reads the reply via Web Serial.

    The JS expects ``window.dxlSerial`` (installed by dxl_webserial.js) to
    exist and be connected; on any failure it logs to the console and
    resolves to an empty list so the Python side reports "no response".
    """
    return """
    async (packet_bytes) => {
        // Check if dxlSerial is available and connected
        if (typeof window.dxlSerial === 'undefined' || !window.dxlSerial) {
            console.error("[DXL] Serial not available - connect first");
            return [];
        }

        if (!window.dxlSerial.connected) {
            console.error("[DXL] Not connected to serial port");
            return [];
        }

        try {
            await window.dxlSerial.writeBytes(packet_bytes);
            const response = await window.dxlSerial.readPacket(800);
            return response;
        } catch (err) {
            console.error("[DXL] Communication error:", err.message);
            return [];
        }
    }
    """
745
+
746
+
747
def execute_motor_packets_js() -> str:
    """Return JavaScript that plays back pre-built motor packets sequentially.

    Each packet is written over Web Serial and its status reply is awaited
    (800 ms budget) before the next one is sent.  When no serial connection
    exists the function returns silently — scene changes should not error
    out just because no robot is attached.
    """
    return """
    async (packets) => {
        if (!packets || packets.length === 0) {
            return; // No packets to execute
        }

        // Check if serial is available
        if (typeof window.dxlSerial === 'undefined' || !window.dxlSerial || !window.dxlSerial.connected) {
            return; // Silently skip if not connected
        }

        // Execute each packet sequentially
        for (const pkt of packets) {
            try {
                await window.dxlSerial.writeBytes(pkt);
                await window.dxlSerial.readPacket(800);
            } catch (err) {
                console.error(`[Motors] Error:`, err.message);
            }
        }
    }
    """
771
+
772
+
773
def play_scene_audio_js() -> str:
    """Return JavaScript that plays a scene's audio file in the browser.

    Reuses a single hidden ``<audio>`` element (id ``scene-audio-player``)
    so consecutive scenes replace each other's playback instead of
    overlapping; playback failures are only logged.
    """
    return """
    (audio_path) => {
        if (!audio_path || audio_path === '') {
            return; // No audio to play
        }

        // Create or reuse audio element
        let audio = document.getElementById('scene-audio-player');
        if (!audio) {
            audio = new Audio();
            audio.id = 'scene-audio-player';
        }

        console.log('[Audio] Playing:', audio_path);
        audio.src = audio_path;
        audio.play().catch(err => console.error('[Audio] Playback failed:', err));
    }
    """
793
+
794
+
795
def load_robot_ws_script_js() -> str:
    """JavaScript to initialize WebSocket connection to Reachy Mini robot.

    Returns:
        str: a JS arrow function suitable for Gradio ``js=`` hooks.  The
        script defines ``window.loadRobotWebSocket`` once (idempotent
        guard), which connects to ``ws://localhost:8000/api/move/ws/set_target``,
        renders a connection-status badge inside the ``#robot-ws-host`` div,
        stores connection state on ``window.reachyRobot`` and reconnects
        automatically 2 s after any close.  It then polls up to 10 times
        (1 s apart) for the host div; if the div never appears, initialization
        is deferred until first use (see ``send_robot_pose_js``).
    """
    return """
    () => {
        console.log('[Robot] Initializing WebSocket connection...');

        // Define global initialization function if not already defined
        if (!window.loadRobotWebSocket) {
            window.loadRobotWebSocket = function() {
                const hostDiv = document.getElementById('robot-ws-host');
                if (!hostDiv) {
                    console.error('[Robot] Cannot initialize - host div not found');
                    return;
                }

                const ROBOT_URL = 'localhost:8000';
                const WS_URL = `ws://${ROBOT_URL}/api/move/ws/set_target`;

                console.log('[Robot] Connecting to:', WS_URL);

                // Global robot state
                window.reachyRobot = {
                    ws: null,
                    connected: false
                };

                // Create UI
                hostDiv.innerHTML = `
                    <div id="robot-connection-status" style="padding: 8px; border-radius: 4px; background: #f8d7da; color: #721c24; margin-bottom: 10px;">
                        <span id="robot-status-dot" style="display: inline-block; width: 8px; height: 8px; border-radius: 50%; background: #dc3545; margin-right: 6px;"></span>
                        <span id="robot-status-text">Disconnected - Trying to connect...</span>
                    </div>
                `;

                function updateStatus(connected) {
                    const statusDiv = document.getElementById('robot-connection-status');
                    const dot = document.getElementById('robot-status-dot');
                    const text = document.getElementById('robot-status-text');

                    if (connected) {
                        statusDiv.style.background = '#d4edda';
                        statusDiv.style.color = '#155724';
                        dot.style.background = '#28a745';
                        dot.style.boxShadow = '0 0 10px #28a745';
                        text.textContent = 'Connected to robot';
                    } else {
                        statusDiv.style.background = '#f8d7da';
                        statusDiv.style.color = '#721c24';
                        dot.style.background = '#dc3545';
                        dot.style.boxShadow = 'none';
                        text.textContent = 'Disconnected - Reconnecting...';
                    }
                }

                function connectWebSocket() {
                    console.log('[Robot] Connecting to WebSocket:', WS_URL);

                    window.reachyRobot.ws = new WebSocket(WS_URL);

                    window.reachyRobot.ws.onopen = () => {
                        console.log('[Robot] WebSocket connected');
                        window.reachyRobot.connected = true;
                        updateStatus(true);
                    };

                    window.reachyRobot.ws.onclose = () => {
                        console.log('[Robot] WebSocket disconnected');
                        window.reachyRobot.connected = false;
                        updateStatus(false);
                        // Reconnect after 2 seconds
                        setTimeout(connectWebSocket, 2000);
                    };

                    window.reachyRobot.ws.onerror = (error) => {
                        console.error('[Robot] WebSocket error:', error);
                    };

                    window.reachyRobot.ws.onmessage = (event) => {
                        try {
                            const message = JSON.parse(event.data);
                            if (message.status === 'error') {
                                console.error('[Robot] Server error:', message.detail);
                            }
                        } catch (e) {
                            console.error('[Robot] Failed to parse message:', e);
                        }
                    };
                }

                connectWebSocket();
            }; // End of window.loadRobotWebSocket definition
        }

        // Try to initialize (with multiple retries)
        let retryCount = 0;
        const maxRetries = 10;

        function tryInit() {
            const hostDiv = document.getElementById('robot-ws-host');
            if (!hostDiv) {
                retryCount++;
                if (retryCount <= maxRetries) {
                    console.warn(`[Robot] Host div not found, retry ${retryCount}/${maxRetries} in 1 second`);
                    setTimeout(tryInit, 1000);
                } else {
                    console.warn('[Robot] Gave up waiting for robot widget div. Will initialize on first use.');
                }
                return;
            }

            if (window.reachyRobot) {
                console.log('[Robot] Already initialized');
                return;
            }

            // Initialize now
            console.log('[Robot] Found host div, initializing...');
            window.loadRobotWebSocket();
        }

        tryInit();
    }
    """
918
+
919
+
920
def send_robot_pose_js() -> str:
    """JavaScript to send robot pose via WebSocket.

    Returns:
        str: an async JS arrow function taking ``pose_data`` (a
        JSON-serializable object).  A falsy ``pose_data`` is a no-op.  If
        the connection was never set up, it lazily calls
        ``window.loadRobotWebSocket`` and waits 500 ms for the socket to
        open; the pose is then sent as JSON, or skipped with a warning when
        the socket is not in the OPEN state.
    """
    return """
    async (pose_data) => {
        if (!pose_data) {
            return; // No pose to send
        }

        // Initialize WebSocket if not already done (lazy initialization)
        if (!window.reachyRobot) {
            console.log('[Robot] Lazy initialization on first pose send');
            if (window.loadRobotWebSocket) {
                window.loadRobotWebSocket();
                // Wait a bit for connection to establish
                await new Promise(resolve => setTimeout(resolve, 500));
            }
        }

        if (!window.reachyRobot || !window.reachyRobot.connected || !window.reachyRobot.ws || window.reachyRobot.ws.readyState !== WebSocket.OPEN) {
            console.warn('[Robot] WebSocket not connected, skipping pose command');
            return;
        }

        try {
            console.log('[Robot] Sending pose:', pose_data);
            window.reachyRobot.ws.send(JSON.stringify(pose_data));
        } catch (error) {
            console.error('[Robot] Failed to send pose:', error);
        }
    }
    """
951
+
952
+
953
+
954
def build_app() -> gr.Blocks:
    """Build the Visual Novel Gradio UI and wire all event handlers.

    The original wiring duplicated the audio/motor/robot side-effect chains
    for five scene-changing events and the DXL build->send->parse round trip
    for four buttons; both are factored into private helpers here.

    Returns:
        gr.Blocks: the assembled (not yet launched) Gradio application.
    """
    with gr.Blocks(title="Gradio Visual Novel") as demo:
        gr.HTML(f"<style>{CUSTOM_CSS}</style>", elem_id="vn-styles")
        story_state = gr.State()

        with gr.Row():
            with gr.Column(scale=3, min_width=640):
                stage = gr.HTML(label="Stage", elem_id="stage-container")
                dialogue = gr.Markdown(label="Dialogue")
                meta = gr.Markdown(label="Scene Info", elem_id="scene-info")

                # Choice selection
                choice_radio = gr.Radio(label="Make a choice", visible=False)

                # Text input
                with gr.Group(visible=False) as input_group:
                    input_prompt = gr.Markdown("", elem_classes=["input-prompt"])
                    with gr.Row():
                        user_input = gr.Textbox(label="Your answer", scale=4)
                        input_submit_btn = gr.Button("Submit", variant="primary", scale=1)

                with gr.Row():
                    prev_btn = gr.Button("⟵ Back", variant="secondary")
                    next_btn = gr.Button("Next ⟶", variant="primary")

            with gr.Column(
                scale=1, min_width=320, elem_classes=["camera-column"], visible=False
            ) as right_column:
                gr.Markdown("### Live Camera (WebRTC)")
                camera_hint = gr.Markdown(
                    camera_hint_text(False), elem_classes=["camera-hint"]
                )
                gr.Markdown(
                    "Allow camera access when prompted. The webcam appears only in scenes that request it.",
                    elem_classes=["camera-hint"],
                )
                with gr.Group(elem_id="camera-wrapper"):
                    webrtc_component = WebRTC(
                        label="Webcam Stream",
                        mode="send-receive",
                        modality="video",
                        full_screen=False,
                        visible=False,
                    )
                    # Echo the local stream back to the same component.
                    webrtc_component.stream(
                        fn=passthrough_stream,
                        inputs=[webrtc_component],
                        outputs=[webrtc_component],
                    )

                voice_hint = gr.Markdown(
                    voice_hint_text(False), elem_classes=["camera-hint"]
                )
                with gr.Group(visible=False, elem_id="voice-wrapper") as voice_section:
                    with gr.Accordion("Voice & Audio Agent", open=True):
                        gr.Markdown(
                            "Record a short line to pass to your AI companion. "
                            "We play back your clip and a synthetic confirmation tone.",
                            elem_classes=["camera-hint"],
                        )
                        voice_prompt = gr.Textbox(
                            label="Prompt/context",
                            value="React to the current scene with a friendly reply.",
                            lines=2,
                        )
                        mic = gr.Audio(
                            sources=["microphone", "upload"],
                            type="numpy",
                            label="Record or upload audio",
                        )
                        send_voice_btn = gr.Button(
                            "Send to voice agent", variant="secondary"
                        )
                        voice_summary = gr.Markdown("No audio captured yet.")
                        playback = gr.Audio(label="Your recording", interactive=False)
                        ai_voice_text = gr.Markdown("AI response will appear here.")
                        ai_voice_audio = gr.Audio(
                            label="AI voice reply (synthetic tone)", interactive=False
                        )
                        send_voice_btn.click(
                            fn=process_voice_interaction,
                            inputs=[mic, voice_prompt],
                            outputs=[
                                voice_summary,
                                playback,
                                ai_voice_text,
                                ai_voice_audio,
                            ],
                        )

                motor_hint = gr.Markdown(
                    motor_hint_text(False), elem_classes=["camera-hint"]
                )
                with gr.Group(visible=False, elem_id="dxl-panel-container") as motor_group:
                    with gr.Accordion("Dynamixel XL330 Control", open=True):
                        gr.Markdown(
                            "**Web Serial Control** - Use Chrome/Edge desktop. Connect to serial port, then control motors.",
                            elem_classes=["camera-hint"],
                        )

                        # Serial connection panel (still handled by JavaScript)
                        gr.HTML('<div id="dxl-panel-host"></div>', elem_id="dxl-panel-host-wrapper")

                        # Motor control inputs (Python-based)
                        with gr.Row():
                            motor_id_input = gr.Number(
                                label="Motor ID",
                                value=1,
                                minimum=0,
                                maximum=252,
                                precision=0,
                            )
                        with gr.Row():
                            goal_slider = gr.Slider(
                                label="Goal Position (degrees)",
                                minimum=0,
                                maximum=360,
                                value=90,
                                step=1,
                            )
                        with gr.Row():
                            ping_btn = gr.Button("Ping", size="sm")
                            torque_on_btn = gr.Button("Torque ON", size="sm", variant="secondary")
                            torque_off_btn = gr.Button("Torque OFF", size="sm")
                        with gr.Row():
                            send_goal_btn = gr.Button("Send Goal Position", variant="primary")
                        motor_status = gr.Markdown("Status: Ready")

                # Robot Control (Reachy Mini via WebSocket)
                robot_hint = gr.Markdown(
                    robot_hint_text(False), elem_classes=["camera-hint"]
                )
                with gr.Group(visible=False, elem_id="robot-panel-container") as robot_group:
                    with gr.Accordion("Reachy Mini Robot Control", open=True):
                        gr.Markdown(
                            "**WebSocket Control** - Connects to localhost:8000 for real-time robot control.",
                            elem_classes=["camera-hint"],
                        )

                        # WebSocket connection area (will be managed by JavaScript)
                        # Status is shown dynamically by JavaScript inside this div
                        gr.HTML('<div id="robot-ws-host"></div>', elem_id="robot-ws-host-wrapper")

        # Every scene-changing handler updates this full component set.
        all_outputs = [
            story_state,
            stage,
            dialogue,
            meta,
            camera_hint,
            webrtc_component,
            voice_hint,
            voice_section,
            motor_hint,
            motor_group,
            robot_hint,
            robot_group,
            choice_radio,
            input_prompt,
            input_group,
            prev_btn,
            next_btn,
            right_column,
        ]

        # Hidden JSON for passing packet bytes between Python and JavaScript
        # Note: gr.State doesn't work well with JavaScript, so we use JSON
        packet_bytes_json = gr.JSON(visible=False, value=[])
        response_bytes_json = gr.JSON(visible=False, value=[])
        motor_packets_json = gr.JSON(visible=False, value=[])  # For scene motor commands

        # Hidden textbox for passing audio path to JavaScript
        audio_path_box = gr.Textbox(visible=False, value="")

        # Hidden JSON for passing robot pose to JavaScript
        robot_pose_json = gr.JSON(visible=False, value=None)

        # Load initialization scripts (Dynamixel + Robot WebSocket) on page load.
        combined_init_js = f"""
        () => {{
            // Initialize Dynamixel
            ({load_dxl_script_js()})();
            // Initialize Robot WebSocket
            ({load_robot_ws_script_js()})();
        }}
        """

        demo.load(
            fn=load_initial_state,
            inputs=None,
            outputs=all_outputs,
            js=combined_init_js,
        )

        def _wire_scene_side_effects(event) -> None:
            """Attach the three parallel follow-up chains (audio playback,
            motor packets, robot pose) to a scene-changing event so each
            chain sees the freshly updated story_state."""
            # Audio chain
            event.then(
                fn=get_scene_audio,
                inputs=[story_state],
                outputs=[audio_path_box],
            ).then(
                fn=None,
                inputs=[audio_path_box],
                outputs=[],
                js=play_scene_audio_js(),
            )
            # Motor chain (parallel)
            event.then(
                fn=get_scene_motor_packets,
                inputs=[story_state],
                outputs=[motor_packets_json],
            ).then(
                fn=None,
                inputs=[motor_packets_json],
                outputs=[],
                js=execute_motor_packets_js(),
            )
            # Robot chain (parallel)
            event.then(
                fn=get_scene_robot_pose,
                inputs=[story_state],
                outputs=[robot_pose_json],
            ).then(
                fn=None,
                inputs=[robot_pose_json],
                outputs=[],
                js=send_robot_pose_js(),
            )

        # Navigation buttons with automatic motor command execution, audio
        # playback, and robot control.
        _wire_scene_side_effects(
            prev_btn.click(
                fn=lambda state: change_scene(state, -1),
                inputs=story_state,
                outputs=all_outputs,
            )
        )
        _wire_scene_side_effects(
            next_btn.click(
                fn=lambda state: change_scene(state, 1),
                inputs=story_state,
                outputs=all_outputs,
            )
        )
        # Choice handler
        _wire_scene_side_effects(
            choice_radio.change(
                fn=handle_choice,
                inputs=[story_state, choice_radio],
                outputs=all_outputs,
            )
        )
        # Input submit button
        _wire_scene_side_effects(
            input_submit_btn.click(
                fn=handle_input,
                inputs=[story_state, user_input],
                outputs=all_outputs,
            )
        )
        # Input enter key
        _wire_scene_side_effects(
            user_input.submit(
                fn=handle_input,
                inputs=[story_state, user_input],
                outputs=all_outputs,
            )
        )

        def _wire_dxl_command(button, build_fn, build_inputs) -> None:
            """Wire a motor-control button through the standard round trip:
            Python builds packet -> JS sends/receives -> Python parses."""
            button.click(
                fn=build_fn,
                inputs=build_inputs,
                outputs=[packet_bytes_json],
            ).then(
                fn=None,
                inputs=[packet_bytes_json],
                outputs=[response_bytes_json],
                js=dxl_send_and_receive_js(),
            ).then(
                fn=dxl_parse_response,
                inputs=[response_bytes_json],
                outputs=[motor_status],
            )

        # Motor control event handlers
        _wire_dxl_command(ping_btn, dxl_build_ping_packet, [motor_id_input])
        _wire_dxl_command(
            torque_on_btn,
            lambda motor_id: dxl_build_torque_packet(motor_id, True),
            [motor_id_input],
        )
        _wire_dxl_command(
            torque_off_btn,
            lambda motor_id: dxl_build_torque_packet(motor_id, False),
            [motor_id_input],
        )
        _wire_dxl_command(
            send_goal_btn,
            dxl_build_goal_position_packet,
            [motor_id_input, goal_slider],
        )

    return demo
1414
+
1415
+
1416
def main() -> None:
    """Launch the Visual Novel Gradio app with FastAPI for static file serving."""
    # Static directories are resolved relative to this script's location.
    base_dir = os.path.dirname(os.path.abspath(__file__))

    # FastAPI hosts the static assets; the Gradio app is mounted at "/".
    server = FastAPI()
    server.mount(
        "/user-assets",
        StaticFiles(directory=os.path.join(base_dir, "assets")),
        name="user-assets",
    )
    server.mount(
        "/web",
        StaticFiles(directory=os.path.join(base_dir, "web")),
        name="web",
    )
    server = gr.mount_gradio_app(server, build_app(), path="/")

    # Launch with proper shutdown handling
    import uvicorn

    try:
        uvicorn.run(
            server,
            host="127.0.0.1",
            port=7860,
            log_level="info",
            timeout_graceful_shutdown=1,  # Quick shutdown
        )
    except KeyboardInterrupt:
        print("\n[INFO] Server stopped")
1444
+
1445
+
1446
+
1447
+ if __name__ == "__main__":
1448
+ main()