|
3 | 3 | "nbformat_minor": 0, |
4 | 4 | "metadata": { |
5 | 5 | "colab": { |
6 | | - "gpuType": "T4" |
| 6 | + "gpuType": "T4", |
| 7 | + "provenance": [] |
7 | 8 | }, |
8 | 9 | "kernelspec": { |
9 | 10 | "name": "python3", |
|
153 | 154 | "\n", |
154 | 155 | "print(\"\\n=== Installing server requirements (prefer repo pins if present) ===\")\n", |
155 | 156 | "if Path(\"requirements-nvidia.txt\").exists():\n", |
156 | | - " sh(\"/content/bin/micromamba run -n cb311 pip install -U pip setuptools wheel\", check=True)\n", |
157 | | - " sh(\"/content/bin/micromamba run -n cb311 pip install -r requirements-nvidia.txt\", check=True)\n", |
| 157 | + " sh(\"/content/bin/micromamba run -n cb311 pip install -U pip setuptools wheel\", check=False)\n", |
| 158 | + " sh(\"/content/bin/micromamba run -n cb311 pip install -r requirements-nvidia.txt\", check=False)\n", |
158 | 159 | "else:\n", |
159 | 160 | " sh(\n", |
160 | 161 | " \"/content/bin/micromamba run -n cb311 pip install -U pip setuptools wheel && \"\n", |
161 | 162 | " \"/content/bin/micromamba run -n cb311 pip install \"\n", |
162 | 163 | " \"fastapi 'uvicorn[standard]' pyyaml soundfile librosa safetensors \"\n", |
163 | 164 | " \"python-multipart requests jinja2 watchdog aiofiles unidecode inflect tqdm \"\n", |
164 | 165 | " \"pydub audiotsm praat-parselmouth\",\n", |
165 | | - " check=True\n", |
| 166 | + " check=False\n", |
166 | 167 | " )\n", |
167 | 168 | "\n", |
168 | 169 | "print(\"\\n=== Removing old stdout log ===\")\n", |
|
191 | 192 | ")\n", |
192 | 193 | "\n", |
193 | 194 | "with open(LOG_STDOUT, \"w\", encoding=\"utf-8\", errors=\"replace\") as f:\n", |
194 | | - " import select\n", |
195 | 195 | " shown_link = False\n", |
196 | 196 | " while True:\n", |
197 | | - " # Wait briefly for stdout, but still poll port status regularly\n", |
198 | | - " ready, _, _ = select.select([proc.stdout], [], [], 0.2)\n", |
199 | | - " if ready:\n", |
200 | | - " line = proc.stdout.readline()\n", |
201 | | - " if line:\n", |
202 | | - " print(line, end=\"\")\n", |
203 | | - " f.write(line)\n", |
204 | | - " f.flush()\n", |
205 | | - " else:\n", |
206 | | - " # EOF on pipe\n", |
207 | | - " if proc.poll() is not None:\n", |
208 | | - " print(\"\\n=== Server process exited with code\", proc.returncode, \"===\")\n", |
209 | | - " break\n", |
| 197 | + " line = proc.stdout.readline()\n", |
| 198 | + " if line:\n", |
| 199 | + " print(line, end=\"\")\n", |
| 200 | + " f.write(line)\n", |
| 201 | + " f.flush()\n", |
210 | 202 | "\n", |
211 | 203 | " if (not shown_link) and port_open():\n", |
212 | 204 | " shown_link = True\n", |
|
215 | 207 | " from google.colab.output import serve_kernel_port_as_window\n", |
216 | 208 | " serve_kernel_port_as_window(PORT)\n", |
217 | 209 | "\n", |
| 210 | + " # Verify model load status via server endpoint\n", |
218 | 211 | " try:\n", |
219 | 212 | " mi = requests.get(f\"http://127.0.0.1:{PORT}/api/model-info\", timeout=2).json()\n", |
220 | 213 | " print(\"\\n/api/model-info:\", mi)\n", |
|
223 | 216 | "\n", |
224 | 217 | " if proc.poll() is not None:\n", |
225 | 218 | " print(\"\\n=== Server process exited with code\", proc.returncode, \"===\")\n", |
226 | | - " break\n"
| 219 | + " break" |
227 | 220 | ], |
228 | 221 | "metadata": { |
229 | 222 | "id": "n8DXZwq1uQEB" |
|
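Stripped of the notebook-JSON escaping, the new streaming loop above follows a standard pattern: block on readline() to mirror the server's stdout into a log file, and stop once the child terminates. Below is a self-contained sketch of that pattern, not the notebook's exact code; the notebook additionally relies on port_open(), PORT, and LOG_STDOUT from earlier cells, and queries /api/model-info once the port is up.

import subprocess
import sys

# Spawn a child whose stdout is streamed line-by-line, as the cell above
# does for the server process.
proc = subprocess.Popen(
    [sys.executable, "-c", "print('starting'); print('ready')"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    text=True,
)

with open("server.log", "w", encoding="utf-8", errors="replace") as f:
    while True:
        line = proc.stdout.readline()  # blocks until output or EOF
        if line:
            print(line, end="")
            f.write(line)
            f.flush()
        # Break only after exit *and* EOF so buffered lines are not lost
        if proc.poll() is not None and not line:
            break

print("=== Server process exited with code", proc.returncode, "===")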