|
1 | 1 | import logging |
| 2 | +import math |
2 | 3 | from abc import ABC, abstractmethod |
| 4 | +from time import perf_counter |
| 5 | +from uuid import uuid4 |
3 | 6 | from warnings import warn |
4 | 7 |
|
5 | 8 | import pandas as pd |
|
11 | 14 | from moabb.evaluations.utils import ( |
12 | 15 | Emissions, |
13 | 16 | _convert_sklearn_params_to_optuna, |
| 17 | + _create_save_path, |
14 | 18 | _create_scorer, |
15 | 19 | _DictScorer, |
| 20 | + _ensure_fitted, |
| 21 | + _get_nchan, |
| 22 | + _pipeline_requires_epochs, |
| 23 | + _save_model_cv, |
| 24 | + _score_and_update, |
16 | 25 | check_search_available, |
17 | 26 | ) |
18 | 27 | from moabb.paradigms.base import BaseParadigm |
@@ -144,6 +153,11 @@ def __init__( |
144 | 153 | if additional_columns is None: |
145 | 154 | self.additional_columns = [] |
146 | 155 |
|
| 156 | + if self.cv_class is not None and hasattr(self.cv_class, "metadata_columns"): |
| 157 | + for col in self.cv_class.metadata_columns: |
| 158 | + if col not in self.additional_columns: |
| 159 | + self.additional_columns.append(col) |
| 160 | + |
147 | 161 | if self.optuna and not optuna_available: |
148 | 162 | raise ImportError("Optuna is not available. Please install it first.") |
149 | 163 | if (self.time_out != 60 * 15) and not self.optuna: |
@@ -222,9 +236,178 @@ def _resolve_cv(self, default_class, default_kwargs=None): |
222 | 236 | cv_kwargs = {} if default_kwargs is None else dict(default_kwargs) |
223 | 237 | else: |
224 | 238 | cv_class = self.cv_class |
225 | | - cv_kwargs = {} if self.cv_kwargs is None else dict(self.cv_kwargs) |
| 239 | + cv_kwargs = dict(self.cv_kwargs) |
226 | 240 | return cv_class, cv_kwargs |
227 | 241 |
|
def _load_data(
    self,
    dataset,
    run_pipes,
    process_pipeline,
    postprocess_pipeline,
    subjects=None,
):
    """Fetch the paradigm data for one evaluation run.

    If any pipeline in *run_pipes* requires MNE Epochs, the data is
    loaded as epochs (and per-process pipelines are skipped); otherwise
    the evaluation's own ``return_epochs`` setting is honored.

    Parameters
    ----------
    dataset : BaseDataset
        Dataset to load.
    run_pipes : dict
        Pipelines that will be run; inspected for epoch requirements.
    process_pipeline : Pipeline
        Processing pipeline applied to the data.
    postprocess_pipeline : Pipeline | None
        Optional post-processing pipeline.
    subjects : list | None
        Subjects to load; all subjects when None.

    Returns
    -------
    X : array-like or Epochs
        Loaded data.
    y : array-like
        Labels.
    metadata : DataFrame
        Metadata associated with the data.
    """
    needs_epochs = any(
        _pipeline_requires_epochs(pipe) for pipe in run_pipes.values()
    )
    get_data_kwargs = {
        "dataset": dataset,
        # any() yields a bool, so this is True when epochs are required,
        # and falls back to the configured flag otherwise
        "return_epochs": needs_epochs or self.return_epochs,
        "return_raws": self.return_raws,
        "cache_config": self.cache_config,
        "postprocess_pipeline": postprocess_pipeline,
        "process_pipelines": None if needs_epochs else [process_pipeline],
    }
    if subjects is not None:
        get_data_kwargs["subjects"] = subjects
    return self.paradigm.get_data(**get_data_kwargs)
| 289 | + |
@staticmethod
def _get_nchan(X):
    """Return the number of channels in *X* (Epochs or ndarray)."""
    # Delegate to the shared module-level utility so every evaluation
    # computes the channel count with the same rule.
    channel_count = _get_nchan(X)
    return channel_count
| 294 | + |
def _build_scored_result(
    self,
    dataset,
    subject,
    session,
    pipeline,
    n_samples,
    n_channels,
    duration,
    scorer,
    model,
    X_test,
    y_test,
    split_metadata=None,
    **extra,
):
    """Build a result dict and score it in one place.

    Collects split metadata (explicit or from the active CV splitter),
    builds the canonical result dict via ``_build_result``, then scores
    the fitted *model* on ``(X_test, y_test)``.

    Parameters
    ----------
    split_metadata : dict | None
        Extra columns from the CV split. When None, the current
        splitter's ``get_metadata()`` is used if available.
    **extra
        Additional result columns merged on top of the split metadata.

    Returns
    -------
    dict
        The result dict with its score filled in; when scoring raises
        ``ValueError`` and ``error_score`` is not ``"raise"``, the score
        is set to ``error_score`` instead.
    """
    metadata = {}
    if split_metadata is None:
        # Fall back to metadata exposed by the splitter currently in use,
        # if the configured CV object tracks one.
        splitter = getattr(getattr(self, "cv", None), "_current_splitter", None)
        if splitter is not None and hasattr(splitter, "get_metadata"):
            split_metadata = splitter.get_metadata()
    if split_metadata:
        metadata.update(split_metadata)
    metadata.update(extra)
    res = self._build_result(
        dataset,
        subject,
        session,
        pipeline,
        n_samples,
        n_channels,
        duration,
        **metadata,
    )
    try:
        return _score_and_update(res, scorer, model, X_test, y_test)
    except ValueError:
        if self.error_score == "raise":
            # Bare raise re-raises the active exception with its original
            # traceback intact (idiomatic; no need to re-bind it).
            raise
        res["score"] = self.error_score
        return res
| 337 | + |
def _fit_cv(self, model, X_train, y_train, tracker=None):
    """Fit *model* on one CV fold, optionally tracked by CodeCarbon.

    When *tracker* is given, the fit runs inside a uniquely named
    CodeCarbon task so its emissions can be attributed to this fold.

    Returns
    -------
    tuple
        ``(duration, emissions, task_name)`` — fit wall time in seconds,
        emissions (NaN when untracked or unavailable), and the task name
        (None when untracked).
    """
    tracking = tracker is not None
    fold_task = str(uuid4()) if tracking else None
    if tracking:
        tracker.start_task(fold_task)
    tic = perf_counter()
    model.fit(X_train, y_train)
    elapsed = perf_counter() - tic
    co2 = math.nan
    if tracking:
        task_data = tracker.stop_task()
        if task_data:
            co2 = task_data.emissions
    _ensure_fitted(model)
    return elapsed, co2, fold_task
| 353 | + |
def _maybe_save_model_cv(
    self, model, dataset, subject, session, name, cv_ind, eval_type
):
    """Persist the model for one CV fold when saving is configured.

    Does nothing unless both an ``hdf5_path`` and the ``save_model``
    flag are set on the evaluation.
    """
    if self.hdf5_path is not None and self.save_model:
        save_path = _create_save_path(
            hdf5_path=self.hdf5_path,
            code=dataset.code,
            subject=subject,
            session=session,
            name=name,
            grid=self.search,
            eval_type=eval_type,
        )
        _save_model_cv(model=model, save_path=save_path, cv_index=str(cv_ind))
| 370 | + |
@staticmethod
def _attach_emissions(res, emissions, task_name):
    """Record CodeCarbon tracking data on a result dict (in place).

    Parameters
    ----------
    res : dict
        Result dict to update.
    emissions : float
        Emissions reported by CodeCarbon (may be NaN); stored scaled
        by 1000.
    task_name : str | None
        CodeCarbon task name used for this fit.
    """
    # Bug fix: a stray trailing comma previously made this a 1-tuple
    # (`(1000 * emissions,)`) instead of a scalar, which is inconsistent
    # with the scalar `codecarbon_task_name` column below.
    res["carbon_emission"] = 1000 * emissions
    res["codecarbon_task_name"] = task_name
| 375 | + |
def _build_result(
    self,
    dataset,
    subject,
    session,
    pipeline,
    n_samples,
    n_channels,
    duration,
    **extra,
):
    """Assemble a result dict with every required column.

    This is the single place where the evaluation result schema is
    defined; evaluation subclasses should use it instead of building
    the dict by hand so the schema stays consistent when columns are
    added or evaluations are merged.

    Any ``additional_columns`` not supplied through *extra* default to
    NaN so that ``Results.add()`` never fails on a missing key.

    Returns
    -------
    dict
        The base result record, including all ``additional_columns``.
    """
    record = {
        "time": duration,
        "dataset": dataset,
        "subject": subject,
        "session": session,
        "n_samples": n_samples,
        "n_channels": n_channels,
        "pipeline": pipeline,
    }
    # Append every requested extra column that the base schema does not
    # already provide, defaulting absent values to NaN.
    for column in self.additional_columns:
        if column not in record:
            record[column] = extra.get(column, math.nan)
    return record
| 410 | + |
228 | 411 | def process(self, pipelines, param_grid=None, postprocess_pipeline=None): |
229 | 412 | """Runs all pipelines on all datasets. |
230 | 413 |
|
|
0 commit comments