diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..88b9004 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,23 @@ +# ASL toolkit Copilot Instructions + +- Focus on the `asltk` Python library for Arterial Spin Labeling (ASL) MRI processing. +- Prefer code and APIs from the workspace (e.g., `asltk.asldata`, `asltk.reconstruction`, `asltk.utils`). +- Use concise, clear bullet points and code examples. +- Reference workspace files and symbols with links when possible. +- Follow the project's coding style and documentation patterns (Google-style docstrings). +- Suggest improvements or fixes based on the workspace context. +- Avoid general Python advice unless relevant to the workspace. +- Respect the project's contribution guidelines and code of conduct. +- Highlight available scripts in `asltk/scripts` for common workflows. +- Use supported image formats: `.nii`, `.nii.gz`, `.mha`, `.nrrd`. +- Ensure that the code is syntactically correct and adheres to the project's coding standards. +- Be sure about the documentation and comments. They should be clear and concise and use the correct Python docstring format. +- Create commit messages with a detailed description of the changes made, including any bug fixes or new features. +- Be as much specific as possible in the commit messages, including the files affected and the nature of the changes. 
+- Uses for commit messages prefixes the following pattern: + - `ENH:` for new features and code enhancements + - `BUG:` for bug fixes and general corrections + - `DOC:` for documentation changes + - `STY:` for formatting changes (not affecting code logic) + - `TEST:` for adding or modifying tests + diff --git a/.github/copilot_instructions.md b/.github/copilot_instructions.md deleted file mode 100644 index 9ec3c10..0000000 --- a/.github/copilot_instructions.md +++ /dev/null @@ -1,12 +0,0 @@ -# Copilot Instructions - -## Code Commit Guidelines -- Ensure that the code is syntactically correct and adheres to the project's coding standards. -- Be sure about the documentation and comments. They should be clear and concise and use the correct Python docstring format. -- Create commit messages with a detailed description of the changes made, including any bug fixes or new features. -- Uses for commit messages prefixes the following pattern: - - `ENH:` for new features and code enhancements - - `BUG:` for bug fixes and general corrections - - `DOC:` for documentation changes - - `STY:` for formatting changes (not affecting code logic) - - `TEST:` for adding or modifying tests \ No newline at end of file diff --git a/.github/workflows/ci_develop.yaml b/.github/workflows/ci_develop.yaml index fdd9b0d..984c84f 100644 --- a/.github/workflows/ci_develop.yaml +++ b/.github/workflows/ci_develop.yaml @@ -1,16 +1,17 @@ -name: ASLtk Continuous Integration for Develop Branch +name: CI for Develop Branch on: push: branches: [ develop ] pull_request: branches: [ develop ] + workflow_dispatch: jobs: linux: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9"] steps: @@ -32,7 +33,7 @@ jobs: run: poetry install - name: Run code formatting check - run: poetry run task lint + run: poetry run task lint-check - name: Run project tests run: poetry run task test --cov-report=xml --ignore-glob='./asltk/scripts/*.py' @@ -47,7 +48,7 @@ jobs: runs-on: 
windows-latest strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9"] steps: - name: Clone repo @@ -69,7 +70,7 @@ jobs: run: poetry install - name: Run code formatting check - run: poetry run task lint + run: poetry run task lint-check - name: Run project tests run: poetry run task test --cov-report=xml --ignore-glob='./asltk/scripts/*.py' @@ -84,7 +85,7 @@ jobs: runs-on: macos-latest strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9"] steps: - name: Clone repo @@ -103,7 +104,7 @@ jobs: run: poetry install - name: Run code formatting check - run: poetry run task lint + run: poetry run task lint-check - name: Run project tests run: poetry run task test --cov-report=xml --ignore-glob='./asltk/scripts/*.py' diff --git a/.github/workflows/ci_main.yaml b/.github/workflows/ci_main.yaml index cc3cf58..af23618 100644 --- a/.github/workflows/ci_main.yaml +++ b/.github/workflows/ci_main.yaml @@ -1,4 +1,4 @@ -name: ASLtk Continuous Integration for Production Branch +name: CI for Production Branch on: push: branches: [ main ] @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9"] steps: @@ -32,7 +32,7 @@ jobs: run: poetry install - name: Run code formatting check - run: poetry run task lint + run: poetry run task lint-check - name: Run project tests run: poetry run task test --cov-report=xml --ignore-glob='./asltk/scripts/*.py' @@ -47,7 +47,7 @@ jobs: runs-on: windows-latest strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9"] steps: - name: Clone repo @@ -69,7 +69,7 @@ jobs: run: poetry install - name: Run code formatting check - run: poetry run task lint + run: poetry run task lint-check - name: Run project tests run: poetry run task test --cov-report=xml --ignore-glob='./asltk/scripts/*.py' @@ -84,7 +84,7 @@ jobs: runs-on: macos-latest strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9"] steps: - name: Clone 
repo @@ -103,7 +103,7 @@ jobs: run: poetry install - name: Run code formatting check - run: poetry run task lint + run: poetry run task lint-check - name: Run project tests run: poetry run task test --cov-report=xml --ignore-glob='./asltk/scripts/*.py' diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..0dedccd --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,12 @@ +{ + "python.testing.pytestArgs": [ + "tests" + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "github.copilot.chat.commitMessageGeneration.instructions": [ + { + "text": "Use conventional commit message format and be as precise and clear as possible. Try to include relevante information that will be useful to create the project realease note. Also use the following prefix patters: ENH: for commits that gives general enhancements and improvements in the code, DOC: commits that give majority documentation and explanations contributios, BUG: commits that fixes bugs or general errors, STY: commits that adjust code styling." 
+ } + ] +} \ No newline at end of file diff --git a/asltk/__init__.py b/asltk/__init__.py index b0456de..6ec7c71 100644 --- a/asltk/__init__.py +++ b/asltk/__init__.py @@ -1,6 +1,11 @@ +import os + BIDS_IMAGE_FORMATS = ('.nii', '.nii.gz') AVAILABLE_IMAGE_FORMATS = ('.nii', '.nii.gz', '.mha', '.nrrd') +PARCELLATION_REPORT_PATH = os.path.join( + os.path.expanduser('~'), 'asltk', os.path.sep, 'parcellation_reports' +) # Import logging functionality for easy access from .logging_config import configure_for_scripts, get_logger, setup_logging diff --git a/asltk/asldata.py b/asltk/asldata.py index 3c227d5..2b83b4f 100644 --- a/asltk/asldata.py +++ b/asltk/asldata.py @@ -1,9 +1,12 @@ +import copy import os +import warnings import numpy as np -from asltk.logging_config import get_logger, log_data_info -from asltk.utils import load_image +from asltk.logging_config import get_logger, log_data_info, log_function_call +from asltk.utils.image_manipulation import collect_data_volumes +from asltk.utils.io import load_image class ASLData: @@ -13,21 +16,19 @@ def __init__( ): """ASLData constructor - The basic data need to represent a ASL data is the full path to load - the image file, the Labeling Duration (LD) array and the Post-labeling - Delay (PLD) array. Is none of those information is passed, a null - ASLData object is created, which can be further been fed using the - get/set methods. + The basic data needed to represent ASL data are: + - The full path to load the image file + - The Labeling Duration (LD) array + - The Post-labeling Delay (PLD) array - The constructor is generic for classic ASL data and also for multi-TE - and Diffusion-Weighted (DW) ASL protocols. There is a specfic get/set - method for TE/DW data. If TE/DW is not provided, then it is assumed as - type `None` for those data properties. 
In order to informs the TE or DW - values in the object instance, you can use the tags `te_values` or - `dw_values` in the construction call + If none of these are provided, a null ASLData object is created, which can be further populated using the get/set methods. + + The constructor supports classic ASL data, multi-TE, and Diffusion-Weighted (DW) ASL protocols. + There are specific get/set methods for TE/DW data. If TE/DW is not provided, those properties are set to `None`. + To provide TE or DW values, use the `te_values` or `dw_values` keyword arguments. Examples: - By default, the LD and PLD arrays are indicated (as empty lists) + By default, the LD and PLD arrays are empty lists. >>> data = ASLData() >>> data.get_ld() @@ -35,21 +36,22 @@ def __init__( >>> data.get_pld() [] - >>> data = ASLData(te_values=[13.0,20.2,50.5,90.5,125.2]) + >>> data = ASLData(te_values=[13.0, 20.2, 50.5, 90.5, 125.2]) >>> data.get_te() [13.0, 20.2, 50.5, 90.5, 125.2] - >>> data = ASLData(dw_values=[13.0,20.2,50.5,90.5,125.2]) + >>> data = ASLData(dw_values=[13.0, 20.2, 50.5, 90.5, 125.2]) >>> data.get_dw() [13.0, 20.2, 50.5, 90.5, 125.2] - Other parameters: Set the ASL data parameters + Other parameters: pcasl (str, optional): The ASL data full path with filename. Defaults to ''. m0 (str, optional): The M0 data full path with filename. Defaults to ''. ld_values (list, optional): The LD values. Defaults to []. pld_values (list, optional): The PLD values. Defaults to []. te_values (list, optional): The TE values. Defaults to None. dw_values (list, optional): The DW values. Defaults to None. + average_m0 (bool, optional): If True, average the M0 image across the first dimension. This may be helpful for MRI acquisitions that collect an subset sample of M0 volumes and take the average of it. Defaults to False. 
""" self._asl_image = None self._m0_image = None @@ -64,18 +66,43 @@ def __init__( logger.info('Creating ASLData object') if kwargs.get('pcasl') is not None: - pcasl_path = kwargs.get('pcasl') - logger.info(f'Loading ASL image from: {pcasl_path}') - self._asl_image = load_image(pcasl_path) - if self._asl_image is not None: - log_data_info('ASL image', self._asl_image.shape, pcasl_path) + if isinstance(kwargs.get('pcasl'), str): + pcasl_path = kwargs.get('pcasl') + logger.info(f'Loading ASL image from: {pcasl_path}') + self._asl_image = load_image(pcasl_path) + if self._asl_image is not None: + log_data_info( + 'ASL image', self._asl_image.shape, pcasl_path + ) + elif isinstance(kwargs.get('pcasl'), np.ndarray): + self._asl_image = kwargs.get('pcasl') + logger.info('ASL image loaded as numpy array') + log_data_info( + 'ASL image', self._asl_image.shape, 'numpy array' + ) if kwargs.get('m0') is not None: - m0_path = kwargs.get('m0') - logger.info(f'Loading M0 image from: {m0_path}') - self._m0_image = load_image(m0_path) - if self._m0_image is not None: - log_data_info('M0 image', self._m0_image.shape, m0_path) + if isinstance(kwargs.get('m0'), str): + m0_path = kwargs.get('m0') + logger.info(f'Loading M0 image from: {m0_path}') + self._m0_image = load_image(m0_path) + + # Check if M0 image is 4D and warn if so + if ( + self._m0_image is not None + and len(self._m0_image.shape) > 3 + ): + warnings.warn('M0 image has more than 3 dimensions.') + + if self._m0_image is not None: + log_data_info('M0 image', self._m0_image.shape, m0_path) + elif isinstance(kwargs.get('m0'), np.ndarray): + self._m0_image = kwargs.get('m0') + logger.info('M0 image loaded as numpy array') + log_data_info('M0 image', self._m0_image.shape, 'numpy array') + + if kwargs.get('average_m0', False): + self._m0_image = np.mean(self._m0_image, axis=0) self._parameters['ld'] = ( [] if kwargs.get('ld_values') is None else kwargs.get('ld_values') @@ -142,6 +169,11 @@ def set_image(self, image, spec: 
str): self._m0_image = image elif spec == 'pcasl': self._asl_image = image + else: + raise ValueError( + f'Invalid image type or path: {image}. ' + 'Please provide a valid file path or a numpy array.' + ) def get_ld(self): """Obtain the LD array values""" @@ -215,6 +247,30 @@ def set_dw(self, dw_values: list): self._check_input_parameter(dw_values, 'DW') self._parameters['dw'] = dw_values + def copy(self): + """ + Make a copy of the ASLData object. + This method creates a deep copy of the ASLData object, including all + its attributes and data. It is useful when you want to preserve the + original object while working with a modified version. + + Note: + This method uses `copy.deepcopy` to ensure that all nested objects + are also copied, preventing any unintended side effects from + modifying the original object. + + Examples: + >>> data = ASLData(pcasl='./tests/files/t1-mri.nrrd') + >>> data_copy = data.copy() + >>> type(data_copy) + + + + Returns: + ASLData: A new instance of ASLData that is a deep copy of the original object. + """ + return copy.deepcopy(self) + def __call__(self, spec: str): """Object caller to expose the image data. @@ -234,6 +290,20 @@ def __call__(self, spec: str): elif spec == 'm0': return self._m0_image + def __len__(self): + """Return the number of volumes in the ASL data. + + This method returns the number of volumes in the ASL data based on + the pCASL image format. + + Returns: + int: The number of volumes in the ASL data considering the `pcasl` data. 
+ """ + if self._asl_image is not None: + return len(collect_data_volumes(self._asl_image)[0]) + else: + return 0 + def _check_input_parameter(self, values, param_type): for v in values: if not isinstance(v, int) and not isinstance(v, float): @@ -255,3 +325,11 @@ def _check_ld_pld_sizes(self, ld, pld): logger.debug( f'LD and PLD size validation passed: {len(ld)} elements each' ) + + def _check_m0_dimension(self): + if len(self._m0_image.shape) > 3: + warnings.warn( + 'M0 image has more than 3 dimensions. ' + 'This may cause issues in processing. ' + 'Consider averaging the M0 image across the first dimension.' + ) diff --git a/asltk/data/__init__.py b/asltk/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/asltk/data/brain_atlas/AAL32024.json b/asltk/data/brain_atlas/AAL32024.json new file mode 100644 index 0000000..65f387b --- /dev/null +++ b/asltk/data/brain_atlas/AAL32024.json @@ -0,0 +1,180 @@ +{ + "atlas_name": "Automated Anatomical Labeling (AAL3) - 2024", + "dataset_url": "loamri/brain-atlas-aal32024", + "official_url": "https://www.gin.cnrs.fr/en/tools/aal/", + "description": "The automated anatomical parcellation AAL3 of the spatially normalized single-subject high-resolution T1 volume provided by the Montreal Neurological Institute (MNI).", + "dataset_doi": "10.34740/kaggle/dsv/12200283", + "citation_doi": [ + "10.1006/nimg.2001.0978", + "10.1016/j.neuroimage.2015.07.075", + "10.1006/nimg.2001.0978" + ], + "labels": { + "1": "Precentral_L", + "2": "Precentral_R", + "3": "Frontal_Sup_2_L", + "4": "Frontal_Sup_2_R", + "5": "Frontal_Mid_2_L", + "6": "Frontal_Mid_2_R", + "7": "Frontal_Inf_Oper_L", + "8": "Frontal_Inf_Oper_R", + "9": "Frontal_Inf_Tri_L", + "10": "Frontal_Inf_Tri_R", + "11": "Frontal_Inf_Orb_2_L", + "12": "Frontal_Inf_Orb_2_R", + "13": "Rolandic_Oper_L", + "14": "Rolandic_Oper_R", + "15": "Supp_Motor_Area_L", + "16": "Supp_Motor_Area_R", + "17": "Olfactory_L", + "18": "Olfactory_R", + "19": "Frontal_Sup_Medial_L", + 
"20": "Frontal_Sup_Medial_R", + "21": "Frontal_Med_Orb_L", + "22": "Frontal_Med_Orb_R", + "23": "Rectus_L", + "24": "Rectus_R", + "25": "OFCmed_L", + "26": "OFCmed_R", + "27": "OFCant_L", + "28": "OFCant_R", + "29": "OFCpost_L", + "30": "OFCpost_R", + "31": "OFClat_L", + "32": "OFClat_R", + "33": "Insula_L", + "34": "Insula_R", + "37": "Cingulate_Mid_L", + "38": "Cingulate_Mid_R", + "39": "Cingulate_Post_L", + "40": "Cingulate_Post_R", + "41": "Hippocampus_L", + "42": "Hippocampus_R", + "43": "ParaHippocampal_L", + "44": "ParaHippocampal_R", + "45": "Amygdala_L", + "46": "Amygdala_R", + "47": "Calcarine_L", + "48": "Calcarine_R", + "49": "Cuneus_L", + "50": "Cuneus_R", + "51": "Lingual_L", + "52": "Lingual_R", + "53": "Occipital_Sup_L", + "54": "Occipital_Sup_R", + "55": "Occipital_Mid_L", + "56": "Occipital_Mid_R", + "57": "Occipital_Inf_L", + "58": "Occipital_Inf_R", + "59": "Fusiform_L", + "60": "Fusiform_R", + "61": "Postcentral_L", + "62": "Postcentral_R", + "63": "Parietal_Sup_L", + "64": "Parietal_Sup_R", + "65": "Parietal_Inf_L", + "66": "Parietal_Inf_R", + "67": "SupraMarginal_L", + "68": "SupraMarginal_R", + "69": "Angular_L", + "70": "Angular_R", + "71": "Precuneus_L", + "72": "Precuneus_R", + "73": "Paracentral_Lobule_L", + "74": "Paracentral_Lobule_R", + "75": "Caudate_L", + "76": "Caudate_R", + "77": "Putamen_L", + "78": "Putamen_R", + "79": "Pallidum_L", + "80": "Pallidum_R", + "83": "Heschl_L", + "84": "Heschl_R", + "85": "Temporal_Sup_L", + "86": "Temporal_Sup_R", + "87": "Temporal_Pole_Sup_L", + "88": "Temporal_Pole_Sup_R", + "89": "Temporal_Mid_L", + "90": "Temporal_Mid_R", + "91": "Temporal_Pole_Mid_L", + "92": "Temporal_Pole_Mid_R", + "93": "Temporal_Inf_L", + "94": "Temporal_Inf_R", + "95": "Cerebellum_Crus1_L", + "96": "Cerebellum_Crus1_R", + "97": "Cerebellum_Crus2_L", + "98": "Cerebellum_Crus2_R", + "99": "Cerebellum_3_L", + "100": "Cerebellum_3_R", + "101": "Cerebellum_4_5_L", + "102": "Cerebellum_4_5_R", + "103": "Cerebellum_6_L", + 
"104": "Cerebellum_6_R", + "105": "Cerebellum_7b_L", + "106": "Cerebellum_7b_R", + "107": "Cerebellum_8_L", + "108": "Cerebellum_8_R", + "109": "Cerebellum_9_L", + "110": "Cerebellum_9_R", + "111": "Cerebellum_10_L", + "112": "Cerebellum_10_R", + "113": "Vermis_1_2", + "114": "Vermis_3", + "115": "Vermis_4_5", + "116": "Vermis_6", + "117": "Vermis_7", + "118": "Vermis_8", + "119": "Vermis_9", + "120": "Vermis_10", + "121": "Thal_AV_L", + "122": "Thal_AV_R", + "123": "Thal_LP_L", + "124": "Thal_LP_R", + "125": "Thal_VA_L", + "126": "Thal_VA_R", + "127": "Thal_VL_L", + "128": "Thal_VL_R", + "129": "Thal_VPL_L", + "130": "Thal_VPL_R", + "131": "Thal_IL_L", + "132": "Thal_IL_R", + "133": "Thal_Re_L", + "134": "Thal_Re_R", + "135": "Thal_MDm_L", + "136": "Thal_MDm_R", + "137": "Thal_MDl_L", + "138": "Thal_MDl_R", + "139": "Thal_LGN_L", + "140": "Thal_LGN_R", + "141": "Thal_MGN_L", + "142": "Thal_MGN_R", + "143": "Thal_PuI_L", + "144": "Thal_PuI_R", + "145": "Thal_PuM_L", + "146": "Thal_PuM_R", + "147": "Thal_PuA_L", + "148": "Thal_PuA_R", + "149": "Thal_PuL_L", + "150": "Thal_PuL_R", + "151": "ACC_sub_L", + "152": "ACC_sub_R", + "153": "ACC_pre_L", + "154": "ACC_pre_R", + "155": "ACC_sup_L", + "156": "ACC_sup_R", + "157": "N_Acc_L", + "158": "N_Acc_R", + "159": "VTA_L", + "160": "VTA_R", + "161": "SN_pc_L", + "162": "SN_pc_R", + "163": "SN_pr_L", + "164": "SN_pr_R", + "165": "Red_N_L", + "166": "Red_N_R", + "167": "LC_L", + "168": "LC_R", + "169": "Raphe_D", + "170": "Raphe_M" + } + } \ No newline at end of file diff --git a/asltk/data/brain_atlas/AAT2022.json b/asltk/data/brain_atlas/AAT2022.json new file mode 100644 index 0000000..44313f7 --- /dev/null +++ b/asltk/data/brain_atlas/AAT2022.json @@ -0,0 +1,42 @@ + { + "atlas_name": "Arterial Atlas Territories of the Human Brain - 2022", + "dataset_url": "loamri/brain-atlas-aat2022", + "official_url": "https://www.nitrc.org/projects/arterialatlas", + "description": "atlas of brain arterial territories based on lesion 
distributions in 1,298 acute stroke patients.", + "dataset_doi": "10.34740/kaggle/dsv/12200370", + "citation_doi": ["10.1038/s41597-022-01923-0"], + "labels": { + "1": "anterior cerebral artery left (ACAL)", + "2": "anterior cerebral artery right (ACAR)", + "3": "medial lenticulostriate left (MLSL)", + "4": "medial lenticulostriate right (MLSR)", + "5": "lateral lenticulostriate left (LLSL)", + "6": "lateral lenticulostriate right (LLSR)", + "7": "frontal pars of middle cerebral artery left (MCAFL)", + "8": "frontal pars of middle cerebral artery right (MCAFR)", + "9": "parietal pars of middle cerebral artery left (MCAPL)", + "10": "parietal pars of middle cerebral artery right (MCAPL)", + "11": "temporal pars of middle cerebral artery left (MCATL)", + "12": "temporal pars of middle cerebral artery right (MCATL)", + "13": "occipital pars of middle cerebral artery left (MCAOL)", + "14": "occipital pars of middle cerebral artery right (MCAOL)", + "15": "insular pars of middle cerebral artery left (MCAIL)", + "16": "insular pars of middle cerebral artery right (MCAIL)", + "17": "temporal pars of posterior cerebral artery left (PCATL)", + "18": "temporal pars of posterior cerebral artery right (PCATR)", + "19": "occipital pars of posterior cerebral artery left (PCAOL)", + "20": "occipital pars of posterior cerebral artery right (PCAOR)", + "21": "posterior choroidal and thalamoperfurators left (PCTPL)", + "22": "posterior choroidal and thalamoperfurators right (PCTPR)", + "23": "anteior choroidal and thalamoperfurators left (ACTPL)", + "24": "anterior choroidal and thalamoperfurators right (ACTPR)", + "25": "basilar left (BL)", + "26": "basilar right (BR)", + "27": "superior cerebellar left (SCL)", + "28": "superior cerebellar right (SCR)", + "29": "inferior cerebellar left (ICL)", + "30": "inferior cerebellar right (ICR)", + "31": "lateral ventricle left (LVL)", + "32": "lateral ventricle right (LVR)" + } + } \ No newline at end of file diff --git 
a/asltk/data/brain_atlas/AICHA2021.json b/asltk/data/brain_atlas/AICHA2021.json new file mode 100644 index 0000000..fff5c69 --- /dev/null +++ b/asltk/data/brain_atlas/AICHA2021.json @@ -0,0 +1,202 @@ +{ + "atlas_name": "AICHA: An atlas of intrinsic connectivity of homotopic areas", + "dataset_url": "loamri/brain-atlas-aicha2021", + "official_url": "https://www.gin.cnrs.fr/en/tools/aicha/", + "description": "AICHA (Atlas of Intrinsic Connectivity of Homotopic Areas) a functional brain ROIs atlas based on resting-state fMRI data acquired in 281 individuals. AICHA ROIs cover the whole cerebrum, each having 1- homogeneity of its constituting voxels intrinsic activity, and 2- a unique homotopic contralateral counterpart with which it has maximal intrinsic connectivity", + "dataset_doi": "10.34740/kaggle/dsv/12200555", + "citation_doi": ["10.1016/j.jneumeth.2015.07.013"], + "labels": { + "1": "G_Frontal_Sup-1", + "2": "G_Frontal_Sup-2", + "3": "G_Frontal_Sup-3", + "4": "S_Sup_Frontal-1", + "5": "S_Sup_Frontal-2", + "6": "S_Sup_Frontal-3", + "7": "S_Sup_Frontal-4", + "8": "S_Sup_Frontal-5", + "9": "S_Sup_Frontal-6", + "10": "G_Frontal_Mid-1", + "11": "G_Frontal_Mid-2", + "12": "G_Frontal_Mid-3", + "13": "G_Frontal_Mid-4", + "14": "G_Frontal_Mid-5", + "15": "S_Inf_Frontal-1", + "16": "S_Inf_Frontal-2", + "17": "G_Frontal_Inf_Tri-1", + "18": "G_Frontal_Sup_Orb-1", + "19": "G_Frontal_Mid_Orb-1", + "20": "G_Frontal_Mid_Orb-2", + "21": "G_Frontal_Inf_Orb-1", + "22": "G_Frontal_Inf_Orb-2", + "23": "S_Orbital-1", + "24": "S_Orbital-2", + "25": "S_Olfactory-1", + "26": "S_Precentral-1", + "27": "S_Precentral-2", + "28": "S_Precentral-3", + "29": "S_Precentral-4", + "30": "S_Precentral-5", + "31": "S_Precentral-6", + "32": "S_Rolando-1", + "33": "S_Rolando-2", + "34": "S_Rolando-3", + "35": "S_Rolando-4", + "36": "S_Postcentral-1", + "37": "S_Postcentral-2", + "38": "S_Postcentral-3", + "39": "G_Parietal_Sup-1", + "40": "G_Parietal_Sup-2", + "41": "G_Parietal_Sup-3", + "42": 
"G_Parietal_Sup-4", + "43": "G_Parietal_Sup-5", + "44": "G_Supramarginal-1", + "45": "G_SupraMarginal-2", + "46": "G_Supramarginal-3", + "47": "G_Supramarginal-4", + "48": "G_SupraMarginal-5", + "49": "G_SupraMarginal-6", + "50": "G_SupraMarginal-7", + "51": "G_Angular-1", + "52": "G_Angular-2", + "53": "G_Angular-3", + "54": "G_Parietal_Inf-1", + "55": "S_Intraparietal-1", + "56": "S_Intraparietal-2", + "57": "S_Intraparietal-3", + "58": "S_Intraoccipital-1", + "59": "G_Occipital_Pole-1", + "60": "G_Occipital_Lat-1", + "61": "G_Occipital_Lat-2", + "62": "G_Occipital_Lat-3", + "63": "G_Occipital_Lat-4", + "64": "G_Occipital_Lat-5", + "65": "G_Occipital_Sup-1", + "66": "G_Occipital_Sup-2", + "67": "G_Occipital_Mid-1", + "68": "G_Occipital_Mid-2", + "69": "G_Occipital_Mid-3", + "70": "G_Occipital_Mid-4", + "71": "G_Occipital_Inf-1", + "72": "G_Occipital_Inf-2", + "73": "G_Insula-anterior-1", + "74": "G_Insula-anterior-2", + "75": "G_Insula-anterior-3", + "76": "G_Insula-anterior-4", + "77": "G_Insula-anterior-5", + "78": "G_Insula-posterior-1", + "79": "G_Rolandic_Oper-1", + "80": "G_Rolandic_Oper-2", + "81": "G_Temporal_Sup-1", + "82": "G_Temporal_Sup-2", + "83": "G_Temporal_Sup-3", + "84": "G_Temporal_Sup-4", + "85": "S_Sup_Temporal-1", + "86": "S_Sup_Temporal-2", + "87": "S_Sup_Temporal-3", + "88": "S_Sup_Temporal-4", + "89": "S_Sup_Temporal-5", + "90": "G_Temporal_Mid-1", + "91": "G_Temporal_Mid-2", + "92": "G_Temporal_Mid-3", + "93": "G_Temporal_Mid-4", + "94": "G_Temporal_Inf-1", + "95": "G_Temporal_Inf-2", + "96": "G_Temporal_Inf-3", + "97": "G_Temporal_Inf-4", + "98": "G_Temporal_Inf-5", + "99": "G_Temporal_Pole_Sup-1", + "100": "G_Temporal_Pole_Sup-2", + "101": "G_Temporal_Pole_Mid-1", + "102": "G_Temporal_Pole_Mid-2", + "103": "G_Temporal_Pole_Mid-3", + "104": "G_Frontal_Sup_Medial-1", + "105": "G_Frontal_Sup_Medial-2", + "106": "G_Frontal_Sup_Medial-3", + "107": "S_Anterior_Rostral-1", + "108": "G_Frontal_Med_Orb-1", + "109": "G_Frontal_Med_Orb-2", + 
"110": "G_subcallosal-1", + "111": "G_Supp_Motor_Area-1", + "112": "G_Supp_Motor_Area-2", + "113": "G_Supp_Motor_Area-3", + "114": "S_Cingulate-1", + "115": "S_Cingulate-2", + "116": "S_Cingulate-3", + "117": "S_Cingulate-4", + "118": "S_Cingulate-5", + "119": "S_Cingulate-6", + "120": "S_Cingulate-7", + "121": "G_Cingulum_Ant-1", + "122": "G_Cingulum_Ant-2", + "123": "G_Cingulum_Mid-1", + "124": "G_Cingulum_Mid-2", + "125": "G_Cingulum_Mid-3", + "126": "G_Cingulum_Post-1", + "127": "G_Cingulum_Post-2", + "128": "G_Cingulum_Post-3", + "129": "G_Paracentral_Lobule-1", + "130": "G_Paracentral_Lobule-2", + "131": "G_Paracentral_Lobule-3", + "132": "G_Paracentral_Lobule-4", + "133": "G_Precuneus-1", + "134": "G_Precuneus-2", + "135": "G_Precuneus-3", + "136": "G_Precuneus-4", + "137": "G_Precuneus-5", + "138": "G_Precuneus-6", + "139": "G_Precuneus-7", + "140": "G_Precuneus-8", + "141": "G_Precuneus-9", + "142": "S_Parietooccipital-1", + "143": "S_Parietooccipital-2", + "144": "S_Parietooccipital-3", + "145": "S_Parietooccipital-4", + "146": "S_Parietooccipital-5", + "147": "S_Parietooccipital-6", + "148": "G_Cuneus-1", + "149": "G_Cuneus-2", + "150": "G_Calcarine-1", + "151": "G_Calcarine-2", + "152": "G_Calcarine-3", + "153": "G_Lingual-1", + "154": "G_Lingual-2", + "155": "G_Lingual-3", + "156": "G_Lingual-4", + "157": "G_Lingual-5", + "158": "G_Lingual-6", + "159": "G_Hippocampus-1", + "160": "G_Hippocampus-2", + "161": "G_ParaHippocampal-1", + "162": "G_ParaHippocampal-2", + "163": "G_ParaHippocampal-3", + "164": "G_ParaHippocampal-4", + "165": "G_ParaHippocampal-5", + "166": "G_Fusiform-1", + "167": "G_Fusiform-2", + "168": "G_Fusiform-3", + "169": "G_Fusiform-4", + "170": "G_Fusiform-5", + "171": "G_Fusiform-6", + "172": "G_Fusiform-7", + "173": "N_Amygdala-1", + "174": "N_Caudate-1", + "175": "N_Caudate-2", + "176": "N_Caudate-3", + "177": "N_Caudate-4", + "178": "N_Caudate-5", + "179": "N_Caudate-6", + "180": "N_Caudate-7", + "181": "N_Pallidum-1", + "182": 
"N_Putamen-2", + "183": "N_Putamen-3", + "184": "N_Thalamus-1", + "185": "N_Thalamus-2", + "186": "N_Thalamus-3", + "187": "N_Thalamus-4", + "188": "N_Thalamus-5", + "189": "N_Thalamus-6", + "190": "N_Thalamus-7", + "191": "N_Thalamus-8", + "192": "N_Thalamus-9" + } +} \ No newline at end of file diff --git a/asltk/data/brain_atlas/DKA2006.json b/asltk/data/brain_atlas/DKA2006.json new file mode 100644 index 0000000..badc69b --- /dev/null +++ b/asltk/data/brain_atlas/DKA2006.json @@ -0,0 +1,80 @@ +{ + "atlas_name": "Desikan-Killiany Atlas", + "dataset_url": "loamri/brain-atlas-dk2006", + "official_url": "https://surfer.nmr.mgh.harvard.edu/fswiki/CorticalParcellation", + "description": "A parcellation scheme widely used in the freesurfer world subdividing the human cerebral cortex on MRI scans into gyral based regions of interest.", + "dataset_doi": "10.34740/kaggle/dsv/12208673", + "citation_doi": ["10.1016/j.neuroimage.2006.01.021"], + "labels": { + "1": "L_white_matter", + "2": "L_Banks_superior_temporal_sulcus", + "3": "L_caudal_anterior_cingulate_cortex", + "4": "L_caudal_middle_frontal_gyrus", + "5": "L_corpus_calosum", + "6": "L_cuneus_cortex", + "7": "L_entorhinal_cortex", + "8": "L_fusiform_gyrus", + "9": "L_inferior_parietal_cortex", + "10": "L_inferior_temporal_gyrus", + "11": "L_isthmus-cingulate_cortex", + "12": "L_lateral_occipital_cortex", + "13": "L_lateral_orbitofrontal_cortex", + "14": "L_lingual_gyrus", + "15": "L_medial_orbitofrontal_cortex", + "16": "L_middle_temporal_gyrus", + "17": "L_parahippocampal_gyrus", + "18": "L_paracentral_lobule", + "19": "L_pars_opercularis", + "20": "L_pars_orbitalis", + "21": "L_pars_triangularis", + "22": "L_pericalcarine_cortex", + "23": "L_postcentral_gyrus", + "24": "L_posterior-cingulate_cortex", + "25": "L_precentral_gyrus", + "26": "L_precuneus_cortex", + "27": "L_rostral_anterior_cingulate_cortex", + "28": "L_rostral_middle_frontal_gyrus", + "29": "L_superior_frontal_gyrus", + "30": 
"L_superior_parietal_cortex", + "31": "L_superior_temporal_gyrus", + "32": "L_supramarginal_gyrus", + "33": "L_frontal_pole", + "34": "L_temporal_pole", + "35": "L_transverse_temporal_cortex", + "36": "R_white_matter", + "37": "R_Banks_superior_temporal_sulcus", + "38": "R_caudal_anterior_cingulate_cortex", + "39": "R_caudal_middle_frontal_gyrus", + "40": "R_corpus_calosum", + "41": "R_cuneus_cortex", + "42": "R_entorhinal_cortex", + "43": "R_fusiform_gyrus", + "44": "R_inferior_parietal_cortex", + "45": "R_inferior_temporal_gyrus", + "46": "R_isthmus-cingulate_cortex", + "47": "R_lateral_occipital_cortex", + "48": "R_lateral_orbitofrontal_cortex", + "49": "R_lingual_gyrus", + "50": "R_medial_orbitofrontal_cortex", + "51": "R_middle_temporal_gyrus", + "52": "R_parahippocampal_gyrus", + "53": "R_paracentral_lobule", + "54": "R_pars_opercularis", + "55": "R_pars_orbitalis", + "56": "R_pars_triangularis", + "57": "R_pericalcarine_cortex", + "58": "R_postcentral_gyrus", + "59": "R_posterior-cingulate_cortex", + "60": "R_precentral_gyrus", + "61": "R_precuneus_cortex", + "62": "R_rostral_anterior_cingulate_cortex", + "63": "R_rostral_middle_frontal_gyrus", + "64": "R_superior_frontal_gyrus", + "65": "R_superior_parietal_cortex", + "66": "R_superior_temporal_gyrus", + "67": "R_supramarginal_gyrus", + "68": "R_frontal_pole", + "69": "R_temporal_pole", + "70": "R_transverse_temporal_cortex" + } +} \ No newline at end of file diff --git a/asltk/data/brain_atlas/FCA7N2011.json b/asltk/data/brain_atlas/FCA7N2011.json new file mode 100644 index 0000000..b356efe --- /dev/null +++ b/asltk/data/brain_atlas/FCA7N2011.json @@ -0,0 +1,17 @@ +{ + "atlas_name": "Functional Connectivity Atlas 7 Networks", + "dataset_url": "loamri/brain-atlas-fca7n2011", + "official_url": "https://surfer.nmr.mgh.harvard.edu/fswiki/CorticalParcellation_Yeo2011", + "description": "Data from 1000 young, healthy adults were registered using surface-based alignment. 
All data were acquired on Siemens 3T scanners using the same functional and structural sequences. A clustering approach was employed to identify and replicate networks of functionally coupled regions across the cerebral cortex", + "dataset_doi": "10.34740/kaggle/dsv/12200454", + "citation_doi": ["10.1152/jn.00338.2011"], + "labels": { + "1": "7Networks_1", + "2": "7Networks_2", + "3": "7Networks_3", + "4": "7Networks_4", + "5": "7Networks_5", + "6": "7Networks_6", + "7": "7Networks_7" + } +} \ No newline at end of file diff --git a/asltk/data/brain_atlas/HA2003.json b/asltk/data/brain_atlas/HA2003.json new file mode 100644 index 0000000..e39cd03 --- /dev/null +++ b/asltk/data/brain_atlas/HA2003.json @@ -0,0 +1,93 @@ +{ + "atlas_name": "Hammersmith atlas", + "dataset_url": "loamri/brain-atlas-ha2003", + "official_url": "https://brain-development.org/brain-atlases/adult-brain-atlases/adult-brain-maximum-probability-map-hammers-mith-atlas-n30r83-in-mni-space/", + "description": "Adult brain maximum probability map with either 83 parcels in MNI space", + "dataset_doi": "10.34740/kaggle/dsv/12208631", + "citation_doi": ["10.1002/hbm.10123", "10.1016/j.neuroimage.2007.11.034", "10.1371/journal.pone.0180866"], + "labels": { + "1": "R_Hippocampus", + "2": "L_Hippocampus", + "3": "R_Amygdala", + "4": "L_Amygdala", + "5": "R_Anterior_temporal_lobe_medial_part", + "6": "L_Anterior_temporal_lobe_medial_part", + "7": "R_Anterior_temporal_lobe_lateral_part", + "8": "L_Anterior_temporal_lobe_lateral_part", + "9": "R_Parahippocampal_and_ambient_gyri", + "10": "L_Parahippocampal_and_ambient_gyri", + "11": "R_Superior_temporal_gyrus_posterior_part", + "12": "L_Superior_temporal_gyrus_posterior_part", + "13": "R_Middle_and_inferior_temporal_gyrus", + "14": "L_Middle_and_inferior_temporal_gyrus", + "15": "R_Fusiform_gyrus", + "16": "L_Fusiform_gyrus", + "17": "R_Cerebellum", + "18": "L_Cerebellum", + "19": "Brainstem", + "20": "L_Insula", + "21": "R_Insula", + "22": 
"L_Lateral_remainder_occipital_lobe", + "23": "R_Lateral_remainder_occipital_lobe", + "24": "L_Cingulate_gyrus_anterior_part", + "25": "R_Cingulate_gyrus_anterior_part", + "26": "L_Cingulate_gyrus_posterior_part", + "27": "R_Cingulate_gyrus_posterior_part", + "28": "L_Middle_frontal_gyrus", + "29": "R_Middle_frontal_gyrus", + "30": "L_Posterior_temporal_lobe", + "31": "R_Posterior_temporal_lobe", + "32": "L_Inferiolateral_remainder_parietal_lobe", + "33": "R_Inferiolateral_remainder_parietal_lobe", + "34": "L_Caudate_nucleus", + "35": "R_Caudate_nucleus", + "36": "L_Nucleus_accumbens", + "37": "R_Nucleus_accumbens", + "38": "L_Putamen", + "39": "R_Putamen", + "40": "L_Thalamus", + "41": "R_Thalamus", + "42": "L_Pallidum", + "43": "R_Pallidum", + "44": "Corpus_callosum", + "45": "R_Lateral_ventricle_no_temporal_horn", + "46": "L_Lateral_ventricle_no_temporal_horn", + "47": "R_Lateral_ventricle_temporal_horn", + "48": "L_Lateral_ventricle_temporal_horn", + "49": "Third_ventricle", + "50": "L_Precentral_gyrus", + "51": "R_Precentral_gyrus", + "52": "L_Straight_gyrus", + "53": "R_Straight_gyrus", + "54": "L_Anterior_orbital_gyrus", + "55": "R_Anterior_orbital_gyrus", + "56": "L_Inferior_frontal_gyrus", + "57": "R_Inferior_frontal_gyrus", + "58": "L_Superior_frontal_gyrus", + "59": "R_Superior_frontal_gyrus", + "60": "L_Postcentral_gyrus", + "61": "R_Postcentral_gyrus", + "62": "L_Superior_parietal_gyrus", + "63": "R_Superior_parietal_gyrus", + "64": "L_Lingual_gyrus", + "65": "R_Lingual_gyrus", + "66": "L_Cuneus", + "67": "R_Cuneus", + "68": "L_Medial_orbital_gyrus", + "69": "R_Medial_orbital_gyrus", + "70": "L_Lateral_orbital_gyrus", + "71": "R_Lateral_orbital_gyrus", + "72": "L_Posterior_orbital_gyrus", + "73": "R_Posterior_orbital_gyrus", + "74": "L_Substantia_nigra", + "75": "R_Substantia_nigra", + "76": "L_Subgenual_frontal_cortex", + "77": "R_Subgenual_frontal_cortex", + "78": "L_Subcallosal_area", + "79": "R_Subcallosal_area", + "80": 
"L_Pre-subgenual_frontal_cortex", + "81": "R_Pre-subgenual_frontal_cortex", + "82": "L_Superior_temporal_gyrus_anterior_part", + "83": "R_Superior_temporal_gyrus_anterior_part" + } +} \ No newline at end of file diff --git a/asltk/data/brain_atlas/HOCSA2006.json b/asltk/data/brain_atlas/HOCSA2006.json new file mode 100644 index 0000000..8425345 --- /dev/null +++ b/asltk/data/brain_atlas/HOCSA2006.json @@ -0,0 +1,63 @@ +{ + "atlas_name": "Harvard-Oxford Cortical and Subcortical Structural Atlases - 2006", + "dataset_url": "loamri/brain-atlas-hocsa2006", + "official_url": "https://neurovault.org/collections/262/", + "description": "Probabilistic atlases covering 48 cortical structural areas, derived from structural data and segmentations kindly provided by the Harvard Center for Morphometric Analysis.", + "dataset_doi": "10.34740/kaggle/dsv/12200315", + "citation_doi": [ + "10.1016/j.schres.2005.11.020", + "10.1176/appi.ajp.162.7.1256", + "10.1016/j.neuroimage.2006.01.021", + "10.1016/j.biopsych.2006.06.027" + ], + "labels": { + "1": "Frontal Pole", + "2": "Insular Cortex", + "3": "Superior Frontal Gyrus", + "4": "Middle Frontal Gyrus", + "5": "Inferior Frontal Gyrus, pars triangularis", + "6": "Inferior Frontal Gyrus, pars opercularis", + "7": "Precentral Gyrus", + "8": "Temporal Pole", + "9": "Superior Temporal Gyrus, anterior division", + "10": "Superior Temporal Gyrus, posterior division", + "11": "Middle Temporal Gyrus, anterior division", + "12": "Middle Temporal Gyrus, posterior division", + "13": "Middle Temporal Gyrus, temporooccipital part", + "14": "Inferior Temporal Gyrus, anterior division", + "15": "Inferior Temporal Gyrus, posterior division", + "16": "Inferior Temporal Gyrus, temporooccipital part", + "17": "Postcentral Gyrus", + "18": "Superior Parietal Lobule", + "19": "Supramarginal Gyrus, anterior division", + "20": "Supramarginal Gyrus, posterior division", + "21": "Angular Gyrus", + "22": "Lateral Occipital Cortex, superior division", + "23": 
"Lateral Occipital Cortex, inferior division", + "24": "Intracalcarine Cortex", + "25": "Frontal Medial Cortex", + "26": "Juxtapositional Lobule Cortex (formerly Supplementary Motor Cortex)", + "27": "Subcallosal Cortex", + "28": "Paracingulate Gyrus", + "29": "Cingulate Gyrus, anterior division", + "30": "Cingulate Gyrus, posterior division", + "31": "Precuneous Cortex", + "32": "Cuneal Cortex", + "33": "Frontal Orbital Cortex", + "34": "Parahippocampal Gyrus, anterior division", + "35": "Parahippocampal Gyrus, posterior division", + "36": "Lingual Gyrus", + "37": "Temporal Fusiform Cortex, anterior division", + "38": "Temporal Fusiform Cortex, posterior division", + "39": "Temporal Occipital Fusiform Cortex", + "40": "Occipital Fusiform Gyrus", + "41": "Frontal Operculum Cortex", + "42": "Central Opercular Cortex", + "43": "Parietal Operculum Cortex", + "44": "Planum Polare", + "45": "Heschl's Gyrus (includes H1 and H2)", + "46": "Planum Temporale", + "47": "Supracalcarine Cortex", + "48": "Occipital Pole" + } + } \ No newline at end of file diff --git a/asltk/data/brain_atlas/JHA2005.json b/asltk/data/brain_atlas/JHA2005.json new file mode 100644 index 0000000..bbdf6a9 --- /dev/null +++ b/asltk/data/brain_atlas/JHA2005.json @@ -0,0 +1,131 @@ +{ + "atlas_name": "JuBrain / Juelich histological atlas", + "dataset_url": "loamri/brain-atlas-jha2005", + "official_url": "https://www.fz-juelich.de/de/inm/inm-1", + "description": "A probabilistic atlas created by averaging multi-subject post-mortem cyto- and myelo-architectonic segmentations, performed by the team of Profs Zilles and Amunts at the Research Center Jülich and kindly provided by Simon Eickhoff.", + "dataset_doi": "10.34740/kaggle/dsv/12200396", + "citation_doi": ["10.1016/j.neuroimage.2004.12.034"], + "labels": { + "1": "GM Anterior intra-parietal sulcus hIP1 L", + "2": "GM Anterior intra-parietal sulcus hIP1 R", + "3": "GM Anterior intra-parietal sulcus hIP2 L", + "4": "GM Anterior intra-parietal sulcus 
hIP2 R", + "5": "GM Anterior intra-parietal sulcus hIP3 L", + "6": "GM Anterior intra-parietal sulcus hIP3 R", + "7": "GM Amygdala_centromedial group L", + "8": "GM Amygdala_centromedial group R", + "9": "GM Amygdala_laterobasal group L", + "10": "GM Amygdala_laterobasal group R", + "11": "GM Amygdala_superficial group L", + "12": "GM Amygdala_superficial group R", + "13": "GM Broca's area BA44 L", + "14": "GM Broca's area BA44 R", + "15": "GM Broca's area BA45 L", + "16": "GM Broca's area BA45 R", + "17": "GM Hippocampus cornu ammonis L", + "18": "GM Hippocampus cornu ammonis R", + "19": "GM Hippocampus entorhinal cortex L", + "20": "GM Hippocampus entorhinal cortex R", + "21": "GM Hippocampus dentate gyrus L", + "22": "GM Hippocampus dentate gyrus R", + "23": "GM Hippocampus hippocampal-amygdaloid transition area L", + "24": "GM Hippocampus hippocampal-amygdaloid transition area R", + "25": "GM Hippocampus subiculum L", + "26": "GM Hippocampus subiculum R", + "27": "GM Inferior parietal lobule PF L", + "28": "GM Inferior parietal lobule PF R", + "29": "GM Inferior parietal lobule PFcm L", + "30": "GM Inferior parietal lobule PFcm R", + "31": "GM Inferior parietal lobule PFm L", + "32": "GM Inferior parietal lobule PFm R", + "33": "GM Inferior parietal lobule PFop L", + "34": "GM Inferior parietal lobule PFop R", + "35": "GM Inferior parietal lobule PFt L", + "36": "GM Inferior parietal lobule PFt R", + "37": "GM Inferior parietal lobule Pga L", + "38": "GM Inferior parietal lobule Pga R", + "39": "GM Inferior parietal lobule PGp L", + "40": "GM Inferior parietal lobule PGp R", + "41": "GM Primary auditory cortex TE1.0 L", + "42": "GM Primary auditory cortex TE1.0 R", + "43": "GM Primary auditory cortex TE1.1 L", + "44": "GM Primary auditory cortex TE1.1 R", + "45": "GM Primary auditory cortex TE1.2 L", + "46": "GM Primary auditory cortex TE1.2 R", + "47": "GM Primary motor cortex BA4a L", + "48": "GM Primary motor cortex BA4a R", + "49": "GM Primary motor cortex 
BA4p L", + "50": "GM Primary motor cortex BA4p R", + "51": "GM Primary somatosensory cortex BA1 L", + "52": "GM Primary somatosensory cortex BA1 R", + "53": "GM Primary somatosensory cortex BA2 L", + "54": "GM Primary somatosensory cortex BA2 R", + "55": "GM Primary somatosensory cortex BA3a L", + "56": "GM Primary somatosensory cortex BA3a R", + "57": "GM Primary somatosensory cortex BA3b L", + "58": "GM Primary somatosensory cortex BA3b R", + "59": "GM Secondary somatosensory cortex / Parietal operculum OP1 L", + "60": "GM Secondary somatosensory cortex / Parietal operculum OP1 R", + "61": "GM Secondary somatosensory cortex / Parietal operculum OP2 L", + "62": "GM Secondary somatosensory cortex / Parietal operculum OP2 R", + "63": "GM Secondary somatosensory cortex / Parietal operculum OP3 L", + "64": "GM Secondary somatosensory cortex / Parietal operculum OP3 R", + "65": "GM Secondary somatosensory cortex / Parietal operculum OP4 L", + "66": "GM Secondary somatosensory cortex / Parietal operculum OP4 R", + "67": "GM Superior parietal lobule 5Ci L", + "68": "GM Superior parietal lobule 5Ci R", + "69": "GM Superior parietal lobule 5L L", + "70": "GM Superior parietal lobule 5L R", + "71": "GM Superior parietal lobule 5M L", + "72": "GM Superior parietal lobule 5M R", + "73": "GM Superior parietal lobule 7A L", + "74": "GM Superior parietal lobule 7A R", + "75": "GM Superior parietal lobule 7M L", + "76": "GM Superior parietal lobule 7M R", + "77": "GM Superior parietal lobule 7PC L", + "78": "GM Superior parietal lobule 7PC R", + "79": "GM Superior parietal lobule 7P L", + "80": "GM Superior parietal lobule 7P R", + "81": "GM Visual cortex V1 BA17 L", + "82": "GM Visual cortex V1 BA17 R", + "83": "GM Visual cortex V2 BA18 L", + "84": "GM Visual cortex V2 BA18 R", + "85": "GM Visual cortex V3V L", + "86": "GM Visual cortex V3V R", + "87": "GM Visual cortex V4 L", + "88": "GM Visual cortex V4 R", + "89": "GM Visual cortex V5 L", + "90": "GM Visual cortex V5 R", + 
"91": "GM Premotor cortex BA6 L", + "92": "GM Premotor cortex BA6 R", + "93": "WM Acoustic radiation R", + "94": "WM Acoustic radiation L", + "95": "WM Callosal body", + "96": "WM Cingulum R", + "97": "WM Cingulum L", + "98": "WM Corticospinal tract R", + "99": "WM Corticospinal tract L", + "100": "WM Fornix", + "101": "WM Inferior occipito-frontal fascicle R", + "102": "WM Inferior occipito-frontal fascicle L", + "103": "GM Lateral geniculate body R", + "104": "GM Lateral geniculate body L", + "105": "GM Mamillary body", + "106": "GM Medial geniculate body R", + "107": "GM Medial geniculate body L", + "108": "WM Optic radiation R", + "109": "WM Optic radiation L", + "110": "WM Superior longitudinal fascicle R", + "111": "WM Superior longitudinal fascicle L", + "112": "WM Superior occipito-frontal fascicle R", + "113": "WM Superior occipito-frontal fascicle L", + "114": "WM Uncinate fascicle R", + "115": "WM Uncinate fascicle L", + "116": "GM Insula Id1 L", + "117": "GM Insula Id1 R", + "118": "GM Insula Ig1 L", + "119": "GM Insula Ig1 R", + "120": "GM Insula Ig2 L", + "121": "GM Insula Ig2 R" + } +} \ No newline at end of file diff --git a/asltk/data/brain_atlas/LGPHCC2022.json b/asltk/data/brain_atlas/LGPHCC2022.json new file mode 100644 index 0000000..dcbe88a --- /dev/null +++ b/asltk/data/brain_atlas/LGPHCC2022.json @@ -0,0 +1,110 @@ +{ + "atlas_name": "Local-Global Parcellation of the Human Cerebral Cortex", + "dataset_url": "loamri/brain-atlas-lgphcc2022", + "official_url": "https://github.com/ThomasYeoLab/CBIG/tree/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal", + "description": "Resting state fMRI data from 1489 subjects were registered using surface-based alignment. 
A gradient weighted markov random field approach was employed to identify cortical parcels ranging from 100 to 1000 parcels", + "dataset_doi": "10.34740/kaggle/dsv/12200527", + "citation_doi": ["10.1093/cercor/bhx179"], + "labels": { + "1": "7Networks_LH_Vis_1", + "2": "7Networks_LH_Vis_2", + "3": "7Networks_LH_Vis_3", + "4": "7Networks_LH_Vis_4", + "5": "7Networks_LH_Vis_5", + "6": "7Networks_LH_Vis_6", + "7": "7Networks_LH_Vis_7", + "8": "7Networks_LH_Vis_8", + "9": "7Networks_LH_Vis_9", + "10": "7Networks_LH_SomMot_1", + "11": "7Networks_LH_SomMot_2", + "12": "7Networks_LH_SomMot_3", + "13": "7Networks_LH_SomMot_4", + "14": "7Networks_LH_SomMot_5", + "15": "7Networks_LH_SomMot_6", + "16": "7Networks_LH_DorsAttn_Post_1", + "17": "7Networks_LH_DorsAttn_Post_2", + "18": "7Networks_LH_DorsAttn_Post_3", + "19": "7Networks_LH_DorsAttn_Post_4", + "20": "7Networks_LH_DorsAttn_Post_5", + "21": "7Networks_LH_DorsAttn_Post_6", + "22": "7Networks_LH_DorsAttn_PrCv_1", + "23": "7Networks_LH_DorsAttn_FEF_1", + "24": "7Networks_LH_SalVentAttn_ParOper_1", + "25": "7Networks_LH_SalVentAttn_FrOperIns_1", + "26": "7Networks_LH_SalVentAttn_FrOperIns_2", + "27": "7Networks_LH_SalVentAttn_PFCl_1", + "28": "7Networks_LH_SalVentAttn_Med_1", + "29": "7Networks_LH_SalVentAttn_Med_2", + "30": "7Networks_LH_SalVentAttn_Med_3", + "31": "7Networks_LH_Limbic_OFC_1", + "32": "7Networks_LH_Limbic_TempPole_1", + "33": "7Networks_LH_Limbic_TempPole_2", + "34": "7Networks_LH_Cont_Par_1", + "35": "7Networks_LH_Cont_PFCl_1", + "36": "7Networks_LH_Cont_pCun_1", + "37": "7Networks_LH_Cont_Cing_1", + "38": "7Networks_LH_Default_Temp_1", + "39": "7Networks_LH_Default_Temp_2", + "40": "7Networks_LH_Default_Par_1", + "41": "7Networks_LH_Default_Par_2", + "42": "7Networks_LH_Default_PFC_1", + "43": "7Networks_LH_Default_PFC_2", + "44": "7Networks_LH_Default_PFC_3", + "45": "7Networks_LH_Default_PFC_4", + "46": "7Networks_LH_Default_PFC_5", + "47": "7Networks_LH_Default_PFC_6", + "48": 
"7Networks_LH_Default_PFC_7", + "49": "7Networks_LH_Default_pCunPCC_1", + "50": "7Networks_LH_Default_pCunPCC_2", + "51": "7Networks_RH_Vis_1", + "52": "7Networks_RH_Vis_2", + "53": "7Networks_RH_Vis_3", + "54": "7Networks_RH_Vis_4", + "55": "7Networks_RH_Vis_5", + "56": "7Networks_RH_Vis_6", + "57": "7Networks_RH_Vis_7", + "58": "7Networks_RH_Vis_8", + "59": "7Networks_RH_SomMot_1", + "60": "7Networks_RH_SomMot_2", + "61": "7Networks_RH_SomMot_3", + "62": "7Networks_RH_SomMot_4", + "63": "7Networks_RH_SomMot_5", + "64": "7Networks_RH_SomMot_6", + "65": "7Networks_RH_SomMot_7", + "66": "7Networks_RH_SomMot_8", + "67": "7Networks_RH_DorsAttn_Post_1", + "68": "7Networks_RH_DorsAttn_Post_2", + "69": "7Networks_RH_DorsAttn_Post_3", + "70": "7Networks_RH_DorsAttn_Post_4", + "71": "7Networks_RH_DorsAttn_Post_5", + "72": "7Networks_RH_DorsAttn_PrCv_1", + "73": "7Networks_RH_DorsAttn_FEF_1", + "74": "7Networks_RH_SalVentAttn_TempOccPar_1", + "75": "7Networks_RH_SalVentAttn_TempOccPar_2", + "76": "7Networks_RH_SalVentAttn_FrOperIns_1", + "77": "7Networks_RH_SalVentAttn_Med_1", + "78": "7Networks_RH_SalVentAttn_Med_2", + "79": "7Networks_RH_Limbic_OFC_1", + "80": "7Networks_RH_Limbic_TempPole_1", + "81": "7Networks_RH_Cont_Par_1", + "82": "7Networks_RH_Cont_Par_2", + "83": "7Networks_RH_Cont_PFCl_1", + "84": "7Networks_RH_Cont_PFCl_2", + "85": "7Networks_RH_Cont_PFCl_3", + "86": "7Networks_RH_Cont_PFCl_4", + "87": "7Networks_RH_Cont_Cing_1", + "88": "7Networks_RH_Cont_PFCmp_1", + "89": "7Networks_RH_Cont_pCun_1", + "90": "7Networks_RH_Default_Par_1", + "91": "7Networks_RH_Default_Temp_1", + "92": "7Networks_RH_Default_Temp_2", + "93": "7Networks_RH_Default_Temp_3", + "94": "7Networks_RH_Default_PFCv_1", + "95": "7Networks_RH_Default_PFCv_2", + "96": "7Networks_RH_Default_PFCdPFCm_1", + "97": "7Networks_RH_Default_PFCdPFCm_2", + "98": "7Networks_RH_Default_PFCdPFCm_3", + "99": "7Networks_RH_Default_pCunPCC_1", + "100": "7Networks_RH_Default_pCunPCC_2" + } +} \ No newline at 
end of file diff --git a/asltk/data/brain_atlas/MNI2009.json b/asltk/data/brain_atlas/MNI2009.json new file mode 100644 index 0000000..afd782b --- /dev/null +++ b/asltk/data/brain_atlas/MNI2009.json @@ -0,0 +1,19 @@ +{ + "atlas_name": "MNI Structural Atlas - 2009", + "dataset_url": "loamri/brain-atlas-mni2009", + "official_url": "https://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009", + "description": "A number of unbiased non-linear averages of the MNI152 database have been generated that combines the attractions of both high-spatial resolution and signal-to-noise while not being subject to the vagaries of any single brain.", + "dataset_doi": "10.34740/kaggle/dsv/12189230", + "citation_doi": ["10.1016/j.neuroimage.2010.07.033", "10.1016/S1053-8119(09)70884-5", "10.1007/3-540-48714-X_16"], + "labels": { + "1": "Caudate", + "2": "Cerebellum", + "3": "Frontal Lobe", + "4": "Insula", + "5": "Occipital Lobe", + "6": "Parietal Lobe", + "7": "Putamen", + "8": "Temporal Lobe", + "9": "Thalamus" + } + } \ No newline at end of file diff --git a/asltk/data/brain_atlas/__init__.py b/asltk/data/brain_atlas/__init__.py new file mode 100644 index 0000000..58c0b19 --- /dev/null +++ b/asltk/data/brain_atlas/__init__.py @@ -0,0 +1,170 @@ +# Brain atlas list for ASLtk +# All the data are stored in the Kaggle ASLtk project +# When a new atlas is requested, its data is downloaded and cached locally +import json +import os + +import kagglehub + + +# TODO Fix the t1_data loading because the brain atlases will have the 1mm and 2mm options +# TODO Adjust each kagglehub dataset to have the 2mm resolution option +class BrainAtlas: + +    ATLAS_JSON_PATH = os.path.join(os.path.dirname(__file__)) + +    def __init__(self, atlas_name: str = 'MNI2009'): +        """ +        Initializes the BrainAtlas class with a specified atlas name. +        If no atlas name is provided, it defaults to 'MNI2009'. + +        Args: +            atlas_name (str, optional): The name of the atlas to be used. Defaults to 'MNI2009'. 
+ """ + self._chosen_atlas = None + self.set_atlas(atlas_name) + + def set_atlas(self, atlas_name: str): + """ + Sets the brain atlas to be used for ASLtk operations. + This method checks if the provided atlas name exists in the available atlas database. + If found, it loads the corresponding atlas JSON file, downloads the atlas data using the + URL specified in the JSON (via kagglehub), and updates the atlas data with the local file + location. The selected atlas data is then stored internally for further use. + + Notes: + The atlas name should match one of the available atlases in the ASLtk database. + To see all the available atlases, you can use the `list_atlas` method. + + Args: + atlas_name (str): The name of the atlas to set. Must match an available atlas. + + Raises: + ValueError: If the atlas name is not found in the database or if there is an error + downloading the atlas data. + """ + if atlas_name not in self.list_atlas(): + raise ValueError(f'Atlas {atlas_name} not found in the database.') + + atlas_path = os.path.join(self.ATLAS_JSON_PATH, f'{atlas_name}.json') + with open(atlas_path, 'r') as f: + atlas_data = json.load(f) + + # Add the current atlas file location in the atlas data + try: + path = kagglehub.dataset_download( + atlas_data.get('dataset_url', None) + ) + except Exception as e: + raise ValueError(f'Error downloading the atlas: {e}') + + # Assuming the atlas_data is a dictionary, we can add the path to it + atlas_data['atlas_file_location'] = path + # Assuming the atlas data contains a key for T1-weighted and Label image data + atlas_data['t1_data'] = os.path.join(path, self._collect_t1(path)) + atlas_data['label_data'] = os.path.join( + path, self._collect_label(path) + ) + + self._chosen_atlas = atlas_data + + def get_atlas(self): + """ + Get the current brain atlas data. + + Returns: + dict: The current atlas data. 
+ """ + return self._chosen_atlas + + def get_atlas_url(self, atlas_name: str): + """ + Get the brain atlas URL of the chosen format in the ASLtk database. + The atlas URL is the base Kaggle URL where the atlas is stored. + + + Notes: + The `atlas_name` should be the name of the atlas as it is stored in the ASLtk database. + To check all the available atlases, you can use the `list_atlas` method. + + Args: + atlas_name (str): The name of the atlas to retrieve the URL for. + + Raises: + ValueError: If the atlas name is not found in the database. + + Returns: + str: The Kaggle dataset URL of the atlas if it exists, otherwise None. + """ + if atlas_name not in self.list_atlas(): + raise ValueError(f'Atlas {atlas_name} not found in the database.') + + try: + atlas_url = self._chosen_atlas.get('dataset_url', None) + except AttributeError: + raise ValueError( + f'Atlas {atlas_name} is not set or does not have a dataset URL.' + ) + + return atlas_url + + def get_atlas_labels(self): + """ + Get the labels of the chosen brain atlas. + This method retrieves the labels associated with the current atlas. + Notes: + The labels are typically used for parcellation or segmentation tasks in brain imaging. + + Returns: + dict: The labels of the current atlas if available, otherwise None. + """ + return self._chosen_atlas.get('labels', None) + + def list_atlas(self): + """ + List all the available brain atlases in the ASLtk database. + The atlas names are derived from the JSON files stored in the `ATLAS_JSON_PATH`. + + The JSON names should follow the format `.json`. + The atlas names are returned without the `.json` extension. + + Returns: + list(str): List of atlas names available in the ASLtk database. + """ + return [ + f[:-5] + for f in os.listdir(self.ATLAS_JSON_PATH) + if f.endswith('.json') + ] + + def _collect_t1(self, path: str): # pragma: no cover + """ + Collect the T1-weighted image data from the atlas directory. + Args: + path (str): The path to the atlas directory. 
+ Returns: + str: The filename of the T1-weighted image data. + """ + t1_file = next((f for f in os.listdir(path) if '_t1' in f), None) + if t1_file is None: + raise ValueError( + f"No file with '_t1' found in the atlas directory: {path}" + ) + + return t1_file + + def _collect_label(self, path: str): # pragma: no cover + """ + Collect the label file from the atlas directory. + Args: + path (str): The path to the atlas directory. + Returns: + str: The filename of the label file. + """ + label_file = next((f for f in os.listdir(path) if '_label' in f), None) + if label_file is None: + raise ValueError( + f"No file with '_label' found in the atlas directory: {path}" + ) + + return label_file diff --git a/asltk/data/reports/__init__.py b/asltk/data/reports/__init__.py new file mode 100644 index 0000000..7412c3e --- /dev/null +++ b/asltk/data/reports/__init__.py @@ -0,0 +1,4 @@ +# from .basic_report import BasicReport +# from .parcellation_report import ParcellationReport + +# __all__ = ['ParcellationReport', 'BasicReport'] diff --git a/asltk/data/reports/basic_report.py b/asltk/data/reports/basic_report.py new file mode 100644 index 0000000..5fefda1 --- /dev/null +++ b/asltk/data/reports/basic_report.py @@ -0,0 +1,48 @@ +# from abc import ABC, abstractmethod + + +# class BasicReport(ABC): +# """ +# This is an abstract base class for generating reports. +# It provides a structure for creating reports with a title and methods +# for generating and saving the report. + +# Args: +# ABC: Abstract Base Class for defining abstract methods. +# """ + +# def __init__(self, title: str, **kwargs): +# """ +# Initialize the BasicReport with a title. + +# Args: +# title (str): The title of the report. +# """ +# self.title = title +# self.report = None + +# @abstractmethod +# def generate_report(self) -> None: +# """ +# Generate the report content. +# This method should be implemented by subclasses to create the report content. 
+# It should populate the `self.report` attribute with the report data. +# The report can be in any format, such as text, HTML, or a structured data format. +# The specific implementation will depend on the type of report being generated. +# """ +# pass + +# @abstractmethod +# def save_report(self, file_path: str, format: str = 'csv') -> None: +# """ +# Save the generated report to a file. + +# Parameters +# ---------- +# file_path : str +# The path where the report will be saved. +# format : str, optional +# The format of the report file. Options are 'pdf', 'csv' (default is 'csv'). +# """ +# if self.report is None: +# raise ValueError('Report has not been generated yet.') diff --git a/asltk/data/reports/parcellation_report.py b/asltk/data/reports/parcellation_report.py new file mode 100644 index 0000000..d4196b6 --- /dev/null +++ b/asltk/data/reports/parcellation_report.py @@ -0,0 +1,232 @@ +# import os +# from datetime import datetime + +# import matplotlib.gridspec as gridspec +# import matplotlib.pyplot as plt +# import pandas as pd +# from matplotlib.backends.backend_pdf import PdfPages + +# from asltk import PARCELLATION_REPORT_PATH as default_path +# from asltk.asldata import ASLData +# from asltk.data.brain_atlas import BrainAtlas +# from asltk.data.reports.basic_report import BasicReport +# from asltk.utils.io import load_image + + +# class ParcellationReport(BasicReport): +# def __init__( +# self, +# subject_image: ASLData, +# atlas_name: str = 'MNI2009', +# subject_filename: str = None, +# subject_img_dimensions: tuple = None, +# subject_img_type: str = None, +# subject_img_resolution: tuple = None, +# **kwargs, +# ): +# self.atlas = load_image(BrainAtlas(atlas_name).get_atlas()['t1_data']) +# self.subject_image = subject_image('m0') +# self._check_inputs_dimensions(self.subject_image, self.atlas) + +# # Optional parameters for subject information +# self.subject_filename = ( +# subject_filename if subject_filename else 'Unknown' +# ) +# 
self.subject_img_dimensions = ( +# subject_img_dimensions if subject_img_dimensions else (0, 0, 0) +# ) +# self.subject_img_type = ( +# subject_img_type if subject_img_type else 'Unknown' +# ) +# self.subject_img_resolution = ( +# subject_img_resolution if subject_img_resolution else (0, 0, 0) +# ) + +# default_filename = f"parcellation_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf" +# self.report_filename = kwargs.get('report_filename', default_filename) + +# self.default_fullpath = os.path.join( +# default_path, self.report_filename +# ) + +# # Initialize the report data structure +# self.report_data = {} + +# def generate_report(self): +# # Report structure: +# # Description section: +# # - Report information: date +# # - Brain Atlas: Name and description +# # - Brain Regions: List of regions with their labels and descriptions +# # - Subject Information: Subject filename, image dimensions, image type, image resolution +# # Illustration section: +# # - Brain atlas illustration: Image of the brain atlas with regions labeled (5 slices I-S) +# # - Subject illustration: Image of subject's brain without parcellation (5 slices I-S) +# # - Subject illustration: Image of the subject's brain with parcellation overlay (5 slices I-S) +# # Parcellation section: +# # - Table with parcellation statistics: +# # - Region label +# # - Region name +# # - Number of voxels +# # - Volume in mm³ +# # - Average intensity +# # - Std. deviation of intensity +# # - Minimum intensity +# # - Maximum intensity +# # - Coefficient of variation (CV) +# description_section = self._create_description_section() + +# self.report_data = description_section + +# def save_report(self, format: str = 'csv'): +# # TODO explain in the documentation that the file path is defined by the report_filename and uses the PARCELLATION_REPORT_PATH in the asltk module +# if not self.report_data: +# raise ValueError( +# 'Report data is empty. Please generate the report first.' 
+# ) + +# # Save the report data to a file +# if format == 'csv': +# # TODO revise the CSV formatting to include all necessary information +# # Save the regions DataFrame to a CSV file +# self.report_data['regions_dataframe'].to_csv( +# self.default_fullpath, index=False +# ) +# elif format == 'pdf': +# # Save the report as a PDF file +# with PdfPages(self.default_fullpath) as pdf: +# # Save the header figure +# pdf.savefig(self.report_data['header_figure']) +# plt.close(self.report_data['header_figure']) + +# # Add more sections to the PDF as needed +# # For example, you can add illustrations or parcellation statistics here + +# def _create_description_section(self): +# """ +# Create the description section header for the PDF report. + +# Returns: +# dict: A dictionary containing the matplotlib figures and information for the report header. +# """ + +# # Create figure for the header section +# fig = plt.figure(figsize=(10, 8)) +# gs = gridspec.GridSpec(4, 1, height_ratios=[1, 1, 2, 2]) + +# # Report information: date +# ax1 = plt.subplot(gs[0]) +# ax1.axis('off') +# ax1.text( +# 0.01, 0.5, f'Parcellation Report', fontsize=16, fontweight='bold' +# ) +# ax1.text( +# 0.01, +# 0.1, +# f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M')}", +# fontsize=10, +# ) + +# # Brain Atlas: Name and description +# ax2 = plt.subplot(gs[1]) +# ax2.axis('off') +# ax2.text( +# 0.01, +# 0.7, +# f'Brain Atlas Information', +# fontsize=14, +# fontweight='bold', +# ) +# ax2.text(0.01, 0.4, f'Name: {self.atlas.name}') +# ax2.text( +# 0.01, +# 0.1, +# f"Description: {getattr(self.atlas, 'description', 'No description available')}", +# ) + +# # Subject Information +# ax3 = plt.subplot(gs[2]) +# ax3.axis('off') +# ax3.text( +# 0.01, 0.9, 'Subject Information', fontsize=14, fontweight='bold' +# ) +# ax3.text(0.01, 0.7, f'Filename: {self.subject_filename}') +# ax3.text(0.01, 0.5, f'Image dimensions: {self.subject_img_dimensions}') +# ax3.text(0.01, 0.3, f'Image type: 
{self.subject_img_type}') +# ax3.text( +# 0.01, 0.1, f'Image resolution: {self.subject_img_resolution} mm' +# ) + +# # Brain Regions: Create a DataFrame with the regions information +# try: +# regions_data = {'Label': [], 'Region Name': []} + +# # Get regions from the atlas - adapt this based on how your BrainAtlas class works +# for label, region in self.atlas.get('labels', {}).items(): +# regions_data['Label'].append(label) +# regions_data['Region Name'].append(region) +# # regions_data['Description'].append(getattr(region, 'description', 'No description available')) + +# df_regions = pd.DataFrame(regions_data) + +# # Create a table for the regions +# ax4 = plt.subplot(gs[3]) +# ax4.axis('off') +# ax4.text( +# 0.01, 0.95, 'Brain Regions', fontsize=14, fontweight='bold' +# ) + +# # Display all regions in a table +# table_data = df_regions.values +# columns = df_regions.columns + +# table = ax4.table( +# cellText=table_data, +# colLabels=columns, +# loc='center', +# cellLoc='center', +# colWidths=[0.1, 0.3, 0.6], +# ) +# table.auto_set_font_size(False) +# table.set_fontsize(8) +# table.scale(1, 1.5) + +# except Exception as e: +# # In case of any error with regions +# ax4 = plt.subplot(gs[3]) +# ax4.axis('off') +# ax4.text( +# 0.01, +# 0.5, +# f'Brain Regions: Error retrieving region information. 
{str(e)}', +# fontsize=10, +# color='red', +# ) +# df_regions = pd.DataFrame() + +# plt.tight_layout() + +# # Return the result as a dictionary that can be used by save_report +# return { +# 'header_figure': fig, +# 'date': datetime.now().strftime('%Y-%m-%d %H:%M'), +# 'atlas_name': self.atlas.get('atlas_name', 'Unknown Atlas'), +# 'atlas_description': self.atlas.get( +# 'description', 'No description available' +# ), +# 'subject_info': { +# 'filename': self.subject_filename, +# 'dimensions': self.subject_img_dimensions, +# 'type': self.subject_img_type, +# 'resolution': self.subject_img_resolution, +# }, +# 'regions_dataframe': df_regions, +# } + +# def _check_inputs_dimensions(subject_image, atlas): +# subj_dim = subject_image.shape +# atlas_dim = atlas.shape +# if subj_dim != atlas_dim: +# raise TypeError( +# f'subject_image must have the same dimensions as the atlas image. Dimensions do not match: {subj_dim} != {atlas_dim}' +# ) diff --git a/asltk/reconstruction/cbf_mapping.py b/asltk/reconstruction/cbf_mapping.py index ccdd9e5..8c743b8 100644 --- a/asltk/reconstruction/cbf_mapping.py +++ b/asltk/reconstruction/cbf_mapping.py @@ -100,7 +100,7 @@ def set_brain_mask(self, brain_mask: np.ndarray, label: int = 1): Load and use an existing brain mask: >>> # Load pre-computed brain mask - >>> from asltk.utils import load_image + >>> from asltk.utils.io import load_image >>> brain_mask = load_image('./tests/files/m0_brain_mask.nii.gz') >>> cbf_mapper.set_brain_mask(brain_mask) diff --git a/asltk/registration/__init__.py b/asltk/registration/__init__.py index 5804476..57aaf0b 100644 --- a/asltk/registration/__init__.py +++ b/asltk/registration/__init__.py @@ -1,78 +1,337 @@ -import warnings - +import ants import numpy as np +import SimpleITK as sitk from asltk.asldata import ASLData -from asltk.logging_config import ( - get_logger, - log_processing_step, - log_warning_with_context, -) -from asltk.registration.rigid import rigid_body_registration -from asltk.utils 
import collect_data_volumes
+from asltk.data.brain_atlas import BrainAtlas
+from asltk.logging_config import get_logger
+from asltk.utils.image_manipulation import check_and_fix_orientation
+from asltk.utils.io import load_image
-def head_movement_correction(
-    asl_data: ASLData, ref_vol: int = 0, verbose: bool = False
+def space_normalization(
+    moving_image: np.ndarray,
+    template_image: BrainAtlas,
+    moving_mask: np.ndarray = None,
+    template_mask: np.ndarray = None,
+    transform_type: str = 'SyNBoldAff',
+    **kwargs,
 ):
+    """
+    Perform brain normalization to register the moving image into the
+    template image space.
+
+    This function uses ANTsPy to register a moving image to a template
+    image. Optional masks can be provided for both images. The
+    registration process supports different transformation types.
+
+    This is the base method for space normalization, which can be used
+    for different types of images, such as M0, T1w, and ASL images.
+    The method is designed to be flexible and can be used for different
+    types of images, as long as the moving image and template image are
+    provided in the correct format.
+
+    Note:
+        For more specific cases, such as ASL data normalization, one can
+        use other methods, such as in `asl_normalization` module.
+
+    Note:
+        Usually the space normalization is performed between the M0 and T1w
+        images. The M0 image is one of the images obtained in the ASL
+        acquisition and the T1w image is the anatomical image template.
+
+    Important:
+        The `transform_type` parameter allows for different types of
+        transformations, such as 'SyN', 'BSpline', etc. The default is 'SyNBoldAff',
+        which is suitable for registering ASL images to a T1-weighted template.
+        All the definitions of the transformation types can be found in the
+        ANTsPy documentation: https://antspy.readthedocs.io/en/latest/registration.html
+
+    Important:
+        This method always assumes a template image as a BrainAtlas object.
+ One may pass a string with the name of the atlas, and the method will + automatically load the atlas and use the T1-weighted image as the + template image. If a different template image is needed, it should be + passed as a BrainAtlas object, however, it depends on the ASLtk + Kaggle dataset structure, so it is not recommended to raise an issue + in the official ASLtk repository if the template image is not presented + in the BrainAtlas format. + + Parameters + ---------- + moving_image : np.ndarray + The moving image. + template_image : BrainAtlas or str or np.ndarray + The template image as BrainAtlas object, string with the atlas name or + a numpy array. + moving_mask : np.ndarray, optional + The moving mask in the same space as the moving image. If not provided, + no mask is used. + template_mask : np.ndarray, optional + The template mask in the same space as the template image. If not provided, + no mask is used. + transform_type : str, optional + Type of transformation ('SyN', 'BSpline', etc.). Default is 'SyNBoldAff'. + check_orientation : bool, optional + Whether to automatically check and fix orientation mismatches between + moving and template images. Default is True. + verbose : bool, optional + Whether to print detailed orientation analysis. Default is False. + + Returns + ------- + normalized_image : np.ndarray + The moving image transformed into the template image space. + transform : list + A list of transformation mapping from moving to template space. + """ + if not isinstance(moving_image, np.ndarray) or not isinstance( + template_image, (BrainAtlas, str, np.ndarray) + ): + raise TypeError( + 'moving_image must be a numpy array and template_image must be a BrainAtlas object, a string with the atlas name, or a numpy array.' 
+ ) + + # Take optional parameters + check_orientation = kwargs.get('check_orientation', True) + verbose = kwargs.get('verbose', False) + logger = get_logger('registration') - logger.info('Starting head movement correction') - - # Check if the input is a valid ASLData object. - if not isinstance(asl_data, ASLData): - error_msg = 'Input must be an ASLData object.' - logger.error(error_msg) - raise TypeError(error_msg) - - # Collect all the volumes in the pcasl image - log_processing_step('Collecting data volumes') - total_vols, orig_shape = collect_data_volumes(asl_data('pcasl')) - logger.info(f'Collected {len(total_vols)} volumes for registration') - - # Check if the reference volume is a valid integer based on the ASLData number of volumes. - if not isinstance(ref_vol, int) or ref_vol >= len(total_vols): - error_msg = 'ref_vol must be an positive integer based on the total asl data volumes.' - logger.error( - f'{error_msg} ref_vol={ref_vol}, total_volumes={len(total_vols)}' + logger.info('Starting space normalization') + + # Load template image first + # TODO PROBLEMA PRINCIPAL: A leitura de imagens para numpy faz a perda da origen e spacing, para fazer o corregistro é preciso acertar a orientação da imagem com relação a origem (flip pela origem) para que ambas estejam na mesma orientação visual + # TODO Pensar em como será a utilização do corregistro para o ASLTK (assume que já está alinhado? ou tenta alinhar imagens check_orientation?) 
+ template_array = None + if isinstance(template_image, BrainAtlas): + template_file = template_image.get_atlas()['t1_data'] + template_array = load_image(template_file) + elif isinstance(template_image, str): + template_file = BrainAtlas(template_image).get_atlas()['t1_data'] + template_array = load_image(template_file) + # template_array = ants.image_read('/home/antonio/Imagens/loamri-samples/20240909/mni_2mm.nii.gz') + elif isinstance(template_image, np.ndarray): + template_array = template_image + else: + raise TypeError( + 'template_image must be a BrainAtlas object, a string with the atlas name, or a numpy array.' ) - raise ValueError(error_msg) - # Apply the rigid body registration to each volume (considering the ref_vol) - log_processing_step( - 'Applying rigid body registration', - f'using volume {ref_vol} as reference', + if moving_image.ndim != 3 or template_array.ndim != 3: + raise ValueError( + 'Both moving_image and template_image must be 3D arrays.' + ) + + corrected_moving_image = moving_image + orientation_transform = None + + # TODO VERIICAR SE CHECK_ORIENTATION ESTA CERTO... USAR sitk.FlipImageFilter usando a Origen da image (Slicer da certo assim) + if check_orientation: + ( + corrected_moving_image, + orientation_transform, + ) = check_and_fix_orientation( + moving_image, template_array, verbose=verbose + ) + if verbose and orientation_transform: + print(f'Applied orientation correction: {orientation_transform}') + + # Convert to ANTs images + + moving = ants.from_numpy(corrected_moving_image) + template = ants.from_numpy(template_array) + + # Load masks if provided + if isinstance(moving_mask, np.ndarray): + moving_mask = ants.from_numpy(moving_mask) + if isinstance(template_mask, np.ndarray): + template_mask = ants.from_numpy(template_mask) + + # TODO Vericicar se ants.registration consegue colocar o TransformInit como Centro de Massa!' 
+ # Perform registration + registration = ants.registration( + fixed=template, + moving=moving, + type_of_transform=transform_type, + mask=moving_mask, + mask_fixed=template_mask, + **kwargs, # Additional parameters for ants.registration ) - corrected_vols = [] - trans_mtx = [] - ref_volume = total_vols[ref_vol] - - for idx, vol in enumerate(total_vols): - logger.debug(f'Correcting volume {idx}') - if verbose: - print(f'Correcting volume {idx}...', end='') - try: - corrected_vol, trans_m = rigid_body_registration(vol, ref_volume) - logger.debug(f'Volume {idx} registration successful') - except Exception as e: - warning_msg = f'Volume movement no handle by: {e}. Assuming the original data.' - log_warning_with_context(warning_msg, f'volume {idx}') - warnings.warn(warning_msg) - corrected_vol, trans_m = vol, np.eye(4) - - if verbose: - print('...finished.') - corrected_vols.append(corrected_vol) - trans_mtx.append(trans_m) - - # Rebuild the original ASLData object with the corrected volumes - log_processing_step('Rebuilding corrected volume data') - corrected_vols = np.stack(corrected_vols).reshape(orig_shape) - - logger.info( - f'Head movement correction completed successfully for {len(total_vols)} volumes' + + # Passing the warped image and forward transforms + return registration['warpedmovout'].numpy(), registration['fwdtransforms'] + + +def rigid_body_registration( + fixed_image: np.ndarray, + moving_image: np.ndarray, + moving_mask: np.ndarray = None, + template_mask: np.ndarray = None, +): + """ + Register two images using a rigid body transformation. This methods applies + a Euler 3D transformation in order to register the moving image to the + fixed image. + + Note: + The registration assumes that the moving image can be adjusted using + only rotation and translation, without any scaling or shearing. 
This
+    is suitable for cases of alignment among temporal volumes, such as in
+    ASL data, where the images are acquired in the same space and only
+    small movements are expected.
+
+    Args:
+        fixed_image: np.ndarray
+            The fixed image as the reference space.
+        moving_image: np.ndarray
+            The moving image to be registered.
+        moving_mask: np.ndarray, optional
+            The mask of the moving image. If not provided, the moving image
+            will be used as the mask.
+        template_mask: np.ndarray, optional
+            The mask of the fixed image. If not provided, the fixed image
+            will be used as the mask.
+
+    Raises:
+        Exception: fixed_image and moving_image must be a numpy array.
+        Exception: moving_mask must be a numpy array.
+        Exception: template_mask must be a numpy array.
+
+    Returns
+    -------
+    normalized_image : np.ndarray
+        The moving image transformed into the template image space.
+    transforms : list
+        A list of transformation mapping from moving to template space.
+    """
+    if not isinstance(fixed_image, np.ndarray) or not isinstance(
+        moving_image, np.ndarray
+    ):
+        raise Exception('fixed_image and moving_image must be a numpy array.')
+
+    if moving_mask is not None and not isinstance(moving_mask, np.ndarray):
+        raise Exception('moving_mask must be a numpy array.')
+    if template_mask is not None and not isinstance(template_mask, np.ndarray):
+        raise Exception('template_mask must be a numpy array.')
+
+    normalized_image, trans_maps = space_normalization(
+        moving_image,
+        fixed_image,
+        transform_type='Rigid',
+        moving_mask=moving_mask,
+        template_mask=template_mask,
+    )
+
+    return normalized_image, trans_maps
+
+
+def affine_registration(
+    fixed_image: np.ndarray,
+    moving_image: np.ndarray,
+    moving_mask: np.ndarray = None,
+    template_mask: np.ndarray = None,
+    fast_method: bool = True,
+):
+    """
+    Register two images using an affine transformation. This method applies
+    a 3D affine transformation in order to register the moving image to the
+    fixed image.
+ + Args: + fixed_image: np.ndarray + The fixed image as the reference space. + moving_image: np.ndarray + The moving image to be registered. + moving_mask: np.ndarray, optional + The mask of the moving image. If not provided, the moving image + will be used as the mask. + template_mask: np.ndarray, optional + The mask of the fixed image. If not provided, the fixed image + will be used as the mask. + + Raises: + Exception: fixed_image and moving_image must be a numpy array. + + Returns + ------- + resampled_image : np.ndarray + The moving image transformed into the template image space. + transformation_matrix : np.ndarray + The transformation matrix mapping from moving to template space. + """ + if not isinstance(fixed_image, np.ndarray) or not isinstance( + moving_image, np.ndarray + ): + raise Exception('fixed_image and moving_image must be a numpy array.') + if moving_mask is not None and not isinstance(moving_mask, np.ndarray): + raise Exception('moving_mask must be a numpy array.') + if template_mask is not None and not isinstance(template_mask, np.ndarray): + raise Exception('template_mask must be a numpy array.') + + affine_type = 'AffineFast' if fast_method else 'Affine' + warped_image, transformation_matrix = space_normalization( + moving_image, + fixed_image, + transform_type=affine_type, + moving_mask=moving_mask, + template_mask=template_mask, ) - # # Update the ASLData object with the corrected volumes - # asl_data.set_image(corrected_vols, 'pcasl') + return warped_image, transformation_matrix + + +def apply_transformation( + moving_image: np.ndarray, + reference_image: np.ndarray, + transforms: list, + **kwargs, +): + """ + Apply a transformation list set to an image. + + This method applies a list of transformations to a moving image + to align it with a reference image. The transformations are typically + obtained from a registration process, such as rigid or affine + registration. 
+ + Note: + The `transforms` parameter should be a list of transformation matrices + obtained from a registration process. The transformations are applied + in the order they are provided in the list. + + Args: + image: np.ndarray + The image to be transformed. + reference_image: np.ndarray + The reference image to which the transformed image will be aligned. + If not provided, the original image will be used as the reference. + transforms: list + The transformation matrix list. + + Returns: + transformed_image: np.ndarray + The transformed image. + """ + # TODO handle kwargs for additional parameters based on ants.apply_transforms + if not isinstance(moving_image, np.ndarray): + raise TypeError('moving image must be numpy array.') + + if not isinstance(reference_image, (np.ndarray, BrainAtlas)): + raise TypeError( + 'reference_image must be a numpy array or a BrainAtlas object.' + ) + elif isinstance(reference_image, BrainAtlas): + reference_image = load_image(reference_image.get_atlas()['t1_data']) + + if not isinstance(transforms, list): + raise TypeError( + 'transforms must be a list of transformation matrices.' 
+ ) + + corr_image = ants.apply_transforms( + fixed=ants.from_numpy(reference_image), + moving=ants.from_numpy(moving_image), + transformlist=transforms, + ) - return corrected_vols, trans_mtx + return corr_image.numpy() diff --git a/asltk/registration/asl_normalization.py b/asltk/registration/asl_normalization.py index e69de29..b2d1984 100644 --- a/asltk/registration/asl_normalization.py +++ b/asltk/registration/asl_normalization.py @@ -0,0 +1,295 @@ +import ants +import numpy as np +from rich.progress import Progress + +from asltk.asldata import ASLData +from asltk.data.brain_atlas import BrainAtlas +from asltk.registration import ( + apply_transformation, + rigid_body_registration, + space_normalization, +) +from asltk.utils.image_manipulation import ( + collect_data_volumes, + select_reference_volume, +) +from asltk.utils.image_statistics import ( + calculate_mean_intensity, + calculate_snr, +) +from asltk.utils.io import load_image + + +def asl_template_registration( + asl_data: ASLData, + asl_data_mask: np.ndarray = None, + atlas_name: str = 'MNI2009', + verbose: bool = False, +): + """ + Register ASL data to common atlas space. + + This function applies a elastic normalization to fit the subject head + space into the atlas template space. + + + Note: + This method takes in consideration the ASLData object, which contains + the pcasl and/or m0 image. The registration is performed using primarily + the `m0`image if available, otherwise it uses the `pcasl` image. + Therefore, choose wisely the `ref_vol` parameter, which should be a valid index + for the best `pcasl`volume reference to be registered to the atlas. + + Args: + asl_data: ASLData + The ASLData object containing the pcasl and/or m0 image to be corrected. + ref_vol: (int, optional) + The index of the reference volume to which all other volumes will be registered. + Defaults to 0. + asl_data_mask: np.ndarray + A single volume image mask. 
This can assist the normalization method to converge + into the atlas space. If not provided, the full image is adopted. + atlas_name: str + The atlas type to be considered. The BrainAtlas class is applied, then choose + the `atlas_name` based on the ASLtk brain atlas list. + verbose: (bool, optional) + If True, prints progress messages. Defaults to False. + + Raises: + TypeError: If the input is not an ASLData object. + ValueError: If ref_vol is not a valid index. + RuntimeError: If an error occurs during registration. + + Returns: + tuple: ASLData object with corrected volumes and a list of transformation matrices. + """ + if not isinstance(asl_data, ASLData): + raise TypeError('Input must be an ASLData object.') + + # if not isinstance(ref_vol, int) or ref_vol < 0: + # raise ValueError('ref_vol must be a non-negative integer.') + + total_vols, orig_shape = collect_data_volumes(asl_data('pcasl')) + # if ref_vol >= len(total_vols): + # raise ValueError( + # 'ref_vol must be a valid index based on the total ASL data volumes.' + # ) + + if asl_data('m0') is None: + raise ValueError( + 'M0 image is required for normalization. Please provide an ASLData with a valid M0 image.' 
+ ) + + atlas = BrainAtlas(atlas_name) + # atlas_img = ants.image_read(atlas.get_atlas()['t1_data']).numpy() + atlas_img = load_image(atlas.get_atlas()['t1_data']) + + def norm_function(vol, _): + return space_normalization( + moving_image=vol, + template_image=atlas, + moving_mask=asl_data_mask, + template_mask=None, + transform_type='Affine', + check_orientation=True, + ) + + # Create a new ASLData to allocate the normalized image + new_asl = asl_data.copy() + + tmp_vol_list = [asl_data('m0')] + orig_shape = asl_data('m0').shape + + m0_vol_corrected, trans_m0_mtx = __apply_array_normalization( + tmp_vol_list, 0, norm_function + ) + new_asl.set_image(m0_vol_corrected[0], 'm0') + + # Apply the normalization transformation to all pcasl volumes + pcasl_vols, _ = collect_data_volumes(asl_data('pcasl')) + normalized_pcasl_vols = [] + with Progress() as progress: + task = progress.add_task( + '[green]Applying normalization to pcasl volumes...', + total=len(pcasl_vols), + ) + for vol in pcasl_vols: + norm_vol = apply_transformation( + moving_image=vol, + reference_image=atlas_img, + transforms=trans_m0_mtx, + ) + normalized_pcasl_vols.append(norm_vol) + progress.update(task, advance=1) + + new_asl.set_image(normalized_pcasl_vols, 'pcasl') + + return new_asl, trans_m0_mtx + + +def head_movement_correction( + asl_data: ASLData, + ref_vol: np.ndarray = None, + method: str = 'snr', + roi: np.ndarray = None, + verbose: bool = False, +): + """ + Correct head movement in ASL data using rigid body registration. + + This function applies rigid body registration to correct head movement + in ASL data. It registers each volume in the ASL data to a reference volume. + + Hence, it can be helpfull to correct for head movements that may have + occurred during the acquisition of ASL data. + Note: + The reference volume is selected based on the `ref_vol` parameter, + which should be a valid index of the total number of volumes in the ASL data. 
+        The `ref_vol` value for 0 means that the first volume will be used as the reference.
+
+    Args:
+        asl_data: (ASLData)
+            The ASLData object containing the pcasl image to be corrected.
+        ref_vol: (np.ndarray, optional)
+            The reference volume to which all other volumes will be registered.
+            If not defined, the `m0` volume will be used.
+            In case the `m0` volume is not available, the volume is defined by the method parameter.
+        method: (str, optional)
+            The method to select the reference volume. Options are 'snr' or 'mean'.
+            If 'snr', the volume with the highest SNR is selected.
+            If 'mean', the volume with the highest mean signal is selected.
+        verbose: (bool, optional)
+            If True, prints progress messages. Defaults to False.
+
+    Raises:
+        TypeError: If the input is not an ASLData object.
+        ValueError: If no valid reference volume is provided or can be determined.
+        RuntimeError: If an error occurs during the registration of a volume.
+
+    Returns:
+        tuple: ASLData object with corrected volumes and a list of transformation matrices.
+    """
+
+    # Check if the input is a valid ASLData object.
+    if not isinstance(asl_data, ASLData):
+        raise TypeError('Input must be an ASLData object.')
+
+    # Collect all the volumes in the pcasl image
+    total_vols, _ = collect_data_volumes(asl_data('pcasl'))
+    trans_proportions = _collect_transformation_proportions(
+        total_vols, method, roi
+    )
+
+    # If ref_vol is not provided, use the m0 volume or the first pcasl volume
+    ref_volume = None
+    if ref_vol is None:
+        if asl_data('m0') is not None:
+            ref_volume = asl_data('m0')
+        elif total_vols:
+            vol_from_method, _ = select_reference_volume(
+                asl_data, ref_vol, method=method
+            )
+            ref_volume = vol_from_method
+        else:
+            raise ValueError(
+                'No valid reference volume provided. Please provide a valid m0 or ASLData volume.'
+            )
+    else:
+        ref_volume = ref_vol
+
+    # Check if the reference volume is a valid volume.
+    if (
+        not isinstance(ref_volume, np.ndarray)
+        or ref_volume.shape != total_vols[0].shape
+    ):
+        raise ValueError(
+            'ref_vol must be a valid volume from the total asl data volumes.'
+ ) + + def norm_function(vol, ref_volume): + return rigid_body_registration(vol, ref_volume) + + corrected_vols, trans_mtx = __apply_array_normalization( + total_vols, ref_volume, norm_function, trans_proportions + ) + + new_asl_data = asl_data.copy() + # Create the new ASLData object with the corrected volumes + corrected_vols_array = np.array(corrected_vols).reshape( + asl_data('pcasl').shape + ) + new_asl_data.set_image(corrected_vols_array, 'pcasl') + + return new_asl_data, trans_mtx + + +# TODO Provavel que tenha que separar esse metodo para o asl_template_registration... revisar depois +def __apply_array_normalization( + total_vols, ref_vol, normalization_function, trans_proportions +): + corrected_vols = [] + trans_mtx = [] + with Progress() as progress: + task = progress.add_task( + '[green]Registering volumes...', total=len(total_vols) + ) + for idx, vol in enumerate(total_vols): + try: + _, trans_m = normalization_function(vol, ref_vol) + + # Adjust the transformation matrix + trans_path = trans_m[0] + t_matrix = ants.read_transform(trans_path) + params = t_matrix.parameters * trans_proportions[idx] + t_matrix.set_parameters(params) + ants.write_transform(t_matrix, trans_m[0]) + + corrected_vol = apply_transformation(vol, ref_vol, trans_m) + except Exception as e: + raise RuntimeError( + f'[red on white]Error during registration of volume {idx}: {e}[/]' + ) + + corrected_vols.append(corrected_vol) + trans_mtx.append(trans_m) + progress.update(task, advance=1) + + # Rebuild the original ASLData object with the corrected volumes + # orig_shape = orig_shape[1:4] + # corrected_vols = np.stack(corrected_vols).reshape(orig_shape) + + return corrected_vols, trans_mtx + + +def _collect_transformation_proportions(total_vols, method, roi): + """ + Collect method values to be used for matrix transformation balancing. + + Args: + total_vols (list): List of ASL volumes. + method (str): Method to use (in accordance to the `select_reference_volume`). 
+ roi (np.ndarray): Region of interest mask. + + Returns: + list: List of calculated values based on the method. + """ + method_values = [] + for vol in total_vols: + if method == 'snr': + value = calculate_snr(vol, roi=roi) + elif method == 'mean': + value = calculate_mean_intensity(vol, roi=roi) + else: + raise ValueError(f'Unknown method: {method}') + method_values.append(value) + + min_val = np.min(method_values) + max_val = np.max(method_values) + if max_val == min_val: + trans_proportions = np.ones_like(method_values) + else: + trans_proportions = (np.array(method_values) - min_val) / ( + max_val - min_val + ) + + return trans_proportions diff --git a/asltk/registration/rigid.py b/asltk/registration/rigid.py deleted file mode 100644 index 8ea9cf5..0000000 --- a/asltk/registration/rigid.py +++ /dev/null @@ -1,144 +0,0 @@ -import numpy as np -import SimpleITK as sitk - - -def rigid_body_registration( - fixed_image: np.ndarray, - moving_image: np.ndarray, - interpolator=sitk.sitkLinear, - iterations: int = 5000, - converge_min: float = 1e-8, -): - """ - Register two images using a rigid body transformation. This methods applies - a Euler 3D transformation in order to register the moving image to the - fixed image. - - The optimization method used is the Gradient Descent. - - Note: - The registration process is based on the SimpleITK library. More details - on how the registration process works can be found at: [Registration Overview](https://simpleitk.readthedocs.io/en/master/registrationOverview.html) - - Args: - fixed_image (np.ndarray): The fixed image as the reference space. - moving_image (np.ndarray): The moving image to be registered. - interpolator (sitk.Interpolator, optional): The interpolation method used in the registration process. Defaults to sitk.sitkLinear. - - Raises: - Exception: fixed_image and moving_image must be a numpy array. - - Returns: - numpy.ndarray: The resampled image. - numpy.ndarray: The transformation matrix. 
- """ - - # Check if the fixed_image is a numpy array. - if not isinstance(fixed_image, np.ndarray) or not isinstance( - moving_image, np.ndarray - ): - raise Exception('fixed_image and moving_image must be a numpy array.') - - fixed_image = sitk.GetImageFromArray(fixed_image) - moving_image = sitk.GetImageFromArray(moving_image) - - # Create the registration method. - registration_method = sitk.ImageRegistrationMethod() - - # Initialize the registration method. - registration_transform = sitk.Euler3DTransform() - initial_transform = sitk.CenteredTransformInitializer( - fixed_image, - moving_image, - registration_transform, - sitk.CenteredTransformInitializerFilter.GEOMETRY, - ) - registration_method.SetInitialTransform(initial_transform) - - # Set the metric. - registration_method.SetMetricAsMattesMutualInformation( - numberOfHistogramBins=50 - ) - registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) - registration_method.SetMetricSamplingPercentage(0.01) - - # Set the optimizer. - registration_method.SetOptimizerAsGradientDescent( - learningRate=1.0, - numberOfIterations=iterations, - convergenceMinimumValue=converge_min, - convergenceWindowSize=10, - ) - registration_method.SetOptimizerScalesFromPhysicalShift() - - # Set the interpolator. - registration_method.SetInterpolator(interpolator) - - # Execute the registration. - final_transform = registration_method.Execute(fixed_image, moving_image) - - # Convert the final transform to a numpy array. - transform_matrix = np.array(final_transform.GetMatrix()).reshape(3, 3) - - # Create a 4x4 transformation matrix. - transformation_matrix = np.eye(4) - transformation_matrix[:3, :3] = transform_matrix - transformation_matrix[:3, 3] = final_transform.GetTranslation() - - # Resample the moving image. 
- resampled_image = sitk.Resample( - moving_image, - fixed_image, - final_transform, - interpolator, - 0.0, - moving_image.GetPixelID(), - ) - - resampled_image = sitk.GetArrayFromImage(resampled_image) - return resampled_image, transformation_matrix - - -# def affine_registration(fixed_image: np.ndarray, moving_image: np.ndarray, interpolator=sitk.sitkLinear, iterations: int = 5000, converge_min: float = 1e-8): - -# # Check if the fixed_image is a numpy array. -# if not isinstance(fixed_image, np.ndarray) or not isinstance(moving_image, np.ndarray): -# raise Exception('fixed_image and moving_image must be a numpy array.') - -# fixed_image = sitk.GetImageFromArray(fixed_image) -# moving_image = sitk.GetImageFromArray(moving_image) - -# # Create the registration method. -# registration_method = sitk.ImageRegistrationMethod() - -# # Initialize the registration method. -# registration_transform = sitk.AffineTransform(3) -# initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, registration_transform, -# sitk.CenteredTransformInitializerFilter.GEOMETRY) -# registration_method.SetInitialTransform(initial_transform) - -# # Set the metric. -# registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) -# registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) -# registration_method.SetMetricSamplingPercentage(0.01) - -# # Set the optimizer. -# registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=iterations, -# convergenceMinimumValue=converge_min, convergenceWindowSize=10) -# registration_method.SetOptimizerScalesFromPhysicalShift() - -# # Set the interpolator. -# registration_method.SetInterpolator(interpolator) - -# # Execute the registration. -# final_transform = registration_method.Execute(fixed_image, moving_image) - -# # Convert the final transform to a numpy array. 
-# transformation_matrix = np.array(final_transform.GetMatrix()).reshape(3, 3) - -# # Resample the moving image. -# resampled_image = sitk.Resample(moving_image, fixed_image, final_transform, interpolator, 0.0, -# moving_image.GetPixelID()) - -# resampled_image = sitk.GetArrayFromImage(resampled_image) -# return resampled_image, transformation_matrix diff --git a/asltk/scripts/cbf.py b/asltk/scripts/cbf.py index f403a13..a21b5e7 100644 --- a/asltk/scripts/cbf.py +++ b/asltk/scripts/cbf.py @@ -10,7 +10,7 @@ from asltk.asldata import ASLData from asltk.reconstruction import CBFMapping -from asltk.utils import load_image, save_image +from asltk.utils.io import load_image, save_image parser = argparse.ArgumentParser( prog='CBF/ATT Mapping', diff --git a/asltk/scripts/dw_asl.py b/asltk/scripts/dw_asl.py index 53a47c6..22ddffb 100644 --- a/asltk/scripts/dw_asl.py +++ b/asltk/scripts/dw_asl.py @@ -14,7 +14,7 @@ log_processing_step, ) from asltk.reconstruction import MultiDW_ASLMapping -from asltk.utils import load_image, save_image +from asltk.utils.io import load_image, save_image warnings.filterwarnings('ignore', category=RuntimeWarning) diff --git a/asltk/scripts/generate_subtracted_asl_image.py b/asltk/scripts/generate_subtracted_asl_image.py index 8a4dd98..6710d32 100644 --- a/asltk/scripts/generate_subtracted_asl_image.py +++ b/asltk/scripts/generate_subtracted_asl_image.py @@ -9,7 +9,7 @@ from rich.progress import track from scipy.linalg import hadamard -from asltk.utils import load_image, save_image +from asltk.utils.io import load_image, save_image parser = argparse.ArgumentParser( prog='Generate Subtracted ASL Image', diff --git a/asltk/scripts/te_asl.py b/asltk/scripts/te_asl.py index f7b2e17..0da38dd 100644 --- a/asltk/scripts/te_asl.py +++ b/asltk/scripts/te_asl.py @@ -13,7 +13,7 @@ log_processing_step, ) from asltk.reconstruction import MultiTE_ASLMapping -from asltk.utils import load_image, save_image +from asltk.utils.io import load_image, save_image 
parser = argparse.ArgumentParser( prog='Multi-TE ASL Mapping', diff --git a/asltk/smooth/gaussian.py b/asltk/smooth/gaussian.py index a062f50..c449c50 100644 --- a/asltk/smooth/gaussian.py +++ b/asltk/smooth/gaussian.py @@ -3,7 +3,7 @@ import numpy as np import SimpleITK as sitk -from asltk.utils import collect_data_volumes +from asltk.utils.image_manipulation import collect_data_volumes def isotropic_gaussian(data, sigma: float = 1.0): diff --git a/asltk/smooth/median.py b/asltk/smooth/median.py index bbf8fce..ec874a0 100644 --- a/asltk/smooth/median.py +++ b/asltk/smooth/median.py @@ -3,7 +3,7 @@ import numpy as np from scipy.ndimage import median_filter -from asltk.utils import collect_data_volumes +from asltk.utils.image_manipulation import collect_data_volumes def isotropic_median(data, size: int = 3): diff --git a/asltk/utils/__init__.py b/asltk/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/asltk/utils/image_manipulation.py b/asltk/utils/image_manipulation.py new file mode 100644 index 0000000..dbad775 --- /dev/null +++ b/asltk/utils/image_manipulation.py @@ -0,0 +1,655 @@ +import os +from typing import Dict, List, Optional, Tuple, Union + +import ants +import numpy as np +import SimpleITK as sitk +from rich import print + +from asltk.logging_config import get_logger +from asltk.utils.image_statistics import ( + analyze_image_properties, + calculate_mean_intensity, + calculate_snr, +) + +logger = get_logger(__name__) + +# Set SimpleITK to use half of available CPU cores (at least 1) +num_cores = max(1, os.cpu_count() // 4 if os.cpu_count() else 1) +sitk.ProcessObject_SetGlobalDefaultNumberOfThreads(num_cores) + + +def collect_data_volumes(data: np.ndarray): + """Collect the data volumes from a higher dimension array. + + This method is used to collect the data volumes from a higher dimension + array. The method assumes that the data is a 4D array, where the first + dimension is the number of volumes. 
The method will collect the volumes + and return a list of 3D arrays. + + The method is used to separate the 3D volumes from the higher dimension + array. This is useful when the user wants to apply a filter to each volume + separately. + + Args: + data (np.ndarray): The data to be separated. + + Returns: + list: A list of 3D arrays, each one representing a volume. + tuple: The original shape of the data. + """ + if not isinstance(data, np.ndarray): + raise TypeError('data is not a numpy array.') + + if data.ndim < 3: + raise ValueError('data is a 3D volume or higher dimensions') + + volumes = [] + # Calculate the number of volumes by multiplying all dimensions except the last three + num_volumes = int(np.prod(data.shape[:-3])) + reshaped_data = data.reshape((int(num_volumes),) + data.shape[-3:]) + for i in range(num_volumes): + volumes.append(reshaped_data[i]) + + return volumes, data.shape + + +def orientation_check( + moving_image: np.ndarray, fixed_image: np.ndarray, threshold: float = 0.1 +) -> Dict[str, any]: + """ + Quick orientation compatibility check between two images. + + This function provides a fast assessment of whether two images + have compatible orientations for registration without applying + any corrections. + + Parameters + ---------- + moving_image : np.ndarray + The moving image to be checked. + fixed_image : np.ndarray + The reference/fixed image. + threshold : float, optional + Correlation threshold to consider orientations compatible. Default is 0.1. 
+ + Returns + ------- + dict + Dictionary containing: + - 'compatible': bool, whether orientations are compatible + - 'correlation': float, normalized correlation between images + - 'recommendation': str, action recommendation + """ + # Normalize images + moving_norm = _normalize_image_intensity(moving_image) + fixed_norm = _normalize_image_intensity(fixed_image) + + # Resize if needed for comparison + # Resize the larger image to match the smaller one to minimize memory overhead + if moving_norm.shape != fixed_norm.shape: + if np.prod(moving_norm.shape) > np.prod(fixed_norm.shape): + moving_norm = _resize_image_to_match(moving_norm, fixed_norm.shape) + else: + fixed_norm = _resize_image_to_match(fixed_norm, moving_norm.shape) + + # Compute correlation + correlation = _compute_normalized_correlation(moving_norm, fixed_norm) + + # Determine compatibility + compatible = correlation > threshold + + if compatible: + recommendation = 'Images appear to have compatible orientations. Registration should proceed normally.' + elif correlation > 0.05: + recommendation = 'Possible orientation mismatch detected. Consider using orientation correction.' + else: + recommendation = 'Strong orientation mismatch detected. Orientation correction is highly recommended.' + + return { + 'compatible': compatible, + 'correlation': correlation, + 'recommendation': recommendation, + } + + +# TODO Evaluate this method and decide if it is needed (or useful...) +# def preview_orientation_correction( +# moving_image: np.ndarray, +# fixed_image: np.ndarray, +# slice_index: Optional[int] = None +# ) -> Dict[str, np.ndarray]: +# """ +# Preview the effect of orientation correction on a specific slice. + +# This function shows the before and after effect of orientation +# correction on a 2D slice, useful for visual validation. + +# Parameters +# ---------- +# moving_image : np.ndarray +# The moving image to be corrected. +# fixed_image : np.ndarray +# The reference/fixed image. 
+# slice_index : int, optional +# Index of the axial slice to preview. If None, uses middle slice. + +# Returns +# ------- +# dict +# Dictionary containing: +# - 'original_slice': np.ndarray, original moving image slice +# - 'corrected_slice': np.ndarray, corrected moving image slice +# - 'fixed_slice': np.ndarray, corresponding fixed image slice +# - 'slice_index': int, the slice index used +# """ +# # Get orientation correction +# corrected_moving, _ = check_and_fix_orientation( +# moving_image, fixed_image, verbose=False +# ) + +# # Determine slice index +# if slice_index is None: +# slice_index = moving_image.shape[0] // 2 + +# # Ensure slice index is valid +# slice_index = max(0, min(slice_index, moving_image.shape[0] - 1)) +# corrected_slice_idx = max(0, min(slice_index, corrected_moving.shape[0] - 1)) +# fixed_slice_idx = max(0, min(slice_index, fixed_image.shape[0] - 1)) + +# return { +# 'original_slice': moving_image[slice_index, :, :], +# 'corrected_slice': corrected_moving[corrected_slice_idx, :, :], +# 'fixed_slice': fixed_image[fixed_slice_idx, :, :], +# 'slice_index': slice_index +# } + + +def check_and_fix_orientation( + moving_image: np.ndarray, + fixed_image: np.ndarray, + moving_spacing: tuple = None, + fixed_spacing: tuple = None, + verbose: bool = False, +): + """ + Check and fix orientation mismatches between moving and fixed images. + + This function analyzes the anatomical orientations of both images and + applies necessary transformations to align them before registration. + It handles common orientation issues like axial, sagittal, or coronal flips. + + The method uses both intensity-based and geometric approaches to determine + the best orientation alignment between images. + + Parameters + ---------- + moving_image : np.ndarray + The moving image that needs to be aligned. + fixed_image : np.ndarray + The reference/fixed image. + moving_spacing : tuple, optional + Voxel spacing for the moving image (x, y, z). If None, assumes isotropic. 
+ fixed_spacing : tuple, optional + Voxel spacing for the fixed image (x, y, z). If None, assumes isotropic. + verbose : bool, optional + If True, prints detailed orientation analysis. Default is False. + + Returns + ------- + corrected_moving : np.ndarray + The moving image with corrected orientation. + orientation_transform : dict + Dictionary containing the applied transformations for reproducibility. + """ + if verbose: + print('Analyzing image orientations...') + + # Convert to SimpleITK images for orientation analysis + moving_sitk = sitk.GetImageFromArray(moving_image) + fixed_sitk = sitk.GetImageFromArray(fixed_image) + + # Set spacing if provided + if moving_spacing is not None: + moving_sitk.SetSpacing(moving_spacing) + if fixed_spacing is not None: + fixed_sitk.SetSpacing(fixed_spacing) + + # Get image dimensions and properties + moving_size = moving_sitk.GetSize() + fixed_size = fixed_sitk.GetSize() + + if verbose: + print(f'Moving image size: {moving_size}') + print(f'Fixed image size: {fixed_size}') + + # Analyze anatomical orientations using intensity patterns + orientation_transform = _analyze_anatomical_orientation( + moving_image, fixed_image, verbose + ) + + # Apply orientation corrections + corrected_moving = _apply_orientation_correction( + moving_image, orientation_transform, verbose + ) + + # Verify the correction using cross-correlation + if verbose: + original_corr = _compute_normalized_correlation( + moving_image, fixed_image + ) + corrected_corr = _compute_normalized_correlation( + corrected_moving, fixed_image + ) + print(f'Original correlation: {original_corr:.4f}') + print(f'Corrected correlation: {corrected_corr:.4f}') + if corrected_corr > original_corr: + print('Orientation correction improved alignment') + else: + print('Orientation correction may not have improved alignment') + + return corrected_moving, orientation_transform + + +def create_orientation_report( + moving_image: np.ndarray, + fixed_image: np.ndarray, + output_path: 
Optional[str] = None, +) -> str: + """ + Create a comprehensive orientation analysis report. + + Parameters + ---------- + moving_image : np.ndarray + The moving image to analyze. + fixed_image : np.ndarray + The reference/fixed image. + output_path : str, optional + Path to save the report. If None, returns the report as string. + + Returns + ------- + str + The orientation analysis report. + """ + # Perform analysis + quick_check = orientation_check(moving_image, fixed_image) + moving_props = analyze_image_properties(moving_image) + fixed_props = analyze_image_properties(fixed_image) + + # Get correction info + corrected_moving, orientation_transform = check_and_fix_orientation( + moving_image, fixed_image, verbose=False + ) + + # Generate report + report = f""" + ORIENTATION ANALYSIS REPORT + ============================ + + QUICK COMPATIBILITY CHECK: + - Orientation Compatible: {quick_check['compatible']} + - Correlation Score: {quick_check['correlation']:.4f} + - Recommendation: {quick_check['recommendation']} + + MOVING IMAGE PROPERTIES: + - Shape: {moving_props['shape']} + - Center of Mass: {moving_props['center_of_mass']} + - Intensity Range: {moving_props['intensity_stats']['min']:.2f} - {moving_props['intensity_stats']['max']:.2f} + - Mean Intensity: {moving_props['intensity_stats']['mean']:.2f} + + FIXED IMAGE PROPERTIES: + - Shape: {fixed_props['shape']} + - Center of Mass: {fixed_props['center_of_mass']} + - Intensity Range: {fixed_props['intensity_stats']['min']:.2f} - {fixed_props['intensity_stats']['max']:.2f} + - Mean Intensity: {fixed_props['intensity_stats']['mean']:.2f} + + ORIENTATION CORRECTION APPLIED: + - X-axis flip: {orientation_transform.get('flip_x', False)} + - Y-axis flip: {orientation_transform.get('flip_y', False)} + - Z-axis flip: {orientation_transform.get('flip_z', False)} + - Axis transpose: {orientation_transform.get('transpose_axes', 'None')} + + RECOMMENDATIONS: + {quick_check['recommendation']} + """.strip() + + if 
output_path: + with open(output_path, 'w') as f: + f.write(report) + print(f'Report saved to: {output_path}') + + return report + + +def select_reference_volume( + asl_data: Union['ASLData', list[np.ndarray]], + roi: np.ndarray = None, + method: str = 'snr', +): + from asltk.asldata import ASLData + + """ + Select a reference volume from the ASL data based on a specified method. + + Parameters + ---------- + asl_data : ASLData + The ASL data object containing the image volumes. + roi : np.ndarray, optional + Region of interest mask to limit the analysis. + method : str + The method to use for selecting the reference volume. Options are: + - 'snr': Select the volume with the highest signal-to-noise ratio. + - 'mean': Select the volume with the highest mean signal intensity. + + Returns + ------- + tuple[np.ndarray, int] + A tuple informing the selected reference volume and its index in the ASL `pcasl` data. + """ + if method not in ('snr', 'mean'): + raise ValueError(f'Invalid method: {method}') + + if roi is not None: + if not isinstance(roi, np.ndarray): + raise TypeError('ROI must be a numpy array.') + if roi.ndim != 3: + raise ValueError('ROI must be a 3D array.') + + if isinstance(asl_data, ASLData): + volumes, _ = collect_data_volumes(asl_data('pcasl')) + elif isinstance(asl_data, list) and all( + isinstance(vol, np.ndarray) for vol in asl_data + ): + volumes = asl_data + else: + raise TypeError( + 'asl_data must be an ASLData object or a list of numpy arrays.' 
+ ) + + if method == 'snr': + logger.info('Estimating maximum SNR from provided volumes...') + ref_volume, vol_idx = _estimate_max_snr(volumes, roi=roi) + logger.info( + f'Selected volume index: {vol_idx} with SNR: {calculate_snr(ref_volume):.2f}' + ) + + elif method == 'mean': + logger.info('Estimating maximum mean from provided volumes...') + ref_volume, vol_idx = _estimate_max_mean(volumes, roi=roi) + logger.info( + f'Selected volume index: {vol_idx} with mean: {ref_volume.mean():.2f}' + ) + else: + raise ValueError(f'Unknown method: {method}') + + return ref_volume, vol_idx + + +def _estimate_max_snr( + volumes: List[np.ndarray], roi: np.ndarray = None +) -> Tuple[np.ndarray, int]: # pragma: no cover + """ + Estimate the maximum SNR from a list of volumes. + + Args: + volumes (List[np.ndarray]): A list of 3D numpy arrays representing the image volumes. + + Raises: + TypeError: If any volume is not a numpy array. + + Returns: + Tuple[np.ndarray, int]: The reference volume and its index. + """ + max_snr_idx = 0 + max_snr_value = 0 + for idx, vol in enumerate(volumes): + if not isinstance(vol, np.ndarray): + logger.error(f'Volume at index {idx} is not a numpy array.') + raise TypeError('All volumes must be numpy arrays.') + + snr_value = calculate_snr(vol, roi=roi) + if snr_value > max_snr_value: + max_snr_value = snr_value + max_snr_idx = idx + + ref_volume = volumes[max_snr_idx] + + return ref_volume, max_snr_idx + + +def _estimate_max_mean( + volumes: List[np.ndarray], roi: np.ndarray = None +) -> Tuple[np.ndarray, int]: + """ + Estimate the maximum mean from a list of volumes. + + Args: + volumes (List[np.ndarray]): A list of 3D numpy arrays representing the image volumes. + + Raises: + TypeError: If any volume is not a numpy array. + + Returns: + Tuple[np.ndarray, int]: The reference volume and its index. 
+ """ + max_mean_idx = 0 + max_mean_value = 0 + for idx, vol in enumerate(volumes): + if not isinstance(vol, np.ndarray): + logger.error(f'Volume at index {idx} is not a numpy array.') + raise TypeError('All volumes must be numpy arrays.') + + mean_value = calculate_mean_intensity(vol, roi=roi) + if mean_value > max_mean_value: + max_mean_value = mean_value + max_mean_idx = idx + + ref_volume = volumes[max_mean_idx] + + return ref_volume, max_mean_idx + + +def _analyze_anatomical_orientation(moving_image, fixed_image, verbose=False): + """ + Analyze anatomical orientations by comparing intensity patterns + and geometric properties of brain images. + """ + orientation_transform = { + 'flip_x': False, + 'flip_y': False, + 'flip_z': False, + 'transpose_axes': None, + } + + # Normalize images for comparison + moving_norm = _normalize_image_intensity(moving_image) + fixed_norm = _normalize_image_intensity(fixed_image) + + # Determine the smaller shape for comparison + moving_size = np.prod(moving_norm.shape) + fixed_size = np.prod(fixed_norm.shape) + if moving_size <= fixed_size: + ref_shape = moving_norm.shape + else: + ref_shape = fixed_norm.shape + + # Test different orientation combinations + best_corr = -1 + best_transform = orientation_transform.copy() + + # Test axis flips + for flip_x in [False, True]: + for flip_y in [False, True]: + for flip_z in [False, True]: + # Apply test transformation + test_img = moving_norm.copy() + if flip_x: + test_img = np.flip(test_img, axis=2) # X axis + if flip_y: + test_img = np.flip(test_img, axis=1) # Y axis + if flip_z: + test_img = np.flip(test_img, axis=0) # Z axis + + # Resize to match reference shape if needed + if test_img.shape != ref_shape: + test_img = _resize_image_to_match(test_img, ref_shape) + + # Also resize fixed_norm if needed + ref_img = fixed_norm + if fixed_norm.shape != ref_shape: + ref_img = _resize_image_to_match(fixed_norm, ref_shape) + + # Compute correlation + corr = 
_compute_normalized_correlation(test_img, ref_img) + + if corr > best_corr: + best_corr = corr + best_transform = { + 'flip_x': flip_x, + 'flip_y': flip_y, + 'flip_z': flip_z, + 'transpose_axes': None, + } + + if verbose: + print( + f'Flip X:{flip_x}, Y:{flip_y}, Z:{flip_z} -> Correlation: {corr:.4f}' + ) + + # Test common axis permutations for different acquisition orientations + axis_permutations = [ + (0, 1, 2), # Original + (0, 2, 1), # Swap Y-Z + (1, 0, 2), # Swap X-Y + (1, 2, 0), # Rotate axes + (2, 0, 1), # Rotate axes + (2, 1, 0), # Swap X-Z + ] + + for axes in axis_permutations[1:]: # Skip original + try: + test_img = np.transpose(moving_norm, axes) + # Resize to match reference shape if needed + if test_img.shape != ref_shape: + test_img = _resize_image_to_match(test_img, ref_shape) + + # Also resize fixed_norm if needed + ref_img = fixed_norm + if fixed_norm.shape != ref_shape: + ref_img = _resize_image_to_match(fixed_norm, ref_shape) + + corr = _compute_normalized_correlation(test_img, ref_img) + + if corr > best_corr: + best_corr = corr + best_transform = { + 'flip_x': False, + 'flip_y': False, + 'flip_z': False, + 'transpose_axes': axes, + } + + if verbose: + print(f'Transpose {axes} -> Correlation: {corr:.4f}') + except Exception as e: + if verbose: + print(f'Failed transpose {axes}: {e}') + continue + + if verbose: + print(f'Best orientation transform: {best_transform}') + print(f'Best correlation: {best_corr:.4f}') + + return best_transform + + +def _apply_orientation_correction(image, orientation_transform, verbose=False): + """Apply the determined orientation corrections to the image.""" + corrected = image.copy() + + # Apply axis transposition first if needed + if orientation_transform['transpose_axes'] is not None: + corrected = np.transpose( + corrected, orientation_transform['transpose_axes'] + ) + if verbose: + print( + f"Applied transpose: {orientation_transform['transpose_axes']}" + ) + + # Apply axis flips + if 
orientation_transform['flip_x']: + corrected = np.flip(corrected, axis=2) + if verbose: + print('Applied X-axis flip') + + if orientation_transform['flip_y']: + corrected = np.flip(corrected, axis=1) + if verbose: + print('Applied Y-axis flip') + + if orientation_transform['flip_z']: + corrected = np.flip(corrected, axis=0) + if verbose: + print('Applied Z-axis flip') + + return corrected + + +def _normalize_image_intensity(image): + """Normalize image intensity to [0, 1] range for comparison.""" + img = image.astype(np.float64) + img_min, img_max = np.min(img), np.max(img) + if img_max > img_min: + img = (img - img_min) / (img_max - img_min) + return img + + +def _resize_image_to_match(source_image, resample_shape): + """Resize source image to match target shape using antsPy (ants).""" + + # Convert numpy array to ANTsImage (assume float32 for compatibility) + ants_img = ants.from_numpy(source_image.astype(np.float32)) + + # Resample to target shape + resampled_img = ants.resample_image( + ants_img, resample_shape, use_voxels=True, interp_type=0 + ) + + # Convert back to numpy array + return resampled_img.numpy() + + +def _compute_normalized_correlation(img1, img2): + """Compute normalized cross-correlation between two images.""" + # Ensure same shape + if img1.shape != img2.shape: + return -1 + + # Flatten images + img1_flat = img1.flatten() + img2_flat = img2.flatten() + + # Remove NaN and infinite values + valid_mask = np.isfinite(img1_flat) & np.isfinite(img2_flat) + if np.sum(valid_mask) == 0: + return -1 + + img1_valid = img1_flat[valid_mask] + img2_valid = img2_flat[valid_mask] + + # Compute correlation coefficient + try: + corr_matrix = np.corrcoef(img1_valid, img2_valid) + correlation = corr_matrix[0, 1] + if np.isnan(correlation): + return -1 + return abs( + correlation + ) # Use absolute value for orientation independence + except: + return -1 diff --git a/asltk/utils/image_statistics.py b/asltk/utils/image_statistics.py new file mode 100644 index 
0000000..71b0585 --- /dev/null +++ b/asltk/utils/image_statistics.py @@ -0,0 +1,167 @@ +from typing import Dict + +import numpy as np +from scipy.ndimage import center_of_mass + + +def calculate_snr(image: np.ndarray, roi: np.ndarray = None) -> float: + """ + Calculate the Signal-to-Noise Ratio (SNR) of a medical image. + + It is assumed the absolute value for SNR, i.e., SNR = |mean_signal| / |std_noise|. + + Parameters + ---------- + image : np.ndarray + The image to analyze. + + Returns + ------- + float + The SNR value of the image. + """ + if not isinstance(image, np.ndarray): + raise ValueError('Input must be a numpy array.') + + if roi is not None: + if not isinstance(roi, np.ndarray): + raise ValueError('ROI must be a numpy array.') + if roi.shape != image.shape: + raise ValueError('ROI shape must match image shape.') + + image_roi = image[roi > 0] + mean_signal = np.mean(image_roi) + noise = image_roi - mean_signal + else: + mean_signal = np.mean(image) + noise = image - mean_signal + + try: + snr = mean_signal / np.std(noise) + except ZeroDivisionError: + snr = float('inf') # If noise is zero, SNR is infinite + + return float(abs(snr)) if not np.isnan(snr) else 0.0 + + +def calculate_mean_intensity( + image: np.ndarray, roi: np.ndarray = None +) -> float: + """ + Calculate the mean intensity of a medical image. + + Parameters + ---------- + image : np.ndarray + The image to analyze. + + roi : np.ndarray, optional + Region of interest (ROI) mask. If provided, only the ROI will be considered. + + Returns + ------- + float + The mean intensity value of the image or ROI. 
+ """ + if not isinstance(image, np.ndarray): + raise ValueError('Input must be a numpy array.') + + if roi is not None: + if not isinstance(roi, np.ndarray): + raise ValueError('ROI must be a numpy array.') + if roi.shape != image.shape: + raise ValueError('ROI shape must match image shape.') + + # Compute mean intensity + if roi is not None: + return float(abs(np.mean(image[roi > 0]))) # Only consider ROI + return float(abs(np.mean(image))) + + +def analyze_image_properties(image: np.ndarray) -> Dict[str, any]: + """ + Analyze basic properties of a medical image for orientation assessment. + + Parameters + ---------- + image : np.ndarray + The image to analyze. + + Returns + ------- + dict + Dictionary containing image properties: + - 'shape': tuple, image dimensions + - 'center_of_mass': tuple, center of mass coordinates + - 'intensity_stats': dict, intensity statistics + - 'symmetry_axes': dict, symmetry analysis for each axis + """ + # Basic properties + shape = image.shape + + # Center of mass + try: + + com = center_of_mass(image > np.mean(image)) + except ImportError: # pragma: no cover + # Fallback calculation without scipy + coords = np.argwhere(image > np.mean(image)) + com = np.mean(coords, axis=0) if len(coords) > 0 else (0, 0, 0) + + # Intensity statistics + intensity_stats = { + 'min': float(np.min(image)), + 'max': float(np.max(image)), + 'mean': float(np.mean(image)), + 'std': float(np.std(image)), + 'median': float(np.median(image)), + } + + # Symmetry analysis + symmetry_axes = {} + for axis in range(3): + # Flip along axis and compare + flipped = np.flip(image, axis=axis) + correlation = _compute_correlation_simple(image, flipped) + symmetry_axes[f'axis_{axis}'] = { + 'symmetry_correlation': correlation, + 'likely_symmetric': correlation > 0.8, + } + + return { + 'shape': shape, + 'center_of_mass': com, + 'intensity_stats': intensity_stats, + 'symmetry_axes': symmetry_axes, + } + + +def _compute_correlation_simple( + img1: np.ndarray, img2: 
np.ndarray +) -> float: # pragma: no cover + """Simple correlation computation without external dependencies.""" + img1_flat = img1.flatten() + img2_flat = img2.flatten() + + if len(img1_flat) != len(img2_flat): + return 0.0 + + # Remove NaN values + valid_mask = np.isfinite(img1_flat) & np.isfinite(img2_flat) + if np.sum(valid_mask) < 2: + return 0.0 + + img1_valid = img1_flat[valid_mask] + img2_valid = img2_flat[valid_mask] + + # Compute correlation + mean1, mean2 = np.mean(img1_valid), np.mean(img2_valid) + std1, std2 = np.std(img1_valid), np.std(img2_valid) + + if std1 == 0 or std2 == 0: + return 0.0 + + correlation = np.mean((img1_valid - mean1) * (img2_valid - mean2)) / ( + std1 * std2 + ) + return abs(correlation) diff --git a/asltk/utils.py b/asltk/utils/io.py similarity index 76% rename from asltk/utils.py rename to asltk/utils/io.py index 7d37f1a..d3f6ec9 100644 --- a/asltk/utils.py +++ b/asltk/utils/io.py @@ -9,59 +9,13 @@ from asltk import AVAILABLE_IMAGE_FORMATS, BIDS_IMAGE_FORMATS -def _check_input_path(full_path: str): - if not os.path.exists(full_path): - raise FileNotFoundError(f'The file {full_path} does not exist.') - - -def _get_file_from_folder_layout( - full_path: str, - subject: str = None, - session: str = None, - modality: str = None, - suffix: str = None, -): - selected_file = None - layout = BIDSLayout(full_path) - if all(param is None for param in [subject, session, modality, suffix]): - for root, _, files in os.walk(full_path): - for file in files: - if '_asl' in file and file.endswith(BIDS_IMAGE_FORMATS): - selected_file = os.path.join(root, file) - else: - layout_files = layout.files.keys() - matching_files = [] - for f in layout_files: - search_pattern = '' - if subject: - search_pattern = f'*sub-*{subject}*' - if session: - search_pattern += search_pattern + f'*ses-*{session}' - if modality: - search_pattern += search_pattern + f'*{modality}*' - if suffix: - search_pattern += search_pattern + f'*{suffix}*' - - if fnmatch.fnmatch(f, 
search_pattern) and f.endswith( - BIDS_IMAGE_FORMATS - ): - matching_files.append(f) - - if not matching_files: - raise FileNotFoundError( - f'ASL image file is missing for subject {subject} in directory {full_path}' - ) - selected_file = matching_files[0] - - return selected_file - - def load_image( full_path: str, subject: str = None, session: str = None, modality: str = None, suffix: str = None, + **kwargs, ): """ Load an image file from a BIDS directory or file using the SimpleITK API. @@ -121,19 +75,25 @@ def load_image( numpy.ndarray: The loaded image array. """ _check_input_path(full_path) + img = None if full_path.endswith(AVAILABLE_IMAGE_FORMATS): # If the full path is a file, then load the image directly - img = sitk.ReadImage(full_path) - return sitk.GetArrayFromImage(img) + img = sitk.GetArrayFromImage(sitk.ReadImage(full_path)) + else: + # If the full path is a directory, then use BIDSLayout to find the file + selected_file = _get_file_from_folder_layout( + full_path, subject, session, modality, suffix + ) + img = sitk.GetArrayFromImage(sitk.ReadImage(selected_file)) - # Check if the full path is a directory using BIDS structure - selected_file = _get_file_from_folder_layout( - full_path, subject, session, modality, suffix - ) + # Check if there are additional parameters + if kwargs.get('average_m0', False): + # If average_m0 is True, then average the M0 image + if img.ndim > 3: + img = np.mean(img, axis=0) - img = sitk.ReadImage(selected_file) - return sitk.GetArrayFromImage(img) + return img def _make_bids_path( @@ -306,85 +266,48 @@ def load_asl_data(fullpath: str): return dill.load(open(fullpath, 'rb')) -def collect_data_volumes(data: np.ndarray): - """ - Collect the data volumes from a higher dimension array. - - This method is used to collect the data volumes from a higher dimension - array. The method works with 4D or 5D arrays, where the volumes are - separated along the higher dimensions. 
The method will collect the volumes - and return a list of 3D arrays. - - Note: - If the input is already 3D, the function returns a list with a single volume (the input itself). - For 4D or 5D arrays with singleton dimensions, the output list will contain all possible 3D volumes, even if some are identical or empty. - - The method is useful when you want to: - - Apply filters to each volume separately - - Process multi-echo or multi-b-value ASL data - - Separate time series data into individual volumes - - Prepare data for volume-wise analysis - - Args: - data (np.ndarray): The data to be separated. Must be at least 3D. - - Returns: - tuple: A tuple containing: - - list: A list of 3D arrays, each one representing a volume. - - tuple: The original shape of the data. - - Examples: - Separate 4D ASL data into individual volumes: - >>> from asltk.asldata import ASLData - >>> asl_data = ASLData(pcasl='./tests/files/pcasl_mte.nii.gz', m0='./tests/files/m0.nii.gz') - >>> pcasl_4d = asl_data('pcasl') - >>> volumes, original_shape = collect_data_volumes(pcasl_4d) - >>> len(volumes) # Number of volumes - 56 - >>> volumes[0].shape # Shape of each volume - (5, 35, 35) - >>> original_shape # Original 5D shape - (8, 7, 5, 35, 35) +def _check_input_path(full_path: str): + if not os.path.exists(full_path): + raise FileNotFoundError(f'The file {full_path} does not exist.') - Process each volume separately: - >>> volumes, shape = collect_data_volumes(pcasl_4d) - >>> # Apply processing to first volume - >>> processed_vol = volumes[0] * 1.5 # Example processing - >>> processed_vol.shape - (5, 35, 35) - Work with 3D data (single volume): - >>> import numpy as np - >>> single_vol = np.random.rand(10, 20, 30) - >>> volumes, shape = collect_data_volumes(single_vol) - >>> len(volumes) - 1 - >>> volumes[0].shape - (10, 20, 30) - - Edge case: 4D with singleton dimension - >>> arr = np.random.rand(1, 10, 20, 30) - >>> vols, shape = collect_data_volumes(arr) - >>> len(vols) - 1 - >>> 
vols[0].shape - (10, 20, 30) - - Raises: - TypeError: If data is not a numpy array. - ValueError: If data has less than 3 dimensions. - """ - if not isinstance(data, np.ndarray): - raise TypeError('data is not a numpy array.') +def _get_file_from_folder_layout( + full_path: str, + subject: str = None, + session: str = None, + modality: str = None, + suffix: str = None, +): + selected_file = None + layout = BIDSLayout(full_path) + if all(param is None for param in [subject, session, modality, suffix]): + for root, _, files in os.walk(full_path): + for file in files: + if '_asl' in file and file.endswith(BIDS_IMAGE_FORMATS): + selected_file = os.path.join(root, file) + else: + layout_files = layout.files.keys() + matching_files = [] + for f in layout_files: + search_pattern = '' + if subject: + search_pattern = f'*sub-*{subject}*' + if session: + search_pattern += search_pattern + f'*ses-*{session}' + if modality: + search_pattern += search_pattern + f'*{modality}*' + if suffix: + search_pattern += search_pattern + f'*{suffix}*' - if data.ndim < 3: - raise ValueError('data is a 3D volume or higher dimensions') + if fnmatch.fnmatch(f, search_pattern) and f.endswith( + BIDS_IMAGE_FORMATS + ): + matching_files.append(f) - volumes = [] - # Calculate the number of volumes by multiplying all dimensions except the last three - num_volumes = int(np.prod(data.shape[:-3])) - reshaped_data = data.reshape((int(num_volumes),) + data.shape[-3:]) - for i in range(num_volumes): - volumes.append(reshaped_data[i]) + if not matching_files: + raise FileNotFoundError( + f'ASL image file is missing for subject {subject} in directory {full_path}' + ) + selected_file = matching_files[0] - return volumes, data.shape + return selected_file diff --git a/docs/api/data.md b/docs/api/data.md new file mode 100644 index 0000000..d791c87 --- /dev/null +++ b/docs/api/data.md @@ -0,0 +1,12 @@ +# Brain Atlas + +The Brain Atlas module provides tools and data structures for representing, manipulating, 
and analyzing brain region information. It serves as a foundational component for working with anatomical brain atlases, enabling users to access region metadata, and collect atlas data into neuroimaging workflows. + +Use this module to facilitate research and development tasks that require standardized brain region definitions and mappings. + +## Note + +This module is intended for use with standardized brain atlases and may require adaptation for custom or non-standard datasets. Refer to the official documentation for integration guidelines and best practices. + + +::: data.brain_atlas \ No newline at end of file diff --git a/docs/api/reports.md b/docs/api/reports.md new file mode 100644 index 0000000..649bbca --- /dev/null +++ b/docs/api/reports.md @@ -0,0 +1,12 @@ +# Brain Parcellation Reports + +The Parcellation Report module provides utilities for generating detailed reports on brain parcellation results. These reports help users summarize, visualize, and interpret the outcomes of brain region segmentation and labeling processes. The module supports integration with the Brain Atlas workflow, enabling streamlined analysis and documentation of parcellation data. + +Use this module to create standardized, reproducible reports that facilitate communication and comparison of parcellation results across studies. + +## Note + +The `reports` module provides quantitative pipelines designed to deliver clear and concise views of scientific data. Refer to the documentation for each available report to determine which method best suits your application. + + +::: data.reports \ No newline at end of file diff --git a/docs/getting_started.md b/docs/getting_started.md index cb8e063..8392f2a 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -80,7 +80,7 @@ As as standard notation, the `asltk` library assumes that all the image data fil 1. 
Loading and saving an image ```python -from asltk.utils import load_image, save_image +from asltk.utils.io import load_image, save_image img = load_image('path/to/pcasl.nii.gz') #Loading an image type(img) diff --git a/examples/orientation_checking_examples.py b/examples/orientation_checking_examples.py index e69de29..93d99d2 100644 --- a/examples/orientation_checking_examples.py +++ b/examples/orientation_checking_examples.py @@ -0,0 +1,237 @@ +# #!/usr/bin/env python3 +# """ +# Example script demonstrating orientation checking and correction for image registration. + +# This script shows how to use the new orientation checking utilities in ASLtk +# to detect and fix orientation mismatches between medical images before registration. +# """ + +# import numpy as np + +# from asltk.data.brain_atlas import BrainAtlas +# from asltk.registration import ( +# check_and_fix_orientation, +# create_orientation_report, +# orientation_check, +# preview_orientation_correction, +# space_normalization, +# ) +# from asltk.utils.io import load_image + + +# def example_basic_orientation_check(): +# """Basic example of orientation checking between two images.""" +# print('=== Basic Orientation Check Example ===') + +# # Load your moving and fixed images +# # Replace these paths with your actual image files +# try: +# moving_image = load_image('path/to/your/moving_image.nii.gz') +# fixed_image = load_image('path/to/your/fixed_image.nii.gz') +# except: +# # Create synthetic example data for demonstration +# print('Using synthetic data for demonstration...') +# moving_image = np.random.rand(64, 64, 64) * 100 +# # Create a flipped version to simulate orientation mismatch +# fixed_image = np.flip(moving_image, axis=0) # Flip axial axis +# print('Created synthetic moving and fixed images with axial flip') + +# # Quick compatibility check +# compatibility = orientation_check(moving_image, fixed_image) +# print(f"Orientation compatible: {compatibility['compatible']}") +# print(f"Correlation 
score: {compatibility['correlation']:.4f}") +# print(f"Recommendation: {compatibility['recommendation']}") + +# # Check and fix orientation +# corrected_moving, transform = check_and_fix_orientation( +# moving_image, fixed_image, verbose=True +# ) + +# print(f'Applied transformations: {transform}') +# print(f'Original shape: {moving_image.shape}') +# print(f'Corrected shape: {corrected_moving.shape}') + +# return moving_image, fixed_image, corrected_moving, transform + + +# def example_registration_with_orientation_check(): +# """Example showing registration with automatic orientation checking.""" +# print('\n=== Registration with Orientation Check Example ===') + +# try: +# # Load your ASL M0 image +# moving_image = load_image('path/to/your/m0_image.nii.gz') + +# # Load brain atlas as template +# atlas = BrainAtlas('MNI2009') + +# print('Performing registration with automatic orientation checking...') + +# # Register with orientation checking enabled (default) +# normalized_image, transforms = space_normalization( +# moving_image=moving_image, +# template_image=atlas, +# transform_type='Affine', +# check_orientation=True, # Enable orientation checking +# orientation_verbose=True, # Show detailed orientation analysis +# ) + +# print('Registration completed successfully!') +# print(f'Original image shape: {moving_image.shape}') +# print(f'Normalized image shape: {normalized_image.shape}') + +# return normalized_image, transforms + +# except Exception as e: +# print(f'Registration example failed (likely missing data): {e}') +# print("This is normal if you don't have the required image files.") +# return None, None + + +# def example_detailed_orientation_analysis(): +# """Example showing detailed orientation analysis and reporting.""" +# print('\n=== Detailed Orientation Analysis Example ===') + +# # Create synthetic data with known orientation mismatch +# original_image = np.random.rand(32, 64, 48) * 100 + +# # Apply various transformations to simulate orientation 
mismatches +# flipped_axial = np.flip(original_image, axis=0) # Axial flip +# flipped_sagittal = np.flip(original_image, axis=2) # Sagittal flip +# transposed = np.transpose(original_image, (1, 0, 2)) # Transpose X-Y + +# test_cases = [ +# ('Original vs Axial Flip', original_image, flipped_axial), +# ('Original vs Sagittal Flip', original_image, flipped_sagittal), +# ('Original vs Transposed', original_image, transposed), +# ] + +# for case_name, moving, fixed in test_cases: +# print(f'\n--- {case_name} ---') + +# # Quick check +# compatibility = orientation_check(moving, fixed) +# print(f"Compatible: {compatibility['compatible']}") +# print(f"Correlation: {compatibility['correlation']:.4f}") + +# # Detailed correction +# corrected, transform = check_and_fix_orientation( +# moving, fixed, verbose=True +# ) + +# print(f'Applied transform: {transform}') + +# # Generate report +# report = create_orientation_report(moving, fixed) +# print('Generated orientation report (first 200 chars):') +# print(report[:200] + '...') + + +# def example_manual_orientation_workflow(): +# """Example showing manual workflow for orientation checking.""" +# print('\n=== Manual Orientation Workflow Example ===') + +# # Simulate loading images +# moving_image = np.random.rand(64, 64, 32) * 100 +# fixed_image = np.flip(moving_image, axis=1) # Y-axis flip + +# print('Step 1: Quick orientation compatibility check') +# compatibility = orientation_check(moving_image, fixed_image) +# print(f'Result: {compatibility}') + +# if not compatibility['compatible']: +# print('\nStep 2: Preview orientation correction') +# preview = preview_orientation_correction(moving_image, fixed_image) +# print(f"Preview generated for slice {preview['slice_index']}") +# print(f"Original slice shape: {preview['original_slice'].shape}") +# print(f"Corrected slice shape: {preview['corrected_slice'].shape}") + +# print('\nStep 3: Apply orientation correction') +# corrected_moving, transform = check_and_fix_orientation( +# 
moving_image, fixed_image, verbose=True +# ) + +# print('\nStep 4: Verify improvement') +# post_correction_check = orientation_check( +# corrected_moving, fixed_image +# ) +# print(f'Post-correction compatibility: {post_correction_check}') + +# improvement = ( +# post_correction_check['correlation'] - compatibility['correlation'] +# ) +# print(f'Correlation improvement: {improvement:.4f}') + +# return corrected_moving, transform +# else: +# print('Images are already compatible - no correction needed') +# return moving_image, None + + +# def example_advanced_usage(): +# """Advanced usage examples and tips.""" +# print('\n=== Advanced Usage Tips ===') + +# # Tip 1: Handling spacing information +# print('Tip 1: Including voxel spacing for better orientation analysis') +# moving_image = np.random.rand(64, 64, 32) * 100 +# fixed_image = np.random.rand(64, 64, 32) * 100 + +# # Voxel spacing in mm (x, y, z) +# moving_spacing = (1.0, 1.0, 3.0) # Typical ASL spacing +# fixed_spacing = (1.0, 1.0, 1.0) # Typical T1 spacing + +# corrected_moving, transform = check_and_fix_orientation( +# moving_image, +# fixed_image, +# moving_spacing=moving_spacing, +# fixed_spacing=fixed_spacing, +# verbose=True, +# ) + +# # Tip 2: Disabling orientation check for speed +# print('\nTip 2: Disabling orientation check when not needed') +# print('Use check_orientation=False in space_normalization() for speed') + +# # Tip 3: Batch processing +# print('\nTip 3: For batch processing, check compatibility first') +# print('Use orientation_check() to identify problematic cases') + +# # Tip 4: Error handling +# print('\nTip 4: Always handle potential errors in orientation checking') +# try: +# # This might fail with incompatible shapes +# incompatible_moving = np.random.rand(100, 50, 25) +# incompatible_fixed = np.random.rand(32, 32, 32) + +# result = orientation_check(incompatible_moving, incompatible_fixed) +# print(f'Handled incompatible shapes: {result}') +# except Exception as e: +# print(f'Caught 
expected error: {e}') + + +# if __name__ == '__main__': +# print('ASLtk Orientation Checking Examples') +# print('=' * 50) + +# # Run all examples +# try: +# example_basic_orientation_check() +# example_registration_with_orientation_check() +# example_detailed_orientation_analysis() +# example_manual_orientation_workflow() +# example_advanced_usage() + +# print('\n' + '=' * 50) +# print('All examples completed successfully!') +# print('\nNext steps:') +# print('1. Replace the synthetic data with your actual image files') +# print( +# '2. Integrate orientation checking into your registration workflow' +# ) +# print('3. Use the orientation utilities for quality control') + +# except Exception as e: +# print(f'\nExample execution failed: {e}') +# print('This is likely due to missing dependencies or data files.') +# print('Please ensure you have the required packages installed.') diff --git a/mkdocs.yml b/mkdocs.yml index 16ee8e5..d3750c4 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -44,6 +44,8 @@ nav: - 'getting_started.md' - 'examples/workflow_examples.md' - 'faq.md' + - 'api/data.md' + - 'api/reports.md' - 'api/asldata.md' - 'api/reconstruction.md' - 'api/utils.md' diff --git a/poetry.lock b/poetry.lock index 29289ce..5f6fab4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,41 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. + +[[package]] +name = "antspyx" +version = "0.5.4" +description = "A fast medical imaging analysis library in Python with algorithms for registration, segmentation, and more." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "antspyx-0.5.4-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:42037a9dc9b02ac0d0393ba98cb945723b489f49709dceb0b51cf02447f44577"}, + {file = "antspyx-0.5.4-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:64c9f633aa94297ae379c7a1220dab3e399d5ff6c617efff30f90ef1802be6ef"}, + {file = "antspyx-0.5.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffdb471fcc805f4f9d689d19253ea2993c646552738cb1a8a68b8d8017e57820"}, + {file = "antspyx-0.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:b45198c5792e456d21943fefd37625849d496a07a5264281dfa999ec21581fe0"}, + {file = "antspyx-0.5.4-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:299a890ccbe17616834a6e637ea11f25f03d299bc537e5148583859151368d20"}, + {file = "antspyx-0.5.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:b3a3269c6989cbe543c3a8e0f47f03ed4e3d61e851881b9cd7f402bce6bbc8cd"}, + {file = "antspyx-0.5.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37235e75f7c361f96e83333f05a979e494eebf47ae69e9f9f8da924c8cc88f37"}, + {file = "antspyx-0.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:15c29fdd2975cc9905cc55d494faf1c7e7d2d56310805c4019623c4e04e8b678"}, + {file = "antspyx-0.5.4-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:314cf1b7a690c4254c3569254db5e1752baf620c8cd49835a0115a53d85cb498"}, + {file = "antspyx-0.5.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1856de7cd182667443743ecb2efc6977bbd1e66b739178b330650c759d57df4a"}, + {file = "antspyx-0.5.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b61cdc43459fc1038f9eea65d148be630fc442c0753ef6d484c0ec6d435d74de"}, + {file = "antspyx-0.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:7705628994baf094e0ec240c0fe3ed6424edc81cca9add4011a368972dd96215"}, + {file = "antspyx-0.5.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8499989a797ba36428b21fad916246c08e11dd0870e58ccc71c728e392598b3f"}, + {file = "antspyx-0.5.4-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:9c495073f09154ed1b3f32fab2026efd8c9549d2002beefc35386dea7136676d"}, + {file = "antspyx-0.5.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b830bfd33aef433f13742ea5b83244f1bd76a85fe33cd54ab953f3d13d15e0"}, + {file = "antspyx-0.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:ed10b48d19d34aeda3ea31f1b1d43996612e4e294c29183aeb9b97a3b6e860dd"}, + {file = "antspyx-0.5.4.tar.gz", hash = "sha256:893e5b45175e278c1e5ab2c4c663f4dbcb08098ee842ba35eca73d785f263749"}, +] + +[package.dependencies] +matplotlib = "*" +numpy = "<=2.0.1" +pandas = "*" +Pillow = "*" +pyyaml = "*" +requests = "*" +statsmodels = "*" +webcolors = "*" [[package]] name = "astor" @@ -6,6 +43,7 @@ version = "0.8.1" description = "Read/rewrite/write Python ASTs" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] files = [ {file = "astor-0.8.1-py2.py3-none-any.whl", hash = "sha256:070a54e890cefb5b3739d19f30f5a5ec840ffc9c50ffa7d23cc9fc1a38ebbfc5"}, {file = "astor-0.8.1.tar.gz", hash = "sha256:6a6effda93f4e1ce9f618779b2dd1d9d84f1e32812c23a29b3fff6fd7f63fa5e"}, @@ -17,18 +55,19 @@ version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = 
["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] 
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\""] [[package]] name = "babel" @@ -36,6 +75,7 @@ version = "2.16.0" description = "Internationalization utilities" optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, @@ -50,6 +90,7 @@ version = "1.14.7.post0" description = "Validator for the Brain Imaging Data Structure" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "bids_validator-1.14.7.post0-py3-none-any.whl", hash = "sha256:a1ee196eae8e5cf3b3fe9fd1985e03997e3e21a40ea3bcb494ff1e0dcec86a89"}, {file = "bids_validator-1.14.7.post0.tar.gz", hash = "sha256:e6005a500b75f8a961593fb67d46085107dadb116f59a5c3b524aa0697945b66"}, @@ -64,6 +105,7 @@ version = "0.11.3.post3" description = "Python tools for working with the BIDS schema." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "bidsschematools-0.11.3.post3-py3-none-any.whl", hash = "sha256:37bc00f9c31c48dca0bd8bf3825f8f5026b499b22fe6c553843255b7fd8653db"}, {file = "bidsschematools-0.11.3.post3.tar.gz", hash = "sha256:18630d0045bf83205a76e56eccf379fbd8661d8a7e5d02701bc29005e56429f7"}, @@ -87,6 +129,7 @@ version = "22.1.0" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.6.2" +groups = ["dev"] files = [ {file = "black-22.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1297c63b9e1b96a3d0da2d85d11cd9bf8664251fd69ddac068b98dc4f34f73b6"}, {file = "black-22.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2ff96450d3ad9ea499fc4c60e425a1439c2120cbbc1ab959ff20f7c76ec7e866"}, @@ -133,6 +176,7 @@ version = "0.9.1" description = "Blue -- Some folks like black but I prefer blue." optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "blue-0.9.1-py3-none-any.whl", hash = "sha256:037742c072c58a2ff024f59fb9164257b907f97f8f862008db3b013d1f27cc22"}, {file = "blue-0.9.1.tar.gz", hash = "sha256:76b4f26884a8425042356601d80773db6e0e14bebaa7a59d7c54bf8cef2e2af5"}, @@ -148,6 +192,7 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main", "doc"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -159,6 +204,7 @@ version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" +groups = ["main", "doc"] files = [ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, @@ -273,6 +319,7 @@ version = "8.1.7" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["main", "dev", "doc"] files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, @@ -287,10 +334,176 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev", "doc"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\""} + +[[package]] +name = "contourpy" +version = "1.3.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, + {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, + {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, + {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, + {file = 
"contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, + {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, + {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, + {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, + {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, + {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, + {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, + {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, + {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, + {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, +] + +[package.dependencies] +numpy = ">=1.23" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx 
(>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] + +[[package]] +name = "contourpy" +version = "1.3.2" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "contourpy-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba38e3f9f330af820c4b27ceb4b9c7feee5fe0493ea53a8720f4792667465934"}, + {file = "contourpy-1.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc41ba0714aa2968d1f8674ec97504a8f7e334f48eeacebcaa6256213acb0989"}, + {file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9be002b31c558d1ddf1b9b415b162c603405414bacd6932d031c5b5a8b757f0d"}, + {file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8d2e74acbcba3bfdb6d9d8384cdc4f9260cae86ed9beee8bd5f54fee49a430b9"}, + {file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e259bced5549ac64410162adc973c5e2fb77f04df4a439d00b478e57a0e65512"}, + {file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad687a04bc802cbe8b9c399c07162a3c35e227e2daccf1668eb1f278cb698631"}, + {file = "contourpy-1.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cdd22595308f53ef2f891040ab2b93d79192513ffccbd7fe19be7aa773a5e09f"}, + {file = "contourpy-1.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b4f54d6a2defe9f257327b0f243612dd051cc43825587520b1bf74a31e2f6ef2"}, + {file = "contourpy-1.3.2-cp310-cp310-win32.whl", hash = "sha256:f939a054192ddc596e031e50bb13b657ce318cf13d264f095ce9db7dc6ae81c0"}, + {file = "contourpy-1.3.2-cp310-cp310-win_amd64.whl", 
hash = "sha256:c440093bbc8fc21c637c03bafcbef95ccd963bc6e0514ad887932c18ca2a759a"}, + {file = "contourpy-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a37a2fb93d4df3fc4c0e363ea4d16f83195fc09c891bc8ce072b9d084853445"}, + {file = "contourpy-1.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b7cd50c38f500bbcc9b6a46643a40e0913673f869315d8e70de0438817cb7773"}, + {file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6658ccc7251a4433eebd89ed2672c2ed96fba367fd25ca9512aa92a4b46c4f1"}, + {file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:70771a461aaeb335df14deb6c97439973d253ae70660ca085eec25241137ef43"}, + {file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65a887a6e8c4cd0897507d814b14c54a8c2e2aa4ac9f7686292f9769fcf9a6ab"}, + {file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3859783aefa2b8355697f16642695a5b9792e7a46ab86da1118a4a23a51a33d7"}, + {file = "contourpy-1.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:eab0f6db315fa4d70f1d8ab514e527f0366ec021ff853d7ed6a2d33605cf4b83"}, + {file = "contourpy-1.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d91a3ccc7fea94ca0acab82ceb77f396d50a1f67412efe4c526f5d20264e6ecd"}, + {file = "contourpy-1.3.2-cp311-cp311-win32.whl", hash = "sha256:1c48188778d4d2f3d48e4643fb15d8608b1d01e4b4d6b0548d9b336c28fc9b6f"}, + {file = "contourpy-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:5ebac872ba09cb8f2131c46b8739a7ff71de28a24c869bcad554477eb089a878"}, + {file = "contourpy-1.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2"}, + {file = "contourpy-1.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15"}, + {file = 
"contourpy-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92"}, + {file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87"}, + {file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415"}, + {file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe"}, + {file = "contourpy-1.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441"}, + {file = "contourpy-1.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e"}, + {file = "contourpy-1.3.2-cp312-cp312-win32.whl", hash = "sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912"}, + {file = "contourpy-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73"}, + {file = "contourpy-1.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb"}, + {file = "contourpy-1.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08"}, + {file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c"}, + {file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f"}, + {file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85"}, + {file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841"}, + {file = "contourpy-1.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422"}, + {file = "contourpy-1.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef"}, + {file = "contourpy-1.3.2-cp313-cp313-win32.whl", hash = "sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f"}, + {file = "contourpy-1.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9"}, + {file = "contourpy-1.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f"}, + {file = "contourpy-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739"}, + {file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823"}, + {file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5"}, + {file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532"}, + {file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b"}, + {file = "contourpy-1.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52"}, + {file = 
"contourpy-1.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd"}, + {file = "contourpy-1.3.2-cp313-cp313t-win32.whl", hash = "sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1"}, + {file = "contourpy-1.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69"}, + {file = "contourpy-1.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fd93cc7f3139b6dd7aab2f26a90dde0aa9fc264dbf70f6740d498a70b860b82c"}, + {file = "contourpy-1.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:107ba8a6a7eec58bb475329e6d3b95deba9440667c4d62b9b6063942b61d7f16"}, + {file = "contourpy-1.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ded1706ed0c1049224531b81128efbd5084598f18d8a2d9efae833edbd2b40ad"}, + {file = "contourpy-1.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5f5964cdad279256c084b69c3f412b7801e15356b16efa9d78aa974041903da0"}, + {file = "contourpy-1.3.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49b65a95d642d4efa8f64ba12558fcb83407e58a2dfba9d796d77b63ccfcaff5"}, + {file = "contourpy-1.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8c5acb8dddb0752bf252e01a3035b21443158910ac16a3b0d20e7fed7d534ce5"}, + {file = "contourpy-1.3.2.tar.gz", hash = "sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54"}, +] + +[package.dependencies] +numpy = ">=1.23" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.15.0)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] [[package]] name = "coverage" @@ -298,6 +511,7 @@ version = "7.6.8" description = "Code coverage 
measurement for Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "coverage-7.6.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b39e6011cd06822eb964d038d5dff5da5d98652b81f5ecd439277b32361a3a50"}, {file = "coverage-7.6.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:63c19702db10ad79151a059d2d6336fe0c470f2e18d0d4d1a57f7f9713875dcf"}, @@ -367,7 +581,23 @@ files = [ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "dill" @@ -375,6 +605,7 @@ version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, @@ -390,6 +621,7 @@ version = "0.6.2" description = "Pythonic argument parser, that will make you smile" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, ] @@ -400,6 +632,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups 
= ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -414,6 +648,7 @@ version = "4.0.1" description = "the modular source code checker: pep8 pyflakes and co" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"}, {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"}, @@ -424,12 +659,79 @@ mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.8.0,<2.9.0" pyflakes = ">=2.4.0,<2.5.0" +[[package]] +name = "fonttools" +version = "4.58.3" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "fonttools-4.58.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e85ed7936ed49d5f8b7611cfd9484087a76fb8d9c20dcfbd54641b8d990a04a4"}, + {file = "fonttools-4.58.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:712c86321ff849e56be5bde902f6d7f05a566ba8d5b7e07cc647616553d7a03d"}, + {file = "fonttools-4.58.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dce8718199b851fa4310a95b6a91b678bd4e4f54829a3e352a0279179e04b5f6"}, + {file = "fonttools-4.58.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cd87a92ab3cbf3bdf7b35b5db549a3af9ded712ffef0ee84bdf0dc39389b8ab6"}, + {file = "fonttools-4.58.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a9382cec40a16b0bd7d81529444742e80ee639d5fdca7c05f88454bb6b1792c2"}, + {file = "fonttools-4.58.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0b0b4db8a56ef21af7743e91822b81f432458299860d76698074252ec0f82a34"}, + {file = 
"fonttools-4.58.3-cp310-cp310-win32.whl", hash = "sha256:82f7f462b0fa1f1f0ddc7522da82fc4a7a6aaf1e383cb5d6b341bb4418599c9d"}, + {file = "fonttools-4.58.3-cp310-cp310-win_amd64.whl", hash = "sha256:cf8f4e9589d3dd464054166a2cb7c6eea75d3ddac925fe0c4fa10b220d0e89b0"}, + {file = "fonttools-4.58.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1c5d78bb6379af6e90f74d234cde8fadfcb15fefe30bc9c6596682f3720aefb8"}, + {file = "fonttools-4.58.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:639e7dd39c31ef38a96cd5638cfa81bb7820d9a08c63c4c69aeadf475eab69dd"}, + {file = "fonttools-4.58.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:009d091e145e6f95eb3baf5a071c260abc7b346a03b120b1661bde18f70701ca"}, + {file = "fonttools-4.58.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e0f6ae31ea08bd42914c658333ceaeaf5b19c775d34013c3249ce6eaf611b827"}, + {file = "fonttools-4.58.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6dd836d0b9050ec340e0b0b442b679a30748b42a6eb5ae96bbed87348c8c9fbc"}, + {file = "fonttools-4.58.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bd7cbc635f66671f3f0fd8ab01ea99fb2f42174c62b492ed35960dd8d8b87518"}, + {file = "fonttools-4.58.3-cp311-cp311-win32.whl", hash = "sha256:1f76aafbf4540a1c84d8794e152d79a63255d6d71eb6886dbd13dfa326e518fd"}, + {file = "fonttools-4.58.3-cp311-cp311-win_amd64.whl", hash = "sha256:c508723f075ad9bee99e380d2329649b387234a7bdc1bb38f4bf65901ab0f383"}, + {file = "fonttools-4.58.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:280b1280b500f4cf50a7f191dfb4f74a0ee0dbb9fe987f6cbba867a2fb58bd75"}, + {file = "fonttools-4.58.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:25df49215abeaf8cf00ce6318db6c960c46d53316b8bd991047d1e5bc37f6dd0"}, + {file = "fonttools-4.58.3-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = 
"sha256:adb352a360df3bb493827321e44f7872b0ddadc874d499d4345e118332fd008e"}, + {file = "fonttools-4.58.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c087f5218cedc777c4313c5bf4041cf4fac119ccfc9dfdfea1d5fedaa527a79e"}, + {file = "fonttools-4.58.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b4a117d5c2e6c951d56113c8710e09d759e3dedf9be630a1e0125f17c10a1c3"}, + {file = "fonttools-4.58.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a572c64265954d9b066187750ee1e7142ed9750d90fc4985575508982b216ab"}, + {file = "fonttools-4.58.3-cp312-cp312-win32.whl", hash = "sha256:b4e9834ccc83c7099709d69462ce7d026f60fe2db3735b3d5b6c3116131791aa"}, + {file = "fonttools-4.58.3-cp312-cp312-win_amd64.whl", hash = "sha256:4095cc3f74d12d90d2f3418a734649d89bef53378abbb506de5579a4d698d22b"}, + {file = "fonttools-4.58.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:06406f6541f478f71d7e170181f39b285fb5be89c9e60d5ca8b31c63d7c998a2"}, + {file = "fonttools-4.58.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a982eb56bf631577548ab690a1b2d20dd0f208569d0c2165ed1e59eb42f39499"}, + {file = "fonttools-4.58.3-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1858a03a1608b5ed067513ba70668bd12866e52b8bbf362d3d17d417b8f017e4"}, + {file = "fonttools-4.58.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:162993e3e18af89055954272a68cbd22873bc5bc32055cbb169a3974049603ce"}, + {file = "fonttools-4.58.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0459128143ebf8469555cb5d70f552f38e9d00bc7b716fa1f9b3406176df3719"}, + {file = "fonttools-4.58.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041a4c1a3aa6d47d18aa9c5d7767e504af2e107d9438f99282c47e8a1e9a83ff"}, + {file = "fonttools-4.58.3-cp313-cp313-win32.whl", hash = "sha256:5a5c384cc683d397a0469ade718261a4dea277383e87729c4a8f7e2d2fe6965f"}, + 
{file = "fonttools-4.58.3-cp313-cp313-win_amd64.whl", hash = "sha256:a555cb665c5539422d7d45ff86fd9e947be4966b39bf7726f7bca33174226c1f"}, + {file = "fonttools-4.58.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c0fccc1e49f9048c956928979157677aabd3bf3c538e4cb69ae44684194312c0"}, + {file = "fonttools-4.58.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:36e35c08a2a3b8bc2bc46060e09b5c2d64b4718151e6340d7ad7dc67f14a0084"}, + {file = "fonttools-4.58.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4aa2e24187d308c9fbaf56fe3dc4b5421c0bf69f00fb7e3d94501a102d49ef71"}, + {file = "fonttools-4.58.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:525d7d7c65402baabe3b9f5ac0e97bedaf89910d4b9344b9cdebe31370abeaaf"}, + {file = "fonttools-4.58.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f6911a35ce45cef7d174960e0a30428cd8fba30c0d274914d1f85ba6f99f3a9d"}, + {file = "fonttools-4.58.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:be3386dd0b48c796aca9788e118b306cec4b1929a46ed0b120e9a2370980ad63"}, + {file = "fonttools-4.58.3-cp39-cp39-win32.whl", hash = "sha256:87ad62b231c55b30603bff67bd7ce0018be99a26335dab41dd605d4fc613416d"}, + {file = "fonttools-4.58.3-cp39-cp39-win_amd64.whl", hash = "sha256:d55ea3707c27c373815602118064478547712d691a593f1f4476d8a245dbb173"}, + {file = "fonttools-4.58.3-py3-none-any.whl", hash = "sha256:b4829a59eb644050f97e6fc3cd3c2e2123535ac16e2d9a5ef7f14690fdc5c0e6"}, + {file = "fonttools-4.58.3.tar.gz", hash = "sha256:de9df7a2a16c9df518be8a5dcf2afd6feac63e26c6d44b29d34c4b697ac09e0e"}, +] + +[package.extras] +all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops 
(>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0) ; python_version <= \"3.12\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr ; sys_platform == \"darwin\""] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0) ; python_version <= \"3.12\""] +woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"] + [[package]] name = "formulaic" version = "0.5.2" description = "An implementation of Wilkinson formulas." optional = false python-versions = ">=3.7.2" +groups = ["main"] files = [ {file = "formulaic-0.5.2-py3-none-any.whl", hash = "sha256:65d04b1249584504912eb64f83b47fc1e7e95b0ff3e24fb0859148e2f2f033c2"}, {file = "formulaic-0.5.2.tar.gz", hash = "sha256:25b1e1c8dff73f0b11c0028a6ab350222de6bbc47b316ccb770cec16189cef53"}, @@ -454,6 +756,7 @@ version = "2024.10.0" description = "File-system specification" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"}, {file = "fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"}, @@ -493,6 +796,7 @@ version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." 
optional = false python-versions = "*" +groups = ["doc"] files = [ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, @@ -510,6 +814,8 @@ version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, @@ -596,6 +902,7 @@ version = "1.5.1" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
optional = false python-versions = ">=3.9" +groups = ["doc"] files = [ {file = "griffe-1.5.1-py3-none-any.whl", hash = "sha256:ad6a7980f8c424c9102160aafa3bcdf799df0e75f7829d75af9ee5aef656f860"}, {file = "griffe-1.5.1.tar.gz", hash = "sha256:72964f93e08c553257706d6cd2c42d1c172213feb48b2be386f243380b405d4b"}, @@ -610,6 +917,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main", "doc"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -624,6 +932,8 @@ version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" +groups = ["doc"] +markers = "python_version == \"3.9\"" files = [ {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, @@ -633,12 +943,12 @@ files = [ zipp = ">=3.20" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] @@ -647,6 +957,8 @@ 
version = "6.4.5" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" +groups = ["main"] +markers = "python_version <= \"3.11\"" files = [ {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, @@ -656,7 +968,7 @@ files = [ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] @@ -669,6 +981,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -680,6 +993,7 @@ version = "1.3.0" description = "`interface_meta` provides a convenient way to expose an extensible API with enforced method signatures and consistent documentation." optional = false python-versions = ">=3.7,<4.0" +groups = ["main"] files = [ {file = "interface_meta-1.3.0-py3-none-any.whl", hash = "sha256:de35dc5241431886e709e20a14d6597ed07c9f1e8b4bfcffde2190ca5b700ee8"}, {file = "interface_meta-1.3.0.tar.gz", hash = "sha256:8a4493f8bdb73fb9655dcd5115bc897e207319e36c8835f39c516a2d7e9d79a1"}, @@ -691,6 +1005,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -705,6 +1020,7 @@ version = "3.1.4" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" +groups = ["doc"] files = [ {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, @@ -722,6 +1038,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -743,6 +1060,7 @@ version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, @@ -751,12 +1069,253 @@ files = [ [package.dependencies] referencing = ">=0.31.0" +[[package]] +name = "kagglehub" +version = "0.3.12" +description = "Access Kaggle resources anywhere" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "kagglehub-0.3.12-py3-none-any.whl", hash = 
"sha256:6eeb7c5c8e1f25a28e9b26d3d8ed0c24a4a32f763ea5de6e1b3aabba442f2a26"}, + {file = "kagglehub-0.3.12.tar.gz", hash = "sha256:45e75854630a30605b794eb786b3757beccbbea1acca71600642f67b60e0d7bf"}, +] + +[package.dependencies] +packaging = "*" +pyyaml = "*" +requests = "*" +tqdm = "*" + +[package.extras] +hf-datasets = ["datasets", "pandas"] +pandas-datasets = ["pandas"] +polars-datasets = ["polars"] +signing = ["betterproto (>=2.0.0b6)", "model-signing", "sigstore (>=3.6.1)"] + +[[package]] +name = "kiwisolver" +version = "1.4.7" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = 
"sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, 
+ {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, + {file = 
"kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, + {file = 
"kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, + {file = 
"kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, + 
{file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, + {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, +] + +[[package]] +name = "kiwisolver" +version = "1.4.8" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e"}, + {file = "kiwisolver-1.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751"}, + {file = "kiwisolver-1.4.8-cp310-cp310-win_arm64.whl", hash = "sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7"}, + {file = 
"kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67"}, + {file = "kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34"}, + {file = "kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8"}, + {file = "kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50"}, + {file = "kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b"}, 
+ {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb"}, + {file = "kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2"}, + {file = "kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b"}, + {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, +] + 
[[package]] name = "markdown" version = "3.7" description = "Python implementation of John Gruber's Markdown." optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"}, {file = "markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2"}, @@ -775,6 +1334,7 @@ version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -799,6 +1359,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" +groups = ["doc"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -863,12 +1424,139 @@ files = [ {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] +[[package]] +name = "matplotlib" +version = "3.9.4" +description = "Python plotting package" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "matplotlib-3.9.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c5fdd7abfb706dfa8d307af64a87f1a862879ec3cd8d0ec8637458f0885b9c50"}, + {file = "matplotlib-3.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d89bc4e85e40a71d1477780366c27fb7c6494d293e1617788986f74e2a03d7ff"}, + {file = 
"matplotlib-3.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddf9f3c26aae695c5daafbf6b94e4c1a30d6cd617ba594bbbded3b33a1fcfa26"}, + {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18ebcf248030173b59a868fda1fe42397253f6698995b55e81e1f57431d85e50"}, + {file = "matplotlib-3.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974896ec43c672ec23f3f8c648981e8bc880ee163146e0312a9b8def2fac66f5"}, + {file = "matplotlib-3.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:4598c394ae9711cec135639374e70871fa36b56afae17bdf032a345be552a88d"}, + {file = "matplotlib-3.9.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4dd29641d9fb8bc4492420c5480398dd40a09afd73aebe4eb9d0071a05fbe0c"}, + {file = "matplotlib-3.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30e5b22e8bcfb95442bf7d48b0d7f3bdf4a450cbf68986ea45fca3d11ae9d099"}, + {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bb0030d1d447fd56dcc23b4c64a26e44e898f0416276cac1ebc25522e0ac249"}, + {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca90ed222ac3565d2752b83dbb27627480d27662671e4d39da72e97f657a423"}, + {file = "matplotlib-3.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a181b2aa2906c608fcae72f977a4a2d76e385578939891b91c2550c39ecf361e"}, + {file = "matplotlib-3.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:1f6882828231eca17f501c4dcd98a05abb3f03d157fbc0769c6911fe08b6cfd3"}, + {file = "matplotlib-3.9.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:dfc48d67e6661378a21c2983200a654b72b5c5cdbd5d2cf6e5e1ece860f0cc70"}, + {file = "matplotlib-3.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:47aef0fab8332d02d68e786eba8113ffd6f862182ea2999379dec9e237b7e483"}, + {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:fba1f52c6b7dc764097f52fd9ab627b90db452c9feb653a59945de16752e965f"}, + {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173ac3748acaac21afcc3fa1633924609ba1b87749006bc25051c52c422a5d00"}, + {file = "matplotlib-3.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320edea0cadc07007765e33f878b13b3738ffa9745c5f707705692df70ffe0e0"}, + {file = "matplotlib-3.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a4a4cfc82330b27042a7169533da7991e8789d180dd5b3daeaee57d75cd5a03b"}, + {file = "matplotlib-3.9.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37eeffeeca3c940985b80f5b9a7b95ea35671e0e7405001f249848d2b62351b6"}, + {file = "matplotlib-3.9.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3e7465ac859ee4abcb0d836137cd8414e7bb7ad330d905abced457217d4f0f45"}, + {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c12302c34afa0cf061bea23b331e747e5e554b0fa595c96e01c7b75bc3b858"}, + {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8c97917f21b75e72108b97707ba3d48f171541a74aa2a56df7a40626bafc64"}, + {file = "matplotlib-3.9.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0229803bd7e19271b03cb09f27db76c918c467aa4ce2ae168171bc67c3f508df"}, + {file = "matplotlib-3.9.4-cp313-cp313-win_amd64.whl", hash = "sha256:7c0d8ef442ebf56ff5e206f8083d08252ee738e04f3dc88ea882853a05488799"}, + {file = "matplotlib-3.9.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a04c3b00066a688834356d196136349cb32f5e1003c55ac419e91585168b88fb"}, + {file = "matplotlib-3.9.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04c519587f6c210626741a1e9a68eefc05966ede24205db8982841826af5871a"}, + {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308afbf1a228b8b525fcd5cec17f246bbbb63b175a3ef6eb7b4d33287ca0cf0c"}, + {file = 
"matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddb3b02246ddcffd3ce98e88fed5b238bc5faff10dbbaa42090ea13241d15764"}, + {file = "matplotlib-3.9.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8a75287e9cb9eee48cb79ec1d806f75b29c0fde978cb7223a1f4c5848d696041"}, + {file = "matplotlib-3.9.4-cp313-cp313t-win_amd64.whl", hash = "sha256:488deb7af140f0ba86da003e66e10d55ff915e152c78b4b66d231638400b1965"}, + {file = "matplotlib-3.9.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3c3724d89a387ddf78ff88d2a30ca78ac2b4c89cf37f2db4bd453c34799e933c"}, + {file = "matplotlib-3.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d5f0a8430ffe23d7e32cfd86445864ccad141797f7d25b7c41759a5b5d17cfd7"}, + {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb0141a21aef3b64b633dc4d16cbd5fc538b727e4958be82a0e1c92a234160e"}, + {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57aa235109e9eed52e2c2949db17da185383fa71083c00c6c143a60e07e0888c"}, + {file = "matplotlib-3.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b18c600061477ccfdd1e6fd050c33d8be82431700f3452b297a56d9ed7037abb"}, + {file = "matplotlib-3.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:ef5f2d1b67d2d2145ff75e10f8c008bfbf71d45137c4b648c87193e7dd053eac"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:44e0ed786d769d85bc787b0606a53f2d8d2d1d3c8a2608237365e9121c1a338c"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:09debb9ce941eb23ecdbe7eab972b1c3e0276dcf01688073faff7b0f61d6c6ca"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc53cf157a657bfd03afab14774d54ba73aa84d42cfe2480c91bd94873952db"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ad45da51be7ad02387801fd154ef74d942f49fe3fcd26a64c94842ba7ec0d865"}, 
+ {file = "matplotlib-3.9.4.tar.gz", hash = "sha256:1e00e8be7393cbdc6fedfa8a6fba02cf3e83814b285db1c60b906a023ba41bc3"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1,<0.17.0)", "numpy (>=1.25)", "pybind11 (>=2.6,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] + +[[package]] +name = "matplotlib" +version = "3.10.3" +description = "Python plotting package" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "matplotlib-3.10.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:213fadd6348d106ca7db99e113f1bea1e65e383c3ba76e8556ba4a3054b65ae7"}, + {file = "matplotlib-3.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3bec61cb8221f0ca6313889308326e7bb303d0d302c5cc9e523b2f2e6c73deb"}, + {file = "matplotlib-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c21ae75651c0231b3ba014b6d5e08fb969c40cdb5a011e33e99ed0c9ea86ecb"}, + {file = "matplotlib-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a49e39755580b08e30e3620efc659330eac5d6534ab7eae50fa5e31f53ee4e30"}, + {file = "matplotlib-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cf4636203e1190871d3a73664dea03d26fb019b66692cbfd642faafdad6208e8"}, + {file = "matplotlib-3.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:fd5641a9bb9d55f4dd2afe897a53b537c834b9012684c8444cc105895c8c16fd"}, + {file = "matplotlib-3.10.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:0ef061f74cd488586f552d0c336b2f078d43bc00dc473d2c3e7bfee2272f3fa8"}, + {file = "matplotlib-3.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:d96985d14dc5f4a736bbea4b9de9afaa735f8a0fc2ca75be2fa9e96b2097369d"}, + {file = "matplotlib-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5f0283da91e9522bdba4d6583ed9d5521566f63729ffb68334f86d0bb98049"}, + {file = "matplotlib-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdfa07c0ec58035242bc8b2c8aae37037c9a886370eef6850703d7583e19964b"}, + {file = "matplotlib-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c0b9849a17bce080a16ebcb80a7b714b5677d0ec32161a2cc0a8e5a6030ae220"}, + {file = "matplotlib-3.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:eef6ed6c03717083bc6d69c2d7ee8624205c29a8e6ea5a31cd3492ecdbaee1e1"}, + {file = "matplotlib-3.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ab1affc11d1f495ab9e6362b8174a25afc19c081ba5b0775ef00533a4236eea"}, + {file = "matplotlib-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2a818d8bdcafa7ed2eed74487fdb071c09c1ae24152d403952adad11fa3c65b4"}, + {file = "matplotlib-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748ebc3470c253e770b17d8b0557f0aa85cf8c63fd52f1a61af5b27ec0b7ffee"}, + {file = "matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed70453fd99733293ace1aec568255bc51c6361cb0da94fa5ebf0649fdb2150a"}, + {file = "matplotlib-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dbed9917b44070e55640bd13419de83b4c918e52d97561544814ba463811cbc7"}, + {file = "matplotlib-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:cf37d8c6ef1a48829443e8ba5227b44236d7fcaf7647caa3178a4ff9f7a5be05"}, + {file = "matplotlib-3.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9f2efccc8dcf2b86fc4ee849eea5dcaecedd0773b30f47980dc0cbeabf26ec84"}, + {file = "matplotlib-3.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3ddbba06a6c126e3301c3d272a99dcbe7f6c24c14024e80307ff03791a5f294e"}, + {file = 
"matplotlib-3.10.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748302b33ae9326995b238f606e9ed840bf5886ebafcb233775d946aa8107a15"}, + {file = "matplotlib-3.10.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80fcccbef63302c0efd78042ea3c2436104c5b1a4d3ae20f864593696364ac7"}, + {file = "matplotlib-3.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55e46cbfe1f8586adb34f7587c3e4f7dedc59d5226719faf6cb54fc24f2fd52d"}, + {file = "matplotlib-3.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:151d89cb8d33cb23345cd12490c76fd5d18a56581a16d950b48c6ff19bb2ab93"}, + {file = "matplotlib-3.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c26dd9834e74d164d06433dc7be5d75a1e9890b926b3e57e74fa446e1a62c3e2"}, + {file = "matplotlib-3.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:24853dad5b8c84c8c2390fc31ce4858b6df504156893292ce8092d190ef8151d"}, + {file = "matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f7878214d369d7d4215e2a9075fef743be38fa401d32e6020bab2dfabaa566"}, + {file = "matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6929fc618cb6db9cb75086f73b3219bbb25920cb24cee2ea7a12b04971a4158"}, + {file = "matplotlib-3.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c7818292a5cc372a2dc4c795e5c356942eb8350b98ef913f7fda51fe175ac5d"}, + {file = "matplotlib-3.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4f23ffe95c5667ef8a2b56eea9b53db7f43910fa4a2d5472ae0f72b64deab4d5"}, + {file = "matplotlib-3.10.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:86ab63d66bbc83fdb6733471d3bff40897c1e9921cba112accd748eee4bce5e4"}, + {file = "matplotlib-3.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a48f9c08bf7444b5d2391a83e75edb464ccda3c380384b36532a0962593a1751"}, + {file = "matplotlib-3.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:cb73d8aa75a237457988f9765e4dfe1c0d2453c5ca4eabc897d4309672c8e014"}, + {file = "matplotlib-3.10.3.tar.gz", hash = "sha256:2f82d2c5bb7ae93aaaa4cd42aca65d76ce6376f83304fa3a630b569aca274df0"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] + [[package]] name = "mccabe" version = "0.6.1" description = "McCabe checker, plugin for flake8" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, @@ -880,6 +1568,7 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -891,6 +1580,7 @@ version = "1.3.4" description = "A deep merge function for 🐍." optional = false python-versions = ">=3.6" +groups = ["doc"] files = [ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, @@ -902,6 +1592,7 @@ version = "1.6.1" description = "Project documentation with Markdown." 
optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, @@ -925,7 +1616,7 @@ watchdog = ">=2.0" [package.extras] i18n = ["babel (>=2.9.0)"] -min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.4)", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] [[package]] name = "mkdocs-autorefs" @@ -933,6 +1624,7 @@ version = "1.2.0" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "mkdocs_autorefs-1.2.0-py3-none-any.whl", hash = "sha256:d588754ae89bd0ced0c70c06f58566a4ee43471eeeee5202427da7de9ef85a2f"}, {file = "mkdocs_autorefs-1.2.0.tar.gz", hash = "sha256:a86b93abff653521bda71cf3fc5596342b7a23982093915cb74273f67522190f"}, @@ -949,6 +1641,7 @@ version = "0.2.0" description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, @@ -966,6 +1659,7 @@ version = "9.5.46" description = "Documentation that simply works" optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "mkdocs_material-9.5.46-py3-none-any.whl", hash = "sha256:98f0a2039c62e551a68aad0791a8d41324ff90c03a6e6cea381a384b84908b83"}, {file = "mkdocs_material-9.5.46.tar.gz", hash = "sha256:ae2043f4238e572f9a40e0b577f50400d6fc31e2fef8ea141800aebf3bd273d7"}, @@ -995,6 +1689,7 @@ version = "1.3.1" description = "Extension pack for Python Markdown and MkDocs Material." optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, @@ -1006,6 +1701,7 @@ version = "0.26.2" description = "Automatic documentation from sources, for MkDocs." 
optional = false python-versions = ">=3.9" +groups = ["doc"] files = [ {file = "mkdocstrings-0.26.2-py3-none-any.whl", hash = "sha256:1248f3228464f3b8d1a15bd91249ce1701fe3104ac517a5f167a0e01ca850ba5"}, {file = "mkdocstrings-0.26.2.tar.gz", hash = "sha256:34a8b50f1e6cfd29546c6c09fbe02154adfb0b361bb758834bf56aa284ba876e"}, @@ -1034,6 +1730,7 @@ version = "1.12.2" description = "A Python handler for mkdocstrings." optional = false python-versions = ">=3.9" +groups = ["doc"] files = [ {file = "mkdocstrings_python-1.12.2-py3-none-any.whl", hash = "sha256:7f7d40d6db3cb1f5d19dbcd80e3efe4d0ba32b073272c0c0de9de2e604eda62a"}, {file = "mkdocstrings_python-1.12.2.tar.gz", hash = "sha256:7a1760941c0b52a2cd87b960a9e21112ffe52e7df9d0b9583d04d47ed2e186f3"}, @@ -1050,6 +1747,8 @@ version = "1.3.0" description = "shlex for windows" optional = false python-versions = ">=3.5" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "mslex-1.3.0-py3-none-any.whl", hash = "sha256:c7074b347201b3466fc077c5692fbce9b5f62a63a51f537a53fbbd02eff2eea4"}, {file = "mslex-1.3.0.tar.gz", hash = "sha256:641c887d1d3db610eee2af37a8e5abda3f70b3006cdfd2d0d29dc0d1ae28a85d"}, @@ -1061,6 +1760,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -1072,6 +1772,7 @@ version = "5.3.2" description = "Access a multitude of neuroimaging data formats" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "nibabel-5.3.2-py3-none-any.whl", hash = "sha256:52970a5a8a53b1b55249cba4d9bcfaa8cc57e3e5af35a29d7352237e8680a6f8"}, {file = "nibabel-5.3.2.tar.gz", hash = "sha256:0bdca6503b1c784b446c745a4542367de7756cfba0d72143b91f9ffb78be569b"}, @@ -1088,7 +1789,7 @@ all = ["h5py", "pillow", "pydicom (>=2.3)", "pyzstd (>=0.14.3)", "scipy"] dev = ["tox"] dicom = ["pydicom (>=2.3)"] dicomfs = ["pillow", "pydicom (>=2.3)"] -doc = ["matplotlib (>=3.5)", "numpydoc", "sphinx", "texext", "tomli"] +doc = ["matplotlib (>=3.5)", "numpydoc", "sphinx", "texext", "tomli ; python_version < \"3.11\""] doctest = ["tox"] minc2 = ["h5py"] spm = ["scipy"] @@ -1103,6 +1804,7 @@ version = "0.5.13" description = "Modules to convert numbers to words. Easily extensible." 
optional = false python-versions = "*" +groups = ["main"] files = [ {file = "num2words-0.5.13-py3-none-any.whl", hash = "sha256:39e662c663f0a7e15415431ea68eb3dc711b49e3b776d93403e1da0a219ca4ee"}, {file = "num2words-0.5.13.tar.gz", hash = "sha256:a3064716fbbf90d75c449450cebfbc73a6a13e63b2531d09bdecc3ab1a2209cf"}, @@ -1117,6 +1819,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -1162,6 +1865,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev", "doc"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1173,6 +1877,7 @@ version = "0.5.7" description = "Divides large result sets into pages for easier browsing" optional = false python-versions = "*" +groups = ["doc"] files = [ {file = "paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591"}, {file = "paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945"}, @@ -1188,6 +1893,7 @@ version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = 
"pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, @@ -1274,17 +1980,137 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false python-versions = ">=3.8" +groups = ["dev", "doc"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] +[[package]] +name = "patsy" +version = "1.0.1" +description = "A Python package for describing statistical models and for building design matrices." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "patsy-1.0.1-py2.py3-none-any.whl", hash = "sha256:751fb38f9e97e62312e921a1954b81e1bb2bcda4f5eeabaf94db251ee791509c"}, + {file = "patsy-1.0.1.tar.gz", hash = "sha256:e786a9391eec818c054e359b737bbce692f051aee4c661f4141cc88fb459c0c4"}, +] + +[package.dependencies] +numpy = ">=1.4" + +[package.extras] +test = ["pytest", "pytest-cov", "scipy"] + +[[package]] +name = "pillow" +version = "11.2.1" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, + {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1"}, + {file = 
"pillow-11.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d"}, + {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97"}, + {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579"}, + {file = "pillow-11.2.1-cp310-cp310-win32.whl", hash = "sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d"}, + {file = "pillow-11.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad"}, + {file = "pillow-11.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2"}, + {file = "pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70"}, + {file = "pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788"}, + {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e"}, + {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e"}, + {file = "pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6"}, + {file = "pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193"}, + {file = "pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7"}, + {file = "pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f"}, + {file = "pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4"}, + {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443"}, + {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c"}, + {file = "pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3"}, + {file = 
"pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941"}, + {file = "pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb"}, + {file = "pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28"}, + {file = "pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155"}, + {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14"}, + {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b"}, + {file = "pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2"}, + {file = "pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691"}, + {file = "pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c"}, + {file = "pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22"}, + {file = "pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91"}, + {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751"}, + {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9"}, + {file = "pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd"}, + {file = "pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e"}, + {file = "pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681"}, + {file = "pillow-11.2.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8"}, + {file = "pillow-11.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb"}, + {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a"}, + {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36"}, + {file = "pillow-11.2.1-cp39-cp39-win32.whl", hash = "sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67"}, + {file = "pillow-11.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1"}, + {file = "pillow-11.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044"}, + {file = "pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +test-arrow = ["pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] +typing 
= ["typing-extensions ; python_version < \"3.10\""] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["dev", "doc"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -1301,6 +2127,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -1316,6 +2143,7 @@ version = "6.1.0" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["dev"] files = [ {file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"}, {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"}, @@ -1346,6 +2174,7 @@ version = "0.17.2" description = "bids: interface with datasets conforming to BIDS" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pybids-0.17.2-py3-none-any.whl", hash = "sha256:68f8c77f0063f6f4d6002e07a7d2b7239a3d6f0904a484f23594490632bcabbf"}, {file = "pybids-0.17.2.tar.gz", hash = "sha256:e0ca455c6876b8e1c28cc6b6d77085e90cca0b21103623759a8c8da2511cc924"}, @@ -1380,6 +2209,7 @@ version = "2.8.0" description = "Python style guide checker" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] files = [ {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"}, {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"}, @@ -1391,6 +2221,7 @@ version = "2.4.0" description = "passive checker of Python programs" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] files = [ {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"}, {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"}, @@ -1402,6 +2233,7 @@ version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" +groups = ["main", "doc"] files = [ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, @@ -1416,6 +2248,7 @@ version = "10.12" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "pymdown_extensions-10.12-py3-none-any.whl", hash = "sha256:49f81412242d3527b8b4967b990df395c89563043bc51a3d2d7d500e52123b77"}, {file = "pymdown_extensions-10.12.tar.gz", hash = "sha256:b0ee1e0b2bef1071a47891ab17003bfe5bf824a398e13f49f8ed653b699369a7"}, @@ -1428,12 +2261,28 @@ pyyaml = "*" [package.extras] extra = ["pygments (>=2.12)"] +[[package]] +name = "pyparsing" +version = "3.2.3" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"}, + {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "pytest" version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, @@ -1456,6 +2305,7 @@ version = "5.0.0" description = "Pytest plugin for measuring coverage." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, @@ -1468,12 +2318,31 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +[[package]] +name = "pytest-mock" +version = "3.14.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0"}, + {file = "pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "doc"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1488,6 +2357,7 @@ version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, @@ -1499,6 +2369,7 @@ version = "6.0.2" 
description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main", "doc"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1561,6 +2432,7 @@ version = "0.1" description = "A custom YAML tag for referencing environment variables in YAML files. " optional = false python-versions = ">=3.6" +groups = ["doc"] files = [ {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, @@ -1575,6 +2447,7 @@ version = "0.35.1" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, @@ -1590,6 +2463,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["doc"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -1693,6 +2567,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main", "doc"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1714,6 +2589,7 @@ version = "13.9.4" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, @@ -1733,6 +2609,7 @@ version = "0.21.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "rpds_py-0.21.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590"}, {file = "rpds_py-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250"}, @@ -1832,6 +2709,7 @@ version = "1.13.1" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca"}, {file = "scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f"}, @@ -1874,6 +2752,7 @@ version = "2.4.0" description = "SimpleITK is a simplified interface to the Insight Toolkit (ITK) for image registration and segmentation" optional = false python-versions = "*" +groups = ["main"] files = [ {file = 
"SimpleITK-2.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8a0493cf49291c6fee067463f2c353690878666500d4799c1bd0facf83302b9a"}, {file = "SimpleITK-2.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aedea771980e558940f0c5ef1ee180a822ebcdbf3b65faf609bfaf45c8b96fc1"}, @@ -1895,6 +2774,7 @@ files = [ {file = "SimpleITK-2.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fc64ef6ba63832ff5dee4112bcc45367d6f2124cdad187f5daf3552bdf2a2d7"}, {file = "SimpleITK-2.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:888ee5e04c9e4e02e7d31f0555fdd88240b7a7a9e883cf40780c51d45aaf3950"}, {file = "SimpleITK-2.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:f3ff657a58ce515c5742eedcd711ddeddb1673b8bac71be725b3182a936e29ff"}, + {file = "simpleitk-2.4.0.tar.gz", hash = "sha256:73e16e25291f8d107409aaad9e9a731840c273726516cd82b8f174a8552ea7c3"}, ] [[package]] @@ -1903,6 +2783,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main", "doc"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -1914,6 +2795,7 @@ version = "2.0.36" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, @@ -2003,12 +2885,65 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "statsmodels" +version = "0.14.4" 
+description = "Statistical computations and models for Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "statsmodels-0.14.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7a62f1fc9086e4b7ee789a6f66b3c0fc82dd8de1edda1522d30901a0aa45e42b"}, + {file = "statsmodels-0.14.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:46ac7ddefac0c9b7b607eed1d47d11e26fe92a1bc1f4d9af48aeed4e21e87981"}, + {file = "statsmodels-0.14.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a337b731aa365d09bb0eab6da81446c04fde6c31976b1d8e3d3a911f0f1e07b"}, + {file = "statsmodels-0.14.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:631bb52159117c5da42ba94bd94859276b68cab25dc4cac86475bc24671143bc"}, + {file = "statsmodels-0.14.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3bb2e580d382545a65f298589809af29daeb15f9da2eb252af8f79693e618abc"}, + {file = "statsmodels-0.14.4-cp310-cp310-win_amd64.whl", hash = "sha256:9729642884147ee9db67b5a06a355890663d21f76ed608a56ac2ad98b94d201a"}, + {file = "statsmodels-0.14.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ed7e118e6e3e02d6723a079b8c97eaadeed943fa1f7f619f7148dfc7862670f"}, + {file = "statsmodels-0.14.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5f537f7d000de4a1708c63400755152b862cd4926bb81a86568e347c19c364b"}, + {file = "statsmodels-0.14.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa74aaa26eaa5012b0a01deeaa8a777595d0835d3d6c7175f2ac65435a7324d2"}, + {file = "statsmodels-0.14.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e332c2d9b806083d1797231280602340c5c913f90d4caa0213a6a54679ce9331"}, + {file = "statsmodels-0.14.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9c8fa28dfd75753d9cf62769ba1fecd7e73a0be187f35cc6f54076f98aa3f3f"}, + {file = "statsmodels-0.14.4-cp311-cp311-win_amd64.whl", hash = 
"sha256:a6087ecb0714f7c59eb24c22781491e6f1cfffb660b4740e167625ca4f052056"}, + {file = "statsmodels-0.14.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5221dba7424cf4f2561b22e9081de85f5bb871228581124a0d1b572708545199"}, + {file = "statsmodels-0.14.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:17672b30c6b98afe2b095591e32d1d66d4372f2651428e433f16a3667f19eabb"}, + {file = "statsmodels-0.14.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab5e6312213b8cfb9dca93dd46a0f4dccb856541f91d3306227c3d92f7659245"}, + {file = "statsmodels-0.14.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bbb150620b53133d6cd1c5d14c28a4f85701e6c781d9b689b53681effaa655f"}, + {file = "statsmodels-0.14.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb695c2025d122a101c2aca66d2b78813c321b60d3a7c86bb8ec4467bb53b0f9"}, + {file = "statsmodels-0.14.4-cp312-cp312-win_amd64.whl", hash = "sha256:7f7917a51766b4e074da283c507a25048ad29a18e527207883d73535e0dc6184"}, + {file = "statsmodels-0.14.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5a24f5d2c22852d807d2b42daf3a61740820b28d8381daaf59dcb7055bf1a79"}, + {file = "statsmodels-0.14.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df4f7864606fa843d7e7c0e6af288f034a2160dba14e6ccc09020a3cf67cb092"}, + {file = "statsmodels-0.14.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91341cbde9e8bea5fb419a76e09114e221567d03f34ca26e6d67ae2c27d8fe3c"}, + {file = "statsmodels-0.14.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1322286a7bfdde2790bf72d29698a1b76c20b8423a55bdcd0d457969d0041f72"}, + {file = "statsmodels-0.14.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e31b95ac603415887c9f0d344cb523889cf779bc52d68e27e2d23c358958fec7"}, + {file = "statsmodels-0.14.4-cp313-cp313-win_amd64.whl", hash = "sha256:81030108d27aecc7995cac05aa280cf8c6025f6a6119894eef648997936c2dd0"}, + {file = 
"statsmodels-0.14.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4793b01b7a5f5424f5a1dbcefc614c83c7608aa2b035f087538253007c339d5d"}, + {file = "statsmodels-0.14.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d330da34f59f1653c5193f9fe3a3a258977c880746db7f155fc33713ea858db5"}, + {file = "statsmodels-0.14.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e9ddefba1d4e1107c1f20f601b0581421ea3ad9fd75ce3c2ba6a76b6dc4682c"}, + {file = "statsmodels-0.14.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f43da7957e00190104c5dd0f661bfc6dfc68b87313e3f9c4dbd5e7d222e0aeb"}, + {file = "statsmodels-0.14.4-cp39-cp39-win_amd64.whl", hash = "sha256:8286f69a5e1d0e0b366ffed5691140c83d3efc75da6dbf34a3d06e88abfaaab6"}, + {file = "statsmodels-0.14.4.tar.gz", hash = "sha256:5d69e0f39060dc72c067f9bb6e8033b6dccdb0bae101d76a7ef0bcc94e898b67"}, +] + +[package.dependencies] +numpy = ">=1.22.3,<3" +packaging = ">=21.3" +pandas = ">=1.4,<2.1.0 || >2.1.0" +patsy = ">=0.5.6" +scipy = ">=1.8,<1.9.2 || >1.9.2" + +[package.extras] +build = ["cython (>=3.0.10)"] +develop = ["colorama", "cython (>=3.0.10)", "cython (>=3.0.10,<4)", "flake8", "isort", "joblib", "matplotlib (>=3)", "pytest (>=7.3.0,<8)", "pytest-cov", "pytest-randomly", "pytest-xdist", "pywinpty ; os_name == \"nt\"", "setuptools-scm[toml] (>=8.0,<9.0)"] +docs = ["ipykernel", "jupyter-client", "matplotlib", "nbconvert", "nbformat", "numpydoc", "pandas-datareader", "sphinx"] + [[package]] name = "taskipy" version = "1.14.1" description = "tasks runner for python projects" optional = false python-versions = "<4.0,>=3.6" +groups = ["dev"] files = [ {file = "taskipy-1.14.1-py3-none-any.whl", hash = "sha256:6e361520f29a0fd2159848e953599f9c75b1d0b047461e4965069caeb94908f1"}, {file = "taskipy-1.14.1.tar.gz", hash = "sha256:410fbcf89692dfd4b9f39c2b49e1750b0a7b81affd0e2d7ea8c35f9d6a4774ed"}, @@ -2026,21 +2961,46 @@ version = "2.1.0" description = "A lil' TOML parser" optional = false 
python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, ] +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + [[package]] name = "typing-extensions" version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev", "doc"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +markers = {dev = "python_version == \"3.9\"", doc = "python_version == \"3.9\""} [[package]] name = "tzdata" @@ -2048,6 +3008,7 @@ version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["main"] files = [ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, {file = "tzdata-2024.2.tar.gz", hash = 
"sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, @@ -2059,6 +3020,7 @@ version = "0.2.5" description = "pathlib api extended to use fsspec backends" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "universal_pathlib-0.2.5-py3-none-any.whl", hash = "sha256:a634f700eca827b4ad03bfa0267e51161560dd1de83b051cf0fccf39b3e56b32"}, {file = "universal_pathlib-0.2.5.tar.gz", hash = "sha256:ea5d4fb8178c2ab469cf4fa46d0ceb16ccb378da46dbbc28a8b9c1eebdccc655"}, @@ -2068,7 +3030,7 @@ files = [ fsspec = ">=2022.1.0,<2024.3.1 || >2024.3.1" [package.extras] -dev = ["adlfs", "aiohttp", "cheroot", "gcsfs", "moto[s3,server]", "paramiko", "pydantic", "pydantic-settings", "requests", "s3fs", "smbprotocol", "webdav4[fsspec]", "wsgidav"] +dev = ["adlfs ; python_version <= \"3.12\" or os_name != \"nt\"", "aiohttp", "cheroot", "gcsfs", "moto[s3,server] ; python_version <= \"3.12\" or os_name != \"nt\"", "paramiko", "pydantic", "pydantic-settings", "requests", "s3fs", "smbprotocol", "webdav4[fsspec]", "wsgidav"] tests = ["mypy (>=1.10.0)", "packaging", "pylint (>=2.17.4)", "pytest (>=8)", "pytest-cov (>=4.1.0)", "pytest-mock (>=3.12.0)", "pytest-mypy-plugins (>=3.1.2)", "pytest-sugar (>=0.9.7)"] [[package]] @@ -2077,13 +3039,14 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" +groups = ["main", "doc"] files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -2094,6 +3057,7 @@ version = "6.0.0" description = "Filesystem events monitoring" optional = false python-versions = ">=3.9" +groups = ["doc"] files = [ {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, @@ -2130,12 +3094,25 @@ files = [ [package.extras] watchmedo = ["PyYAML (>=3.10)"] +[[package]] +name = "webcolors" +version = "24.11.1" +description = "A library for working with the color formats defined by HTML and CSS." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "webcolors-24.11.1-py3-none-any.whl", hash = "sha256:515291393b4cdf0eb19c155749a096f779f7d909f7cceea072791cb9095b92e9"}, + {file = "webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6"}, +] + [[package]] name = "wrapt" version = "1.17.0" description = "Module for decorators, wrappers and monkey patching." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "wrapt-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a0c23b8319848426f305f9cb0c98a6e32ee68a36264f45948ccf8e7d2b941f8"}, {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ca5f060e205f72bec57faae5bd817a1560fcfc4af03f414b08fa29106b7e2d"}, @@ -2210,20 +3187,22 @@ version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" +groups = ["main", "doc"] +markers = "python_version == \"3.9\"" files = [ {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.9" -content-hash = "cbddfa2ddb674c9f5a388395d25b568f2b6222d48d19d30dcadafd41bcdc7153" +content-hash = "22b5e96ebb73b18d56f35bc0bfe79ed019d6931ef0a2f40323ad9c822b3191d2" diff --git a/pyproject.toml b/pyproject.toml index acc680d..3d8d760 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,6 +31,8 @@ rich = "^13.8.1" scipy = 
"^1.13.1" dill = "^0.3.9" pybids = "^0.17.2" +antspyx = "^0.5.4" +kagglehub = "^0.3.12" [tool.poetry.group.dev.dependencies] @@ -39,6 +41,7 @@ pytest-cov = "^5.0.0" blue = "^0.9.1" isort = "^5.13.2" taskipy = "^1.13.0" +pytest-mock = "^3.14.1" [tool.poetry.group.doc.dependencies] diff --git a/tests/data/__init__.py b/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/brain_atlas/__init__.py b/tests/data/brain_atlas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/brain_atlas/test_brain_atlas.py b/tests/data/brain_atlas/test_brain_atlas.py new file mode 100644 index 0000000..57f83f4 --- /dev/null +++ b/tests/data/brain_atlas/test_brain_atlas.py @@ -0,0 +1,160 @@ +import pytest + +from asltk.data.brain_atlas import BrainAtlas + + +def test_set_atlas_raise_error_when_atlas_name_does_not_exist(): + """ + Test if setting an atlas raises an error when the atlas name does not exist. + """ + atlas = BrainAtlas() + with pytest.raises(ValueError) as e: + atlas.set_atlas('non_existent_atlas') + + assert 'not found in the database' in str(e.value) + + +def test_list_all_atlas(): + """ + Test if the BrainAtlas class can list all available atlases. + """ + atlas = BrainAtlas() + atlases = atlas.list_atlas() + assert isinstance(atlases, list), 'The list of atlases should be a list.' + assert len(atlases) > 0, 'There should be at least one atlas available.' + + +def test_get_atlas_url(): + """ + Test if the BrainAtlas class can retrieve the URL of a known atlas. + """ + atlas = BrainAtlas(atlas_name='MNI2009') + url = atlas.get_atlas_url('MNI2009') + assert isinstance(url, str) # The URL should be a string. + assert 'loamri' in url + + +def test_get_atlas_labels(): + """ + Test if the BrainAtlas class can retrieve labels for a known atlas. + """ + atlas = BrainAtlas(atlas_name='MNI2009') + labels = atlas.get_atlas_labels() + assert isinstance(labels, dict) # 'Labels should be a dictionary.' 
+ assert ( + len(labels) > 0 + ) # 'There should be at least one label in the atlas.' + + +@pytest.mark.parametrize('known_atlas', ['AAL', 'HOCSA2006', 'AAT']) +def test_list_all_atlas_contains_known_atlas_parametrized(known_atlas): + """ + Test if known atlases are present in the list of atlases. + """ + atlas = BrainAtlas() + atlases = atlas.list_atlas() + assert any( + known_atlas.lower() in a.lower() for a in atlases + ), f"Known atlas '{known_atlas}' should be in the list." + + +def test_list_all_atlas_contains_known_atlas(): + """ + Test if a known atlas is present in the list of atlases. + """ + atlas = BrainAtlas() + atlases = atlas.list_atlas() + # Replace 'AAL' with a known atlas name if different + assert any( + 'aal' in a.lower() for a in atlases + ), "Known atlas 'AAL' should be in the list." + + +def test_list_all_atlas_unique_names(): + """ + Test that the list of atlases does not contain duplicates. + """ + atlas = BrainAtlas() + atlases = atlas.list_atlas() + assert len(atlases) == len(set(atlases)), 'Atlas names should be unique.' + + +def test_list_all_atlas_string_type(): + """ + Test that all atlas names are strings. + """ + atlas = BrainAtlas() + atlases = atlas.list_atlas() + assert all( + isinstance(a, str) for a in atlases + ), 'All atlas names should be strings.' + + +def test_get_atlas_url_raise_error_when_atlas_name_does_not_exist(): + atlas = BrainAtlas() + with pytest.raises(ValueError) as e: + atlas.get_atlas_url('non_existent_atlas') + + assert 'not found in the database' in str(e) + + +@pytest.mark.parametrize( + 'atlas_name', + [ + 'MNI2009', + 'AAL32024', + 'HOCSA2006', + 'AAT2022', + 'AICHA2021', + 'DKA2006', + 'FCA7N2011', + 'HA2003', + 'JHA2005', + 'LGPHCC2022', + ], +) +def test_brain_atlas_creation_with_various_names(atlas_name): + """ + Test creating BrainAtlas objects with different valid atlas names. 
+ """ + atlas = BrainAtlas(atlas_name=atlas_name) + assert isinstance(atlas.get_atlas(), dict) + + +def test_atlas_download_failure(mocker): + """ + Test that appropriate error is raised when atlas download fails. + """ + atlas = BrainAtlas() + # Mock the kagglehub.dataset_download function to raise an exception + mock_download = mocker.patch( + 'kagglehub.dataset_download', side_effect=Exception('Connection error') + ) + + # Attempt to set an atlas that would trigger the download + with pytest.raises(ValueError) as e: + atlas.set_atlas('MNI2009') # This should try to download the atlas + + # Verify the error message contains the expected text + assert 'Error downloading the atlas' in str(e.value) + assert 'Connection error' in str(e.value) + + # Verify that the mocked function was called + mock_download.assert_called_once() + + +def test_atlas_url_raises_error_when_atlas_not_set(): + """ + Test that appropriate error is raised when trying to get atlas URL + without setting an atlas first. 
+ """ + atlas = BrainAtlas() + atlas._chosen_atlas = None # Simulate that no atlas is set + # Don't set any atlas, which should cause an AttributeError in the implementation + # that's caught and converted to a ValueError + with pytest.raises(Exception) as e: + # Access the private method directly since we want to test the specific exception handling + atlas.get_atlas_url('MNI2009') + + # Verify the error message + assert 'is not set or does not have a dataset URL' in str(e.value) diff --git a/tests/data/reports/__init__.py b/tests/data/reports/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/reports/test_basic_report.py b/tests/data/reports/test_basic_report.py index e69de29..f5a05e7 100644 --- a/tests/data/reports/test_basic_report.py +++ b/tests/data/reports/test_basic_report.py @@ -0,0 +1,62 @@ +# import pytest + +# from asltk.data.reports import BasicReport + + +# def test_basic_report_create_object_success(): +# """ +# Test the BasicReport class. +# This test checks if the report can be generated and saved correctly. +# """ +# # Create an instance of BasicReport +# class TestClass(BasicReport): +# def __init__(self, title='Test Report'): +# super().__init__(title=title) + +# def generate_report(self): +# pass + +# def save_report(self, path): +# pass + +# report = TestClass() + +# assert isinstance(report, BasicReport) +# assert report.title == 'Test Report' +# assert report.report is None + + +# def test_basic_report_create_object_raise_error_when_report_not_generated_yet(): +# """ +# Test the BasicReport class. +# This test checks if the report can be generated and saved correctly. 
+# """ +# # Create an instance of BasicReport +# class TestClass(BasicReport): +# def __init__(self, title='Test Report'): +# super().__init__(title=title) + +# def generate_report(self): +# pass + +# def save_report(self, path): +# # Call the parent method to get the validation check +# super().save_report(path) + +# report = TestClass() +# with pytest.raises(Exception) as e: +# report.save_report('dummy_path') + +# assert 'Report has not been generated yet' in str(e.value) + + +# def test_basic_report_generate_report_abstract_method(): +# """ +# Test that the generate_report method raises NotImplementedError. +# This test checks if the abstract method is correctly defined and raises an error when called. +# """ + +# with pytest.raises(Exception) as e: +# report = BasicReport(title='Test Report') + +# assert isinstance(e.value, TypeError) diff --git a/tests/data/reports/test_parcellation_report.py b/tests/data/reports/test_parcellation_report.py new file mode 100644 index 0000000..6d66e2f --- /dev/null +++ b/tests/data/reports/test_parcellation_report.py @@ -0,0 +1,11 @@ +# from asltk.data.reports import ParcellationReport + +# # def test_parcellation_report_create_object_sucess(): +# # """ +# # Test the ParcellationReport class. +# # This test checks if the report can be generated and saved correctly. 
+# # """ +# # # Create an instance of ParcellationReport +# # report = ParcellationReport(atlas_name='MNI2009') + +# # assert isinstance(report, ParcellationReport) diff --git a/tests/reconstruction/test_cbf_mapping.py b/tests/reconstruction/test_cbf_mapping.py index 6e315f1..2400771 100644 --- a/tests/reconstruction/test_cbf_mapping.py +++ b/tests/reconstruction/test_cbf_mapping.py @@ -6,7 +6,7 @@ from asltk.asldata import ASLData from asltk.reconstruction import CBFMapping -from asltk.utils import load_image +from asltk.utils.io import load_image SEP = os.sep @@ -117,7 +117,6 @@ def test_set_brain_mask_gives_binary_image_using_correct_label_value(): assert np.min(cbf._brain_mask) == np.uint8(0) -# def test_ TODO Teste se mask tem mesma dimensao que 3D asl def test_set_brain_mask_raise_error_if_image_dimension_is_different_from_3d_volume(): cbf = CBFMapping(asldata_te) pcasl_3d_vol = load_image(PCASL_MTE)[0, 0, :, :, :] diff --git a/tests/reconstruction/test_multi_dw_mapping.py b/tests/reconstruction/test_multi_dw_mapping.py index 0c88add..f062ec1 100644 --- a/tests/reconstruction/test_multi_dw_mapping.py +++ b/tests/reconstruction/test_multi_dw_mapping.py @@ -7,7 +7,7 @@ from asltk.asldata import ASLData from asltk.reconstruction import MultiDW_ASLMapping -from asltk.utils import load_image +from asltk.utils.io import load_image SEP = os.sep diff --git a/tests/reconstruction/test_multi_te_mapping.py b/tests/reconstruction/test_multi_te_mapping.py index daad2e6..f619672 100644 --- a/tests/reconstruction/test_multi_te_mapping.py +++ b/tests/reconstruction/test_multi_te_mapping.py @@ -7,7 +7,7 @@ from asltk.asldata import ASLData from asltk.reconstruction import CBFMapping, MultiTE_ASLMapping -from asltk.utils import load_image +from asltk.utils.io import load_image SEP = os.sep diff --git a/tests/registration/__init__.py b/tests/registration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/registration/test_asl_normalization.py 
b/tests/registration/test_asl_normalization.py new file mode 100644 index 0000000..c9fa0c4 --- /dev/null +++ b/tests/registration/test_asl_normalization.py @@ -0,0 +1,157 @@ +# import os + +# import numpy as np +# import pytest + +# from asltk.asldata import ASLData +# from asltk.registration.asl_normalization import ( +# asl_template_registration, +# head_movement_correction, +# ) + +# SEP = os.sep +# M0_ORIG = ( +# f'tests' + SEP + 'files' + SEP + 'registration' + SEP + 'm0_mean.nii.gz' +# ) +# M0_RIGID = ( +# f'tests' +# + SEP +# + 'files' +# + SEP +# + 'registration' +# + SEP +# + 'm0_mean-rigid-25degrees.nrrd' +# ) +# PCASL_MTE = f'tests' + SEP + 'files' + SEP + 'pcasl_mte.nii.gz' +# M0 = f'tests' + SEP + 'files' + SEP + 'm0.nii.gz' + + +# def test_head_movement_correction_build_asldata_success(): +# pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + +# asldata, _ = head_movement_correction(pcasl_orig) + +# assert asldata('pcasl').shape == pcasl_orig('pcasl').shape + + +# def test_head_movement_correction_error_input_is_not_ASLData_object(): +# with pytest.raises(TypeError) as e: +# head_movement_correction('invalid_input') + +# assert str(e.value) == 'Input must be an ASLData object.' + + +# def test_head_movement_correction_error_ref_vol_is_not_int(): +# pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + +# with pytest.raises(Exception) as e: +# head_movement_correction(pcasl_orig, ref_vol='invalid_ref_vol') + +# assert ( +# str(e.value) +# == 'ref_vol must be an positive integer based on the total asl data volumes.' 
+# ) + + +# def test_head_movement_correction_success(): +# pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + +# pcasl_corrected, trans_mtxs = head_movement_correction( +# pcasl_orig, verbose=True +# ) + +# assert pcasl_corrected('pcasl').shape == pcasl_orig('pcasl').shape +# # assert ( +# # np.abs( +# # np.mean(np.subtract(pcasl_corrected('pcasl'), pcasl_orig('pcasl'))) +# # ) +# # > np.abs(np.mean(pcasl_orig('pcasl')) * 0.01) +# # ) +# assert any(not np.array_equal(mtx, np.eye(4)) for mtx in trans_mtxs) + + +# def test_head_movement_correction_returns_asl_data_corrected(): +# pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + +# asl_data_corrected, _ = head_movement_correction(pcasl_orig) + +# assert isinstance(asl_data_corrected, ASLData) +# assert asl_data_corrected('pcasl').shape == pcasl_orig('pcasl').shape +# assert asl_data_corrected('pcasl').dtype == pcasl_orig('pcasl').dtype + + +# # TODO Arrumar o path do arquivo de template +# # def test_asl_template_registration_success(): +# # pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) +# # # pcasl_orig = ASLData( +# # # pcasl='/home/antonio/Imagens/loamri-samples/20240909/pcasl.nii.gz', +# # # m0='/home/antonio/Imagens/loamri-samples/20240909/m0.nii.gz', +# # # ) +# # # asl_data_mask = np.ones_like(pcasl_orig('m0'), dtype=bool) + +# # asl_data_registered, trans_mtxs = asl_template_registration( +# # pcasl_orig, +# # atlas_name='MNI2009', +# # verbose=True, +# # ) + +# # assert isinstance(asl_data_registered, ASLData) +# # assert asl_data_registered('pcasl').shape == pcasl_orig('pcasl').shape +# # assert isinstance(trans_mtxs, list) +# # assert len(trans_mtxs) == pcasl_orig('pcasl').shape[0] + + +# def test_asl_template_registration_invalid_input_type(): +# with pytest.raises(TypeError) as e: +# asl_template_registration('not_asldata') +# assert str(e.value) == 'Input must be an ASLData object.' 
+ + +# # def test_asl_template_registration_invalid_ref_vol_type(): +# # pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) +# # with pytest.raises(ValueError) as e: +# # asl_template_registration(pcasl_orig, ref_vol='invalid') +# # assert str(e.value) == 'ref_vol must be a non-negative integer.' + + +# # def test_asl_template_registration_invalid_ref_vol_type_with_negative_volume(): +# # pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) +# # with pytest.raises(ValueError) as e: +# # asl_template_registration(pcasl_orig, ref_vol=-1) +# # assert str(e.value) == 'ref_vol must be a non-negative integer.' + + +# # def test_asl_template_registration_invalid_ref_vol_index(): +# # pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) +# # n_vols = 1000000 +# # with pytest.raises(ValueError) as e: +# # asl_template_registration(pcasl_orig, ref_vol=n_vols) +# # assert 'ref_vol must be a valid index' in str(e.value) + + +# # def test_asl_template_registration_create_another_asldata_object(): +# # pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + +# # asl_data_registered, _ = asl_template_registration( +# # pcasl_orig, +# # ref_vol=0, +# # atlas_name='MNI2009', +# # verbose=True, +# # ) + +# # assert isinstance(asl_data_registered, ASLData) +# # assert asl_data_registered('pcasl').shape == pcasl_orig('pcasl').shape +# # assert asl_data_registered('m0').shape == pcasl_orig('m0').shape +# # assert asl_data_registered is not pcasl_orig + + +# # def test_asl_template_registration_returns_transforms(): +# # pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) +# # asl_data_mask = np.ones_like(pcasl_orig('pcasl')[0], dtype=bool) + +# # asl_data_registered, trans_mtxs = asl_template_registration( +# # pcasl_orig, ref_vol=0, asl_data_mask=asl_data_mask +# # ) + +# # assert isinstance(trans_mtxs, list) +# # assert all(isinstance(mtx, np.ndarray) for mtx in trans_mtxs) diff --git a/tests/registration/test_registration.py b/tests/registration/test_registration.py new file mode 100644 index 0000000..3bb4590 --- 
/dev/null +++ b/tests/registration/test_registration.py @@ -0,0 +1,365 @@ +import os + +import numpy as np +import pytest + +from asltk.asldata import ASLData +from asltk.data.brain_atlas import BrainAtlas +from asltk.registration import ( + affine_registration, + apply_transformation, + rigid_body_registration, + space_normalization, +) +from asltk.registration.asl_normalization import head_movement_correction +from asltk.utils.io import load_image + +SEP = os.sep +M0_ORIG = ( + f'tests' + SEP + 'files' + SEP + 'registration' + SEP + 'm0_mean.nii.gz' +) +M0_RIGID = ( + f'tests' + + SEP + + 'files' + + SEP + + 'registration' + + SEP + + 'm0_mean-rigid-25degrees.nrrd' +) +PCASL_MTE = f'tests' + SEP + 'files' + SEP + 'pcasl_mte.nii.gz' +M0 = f'tests' + SEP + 'files' + SEP + 'm0.nii.gz' + + +def test_head_movement_correction_build_asldata_success(): + pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + + asldata, _ = head_movement_correction(pcasl_orig) + + assert asldata('pcasl').shape == pcasl_orig('pcasl').shape + + +def test_head_movement_correction_error_input_is_not_ASLData_object(): + with pytest.raises(TypeError) as e: + head_movement_correction('invalid_input') + + assert str(e.value) == 'Input must be an ASLData object.' + + +def test_head_movement_correction_error_ref_vol_is_not_int(): + pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + + with pytest.raises(Exception) as e: + head_movement_correction(pcasl_orig, ref_vol='invalid_ref_vol') + + assert ( + str(e.value) + == 'ref_vol must be a valid volume from the total asl data volumes.' 
+ ) + + +def test_head_movement_correction_success(): + pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + + pcasl_corrected, trans_mtxs = head_movement_correction( + pcasl_orig, verbose=True + ) + + assert pcasl_corrected('pcasl').shape == pcasl_orig('pcasl').shape + assert ( + np.abs( + np.mean(np.subtract(pcasl_corrected('pcasl'), pcasl_orig('pcasl'))) + ) + != 0 + ) + assert any(not np.array_equal(mtx, np.eye(4)) for mtx in trans_mtxs) + + +def test_rigid_body_registration_run_sucess(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + resampled_image, _ = rigid_body_registration(img_orig, img_rot) + + assert ( + np.mean(np.subtract(img_orig, resampled_image)) + < np.mean(img_orig) * 0.5 + ) + + +@pytest.mark.parametrize( + 'img_rot', [('invalid_image'), ([1, 2, 3]), (['a', 1, 5.23])] +) +def test_rigid_body_registration_error_fixed_image_is_not_numpy_array(img_rot): + img_orig = load_image(M0_ORIG) + + with pytest.raises(Exception) as e: + rigid_body_registration(img_orig, img_rot) + + assert ( + str(e.value) == 'fixed_image and moving_image must be a numpy array.' + ) + + +def test_rigid_body_registration_output_registration_matrix_success(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + _, trans_matrix = rigid_body_registration(img_orig, img_rot) + + assert isinstance(trans_matrix[0], str) + + +def test_rigid_body_registration_raise_exception_if_moving_mask_not_numpy(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + with pytest.raises(Exception) as e: + rigid_body_registration(img_orig, img_rot, moving_mask='invalid_mask') + + assert str(e.value) == 'moving_mask must be a numpy array.' 
+ + +def test_rigid_body_registration_raise_exception_if_template_mask_not_numpy(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + with pytest.raises(Exception) as e: + rigid_body_registration( + img_orig, img_rot, template_mask='invalid_mask' + ) + + assert str(e.value) == 'template_mask must be a numpy array.' + + +def test_space_normalization_success(): + pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + + normalized_image, transform = space_normalization( + pcasl_orig('m0'), + template_image='MNI2009', + transform_type='Affine', + verbose=True, + ) + + assert isinstance(normalized_image, np.ndarray) + assert normalized_image.shape == (182, 218, 182) + assert len(transform) == 1 + + +def test_space_normalization_success_transform_type_Affine(): + pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) + + # Use the ASLData object directly + normalized_image, transform = space_normalization( + pcasl_orig('m0'), template_image='MNI2009', transform_type='Affine' + ) + + assert isinstance(normalized_image, np.ndarray) + assert normalized_image.shape == (182, 218, 182) + assert len(transform) == 1 + + +def test_space_normalization_raise_exception_if_fixed_image_not_numpy(): + with pytest.raises(Exception) as e: + space_normalization('invalid_image', template_image='MNI2009') + + assert ( + 'moving_image must be a numpy array and template_image must be a BrainAtlas object' + in str(e.value) + ) + + +def test_space_normalization_raise_exception_if_template_image_not_a_valid_BrainAtlas_option(): + img_orig = load_image(M0_ORIG) + + with pytest.raises(Exception) as e: + space_normalization(img_orig, template_image='invalid_image') + + assert 'Atlas invalid_image not found' in str(e.value) + + +def test_space_normalization_success_passing_template_image_as_BrainAtlas_option(): + img_orig = load_image(M0_ORIG) + + # Use the BrainAtlas object directly + normalized_image, transform = space_normalization( + img_orig, template_image='MNI2009' + ) + + assert 
isinstance(normalized_image, np.ndarray) + assert normalized_image.shape == (182, 218, 182) + assert len(transform) == 2 + + +def test_space_normalization_success_passing_template_image_as_BrainAtlas_object(): + img_orig = load_image(M0_ORIG) + atlas = BrainAtlas(atlas_name='MNI2009') + + # Use the BrainAtlas object directly + normalized_image, transform = space_normalization( + img_orig, template_image=atlas + ) + + assert isinstance(normalized_image, np.ndarray) + assert normalized_image.shape == (182, 218, 182) + assert len(transform) == 2 + + +def test_affine_registration_success(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + resampled_image, _ = affine_registration(img_orig, img_rot) + + assert ( + np.mean(np.subtract(img_orig, resampled_image)) + < np.mean(img_orig) * 0.5 + ) + + +def test_affine_registration_raise_exception_if_fixed_image_not_numpy(): + img_rot = load_image(M0_RIGID) + + with pytest.raises(Exception) as e: + affine_registration('invalid_image', img_rot) + + assert ( + str(e.value) == 'fixed_image and moving_image must be a numpy array.' + ) + + +def test_affine_registration_raise_exception_if_moving_image_not_numpy(): + img_orig = load_image(M0_ORIG) + + with pytest.raises(Exception) as e: + affine_registration(img_orig, 'invalid_image') + + assert ( + str(e.value) == 'fixed_image and moving_image must be a numpy array.' + ) + + +def test_affine_registration_raise_exception_if_moving_mask_not_numpy(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + with pytest.raises(Exception) as e: + affine_registration(img_orig, img_rot, moving_mask='invalid_mask') + + assert str(e.value) == 'moving_mask must be a numpy array.' 
+ + +def test_affine_registration_raise_exception_if_template_mask_not_numpy(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + with pytest.raises(Exception) as e: + affine_registration(img_orig, img_rot, template_mask='invalid_mask') + + assert str(e.value) == 'template_mask must be a numpy array.' + + +def test_affine_registration_fast_method(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + resampled_image, _ = affine_registration( + img_orig, img_rot, fast_method=True + ) + + assert isinstance(resampled_image, np.ndarray) + assert resampled_image.shape == img_rot.shape + assert np.mean(np.abs(img_orig - resampled_image)) < 0.5 * np.mean( + img_orig + ) + + +def test_affine_registration_slow_method(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + + resampled_image, _ = affine_registration( + img_orig, img_rot, fast_method=False + ) + + assert isinstance(resampled_image, np.ndarray) + assert resampled_image.shape == img_rot.shape + assert np.mean(np.abs(img_orig - resampled_image)) < 0.5 * np.mean( + img_orig + ) + + +def test_apply_transformation_success(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + # Get transformation matrix from rigid registration + _, trans_matrix = rigid_body_registration(img_orig, img_rot) + # Apply transformation + transformed_img = apply_transformation(img_rot, img_orig, trans_matrix) + assert isinstance(transformed_img, np.ndarray) + assert transformed_img.shape == img_rot.shape + assert np.mean(np.abs(transformed_img - img_rot)) < 0.5 * np.mean(img_rot) + + +def test_apply_transformation_invalid_fixed_image(): + img_rot = load_image(M0_RIGID) + _, trans_matrix = rigid_body_registration(img_rot, img_rot) + with pytest.raises(Exception) as e: + apply_transformation('invalid_image', img_rot, trans_matrix) + assert 'moving image must be numpy array' in str(e.value) + + +def test_apply_transformation_invalid_moving_image(): + img_orig = 
load_image(M0_ORIG) + _, trans_matrix = rigid_body_registration(img_orig, img_orig) + with pytest.raises(Exception) as e: + apply_transformation(img_orig, 'invalid_image', trans_matrix) + assert 'reference_image must be a numpy array' in str(e.value) + + +def test_apply_transformation_invalid_transformation_matrix(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + with pytest.raises(Exception) as e: + apply_transformation(img_orig, img_rot, 'invalid_matrix') + assert 'transforms must be a list of transformation matrices' in str( + e.value + ) + + +def test_apply_transformation_with_mask(): + img_orig = load_image(M0_ORIG) + img_rot = load_image(M0_RIGID) + mask = np.ones_like(img_orig, dtype=bool) + _, trans_matrix = rigid_body_registration(img_orig, img_rot) + transformed_img = apply_transformation( + img_orig, img_rot, trans_matrix, mask=mask + ) + assert isinstance(transformed_img, np.ndarray) + assert transformed_img.shape == img_rot.shape + + +def test_apply_transformation_with_BrainAtlas_reference_input_error(): + img_rot = load_image(M0_RIGID) + img_orig = load_image(M0_ORIG) + _, trans_matrix = rigid_body_registration(img_orig, img_rot) + with pytest.raises(Exception) as e: + apply_transformation(img_rot, 'invalid atlas', trans_matrix) + + assert ( + 'reference_image must be a numpy array or a BrainAtlas object' + in str(e.value) + ) + + +def test_apply_transformation_with_BrainAtlas_reference_input_sucess(): + img_rot = load_image(M0_RIGID) + img_orig = load_image(M0_ORIG) + _, trans_matrix = rigid_body_registration(img_orig, img_rot) + atlas = BrainAtlas(atlas_name='MNI2009') + atlas_img = load_image(atlas.get_atlas()['t1_data']) + corr_img = apply_transformation(img_rot, atlas, trans_matrix) + + assert isinstance(corr_img, np.ndarray) + assert corr_img.shape == atlas_img.shape diff --git a/tests/test_asldata.py b/tests/test_asldata.py index fd274d4..266319f 100644 --- a/tests/test_asldata.py +++ b/tests/test_asldata.py @@ -2,9 +2,9 @@ 
import numpy as np import pytest -import SimpleITK as sitk from asltk import asldata +from asltk.utils.io import load_image, save_image SEP = os.sep T1_MRI = f'tests' + SEP + 'files' + SEP + 't1-mri.nrrd' @@ -18,6 +18,19 @@ def test_create_successfuly_asldata_object(): assert isinstance(obj, asldata.ASLData) +def test_asldata_object_shows_warning_if_m0_has_more_than_3D_dimensions( + tmp_path, +): + tmp_file = tmp_path / 'temp_m0_4D.nii.gz' + # Create a 4D M0 image + m0_4d = np.stack([load_image(M0), load_image(M0), load_image(M0)], axis=0) + save_image(m0_4d, str(tmp_file)) + with pytest.warns(Warning) as record: + obj = asldata.ASLData(m0=str(tmp_file)) + assert len(record) == 1 + assert 'M0 image has more than 3 dimensions.' in str(record[0].message) + + def test_create_successfuly_asldata_object_with_inputs(): obj_0 = asldata.ASLData(m0=M0) assert isinstance(obj_0, asldata.ASLData) @@ -71,20 +84,33 @@ def test_create_object_with_different_image_formats(): assert isinstance(obj, asldata.ASLData) -def test_load_image_with_different_file_formats(): - pass - - -def test_load_image_asl_data_correct_array_shape(): - pass - - def test_create_object_check_initial_parameters(): obj = asldata.ASLData() assert obj.get_ld() == [] assert obj.get_pld() == [] +def test_create_object_with_m0_as_numpy_array(): + array = load_image(M0) + obj = asldata.ASLData(m0=array) + + assert obj('m0').shape == array.shape + + +def test_create_object_with_m0_as_numpy_array(): + array = load_image(M0) + obj = asldata.ASLData(m0=array) + + assert obj('m0').shape == array.shape + + +def test_create_object_with_pcasl_as_numpy_array(): + array = load_image(PCASL_MTE) + obj = asldata.ASLData(pcasl=array) + + assert obj('pcasl').shape == array.shape + + def test_get_ld_show_empty_list_for_new_object(): obj = asldata.ASLData() assert obj.get_ld() == [] @@ -269,3 +295,53 @@ def test_set_image_sucess_pcasl(): obj = asldata.ASLData() obj.set_image(M0, 'pcasl') assert isinstance(obj('pcasl'), np.ndarray) 
+ + +@pytest.mark.parametrize( + 'input', + [ + ('not_a_valid_image'), + (123), + (None), + ({'key': 'value'}), + (['not', 'a', 'valid', 'image']), + ], +) +def test_set_image_raises_error_if_input_is_not_a_valid_image(input): + obj = asldata.ASLData() + with pytest.raises(Exception) as e: + obj.set_image(input, 'pcasl') + + assert 'Invalid image type or path' in e.value.args[0] + assert e.type == ValueError + + +def test_asldata_copy_creates_deepcopy(): + obj = asldata.ASLData( + pcasl=PCASL_MTE, + ld_values=[1, 2, 3], + pld_values=[1, 2, 3], + te_values=[10, 20, 30], + dw_values=[100, 200, 300], + ) + obj_copy = obj.copy() + assert isinstance(obj_copy, asldata.ASLData) + assert obj is not obj_copy + assert obj.get_ld() == obj_copy.get_ld() + assert obj.get_pld() == obj_copy.get_pld() + assert obj.get_te() == obj_copy.get_te() + assert obj.get_dw() == obj_copy.get_dw() + # Mutate original, copy should not change + obj.set_ld([9, 8, 7]) + assert obj.get_ld() != obj_copy.get_ld() + + +def test_asldata_len_returns_zero_for_no_image(): + obj = asldata.ASLData() + assert len(obj) == 0 + + +def test_asldata_len_returns_total_volumes(): + asl = asldata.ASLData(pcasl=PCASL_MTE, m0=M0) + + assert len(asl) == 56 diff --git a/tests/test_orientation_checking.py b/tests/test_orientation_checking.py index e69de29..1645e64 100644 --- a/tests/test_orientation_checking.py +++ b/tests/test_orientation_checking.py @@ -0,0 +1,258 @@ +# """ +# Tests for orientation checking and correction functionality. 
+# """ + +# import numpy as np +# import pytest + +# from asltk.registration import ( +# _compute_normalized_correlation, +# _normalize_image_intensity, +# check_and_fix_orientation, +# orientation_check, +# ) + + +# class TestOrientationChecking: +# """Test cases for orientation checking functionality.""" + +# def setup_method(self): +# """Set up test data.""" +# # Create a simple 3D image with identifiable features +# self.test_image = np.zeros((20, 30, 40)) + +# # Add some features to make orientation detection meaningful +# self.test_image[5:15, 10:20, 15:25] = 100 # Central bright region +# self.test_image[2:4, 5:25, 10:30] = 50 # Top bright strip +# self.test_image[16:18, 5:25, 10:30] = 50 # Bottom bright strip + +# # Add some noise +# noise = np.random.rand(*self.test_image.shape) * 10 +# self.test_image += noise + +# def test_identical_images(self): +# """Test that identical images have high correlation.""" +# # Test with identical images +# corrected, transform = check_and_fix_orientation( +# self.test_image, self.test_image, verbose=False +# ) + +# # Should not apply any transformations +# assert not transform['flip_x'] +# assert not transform['flip_y'] +# assert not transform['flip_z'] +# assert transform['transpose_axes'] is None + +# # Images should be nearly identical +# np.testing.assert_array_almost_equal(corrected, self.test_image) + +# def test_axial_flip_detection(self): +# """Test detection and correction of axial flip.""" +# # Create axially flipped version +# flipped_image = np.flip(self.test_image, axis=0) + +# # Check and fix orientation +# corrected, transform = check_and_fix_orientation( +# flipped_image, self.test_image, verbose=False +# ) + +# # Should detect Z-axis flip +# assert transform['flip_z'] == True + +# # Corrected image should be closer to original +# original_corr = _compute_normalized_correlation( +# flipped_image, self.test_image +# ) +# corrected_corr = _compute_normalized_correlation( +# corrected, self.test_image +# ) +# 
assert corrected_corr > original_corr + +# def test_sagittal_flip_detection(self): +# """Test detection and correction of sagittal flip.""" +# # Create sagittally flipped version +# flipped_image = np.flip(self.test_image, axis=2) + +# # Check and fix orientation +# corrected, transform = check_and_fix_orientation( +# flipped_image, self.test_image, verbose=False +# ) + +# # Should detect X-axis flip +# assert transform['flip_x'] == True + +# # Corrected image should be closer to original +# original_corr = _compute_normalized_correlation( +# flipped_image, self.test_image +# ) +# corrected_corr = _compute_normalized_correlation( +# corrected, self.test_image +# ) +# assert corrected_corr > original_corr + +# def test_coronal_flip_detection(self): +# """Test detection and correction of coronal flip.""" +# # Create coronally flipped version +# flipped_image = np.flip(self.test_image, axis=1) + +# # Check and fix orientation +# corrected, transform = check_and_fix_orientation( +# flipped_image, self.test_image, verbose=False +# ) + +# # Should detect Y-axis flip +# assert transform['flip_y'] == True + +# # Corrected image should be closer to original +# original_corr = _compute_normalized_correlation( +# flipped_image, self.test_image +# ) +# corrected_corr = _compute_normalized_correlation( +# corrected, self.test_image +# ) +# assert corrected_corr > original_corr + +# def test_orientation_check(self): +# """Test quick orientation compatibility check.""" +# # Test with identical images +# result = orientation_check(self.test_image, self.test_image) +# assert result['compatible'] == True +# assert result['correlation'] > 0.9 + +# # Test with flipped image +# flipped_image = np.flip(self.test_image, axis=0) +# result = orientation_check(flipped_image, self.test_image) +# # May or may not be compatible depending on the threshold and symmetry +# assert 'compatible' in result +# assert 'correlation' in result +# assert 'recommendation' in result + +# def 
test_normalize_image_intensity(self): +# """Test image intensity normalization.""" +# # Test with positive values +# test_array = np.array( +# [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] +# ) +# normalized = _normalize_image_intensity(test_array) + +# assert np.min(normalized) == 0.0 +# assert np.max(normalized) == 1.0 +# assert normalized.shape == test_array.shape + +# # Test with constant values +# constant_array = np.ones((5, 5, 5)) * 42 +# normalized_constant = _normalize_image_intensity(constant_array) +# # Should handle constant values gracefully +# assert normalized_constant.shape == constant_array.shape + +# def test_compute_normalized_correlation(self): +# """Test normalized correlation computation.""" +# # Test with identical arrays +# corr = _compute_normalized_correlation( +# self.test_image, self.test_image +# ) +# assert corr == 1.0 + +# # Test with completely different arrays +# random_image = np.random.rand(*self.test_image.shape) * 1000 +# corr = _compute_normalized_correlation(self.test_image, random_image) +# assert 0 <= corr <= 1 + +# # Test with different shapes (should return -1) +# different_shape = np.random.rand(10, 10, 10) +# corr = _compute_normalized_correlation( +# self.test_image, different_shape +# ) +# assert corr == -1 + +# def test_multiple_transformations(self): +# """Test detection of multiple orientation issues.""" +# # Apply both flip and transpose +# transformed_image = np.flip(self.test_image, axis=0) # Z flip +# transformed_image = np.flip(transformed_image, axis=1) # Y flip + +# # Check and fix orientation +# corrected, transform = check_and_fix_orientation( +# transformed_image, self.test_image, verbose=False +# ) + +# # Should detect multiple flips +# flip_count = sum( +# [transform['flip_x'], transform['flip_y'], transform['flip_z']] +# ) +# assert flip_count >= 1 # At least one flip should be detected + +# # Corrected image should be closer to original +# original_corr = _compute_normalized_correlation( +# 
transformed_image, self.test_image +# ) +# corrected_corr = _compute_normalized_correlation( +# corrected, self.test_image +# ) +# assert corrected_corr >= original_corr + +# def test_edge_cases(self): +# """Test edge cases and error handling.""" +# # Test with very small images +# small_image = np.random.rand(2, 2, 2) +# small_fixed = np.random.rand(2, 2, 2) + +# # Should not crash +# corrected, transform = check_and_fix_orientation( +# small_image, small_fixed, verbose=False +# ) +# assert corrected.shape == small_image.shape + +# # Test with zero images +# zero_image = np.zeros((10, 10, 10)) +# zero_fixed = np.zeros((10, 10, 10)) + +# # Should handle gracefully +# corrected, transform = check_and_fix_orientation( +# zero_image, zero_fixed, verbose=False +# ) +# assert corrected.shape == zero_image.shape + + +# if __name__ == '__main__': +# # Run tests manually if pytest is not available +# test_case = TestOrientationChecking() +# test_case.setup_method() + +# print('Running orientation checking tests...') + +# try: +# test_case.test_identical_images() +# print('✓ Identical images test passed') + +# test_case.test_axial_flip_detection() +# print('✓ Axial flip detection test passed') + +# test_case.test_sagittal_flip_detection() +# print('✓ Sagittal flip detection test passed') + +# test_case.test_coronal_flip_detection() +# print('✓ Coronal flip detection test passed') + +# test_case.test_orientation_check() +# print('✓ Quick orientation check test passed') + +# test_case.test_normalize_image_intensity() +# print('✓ Image normalization test passed') + +# test_case.test_compute_normalized_correlation() +# print('✓ Correlation computation test passed') + +# test_case.test_multiple_transformations() +# print('✓ Multiple transformations test passed') + +# test_case.test_edge_cases() +# print('✓ Edge cases test passed') + +# print('\nAll tests passed! 
✓') + +# except Exception as e: +# print(f'\nTest failed: {e}') +# import traceback + +# traceback.print_exc() diff --git a/tests/test_registration.py b/tests/test_registration.py deleted file mode 100644 index bb753cb..0000000 --- a/tests/test_registration.py +++ /dev/null @@ -1,112 +0,0 @@ -import os - -import numpy as np -import pytest - -from asltk.asldata import ASLData -from asltk.registration import head_movement_correction -from asltk.registration.rigid import rigid_body_registration -from asltk.utils import load_image - -SEP = os.sep -M0_ORIG = ( - f'tests' + SEP + 'files' + SEP + 'registration' + SEP + 'm0_mean.nii.gz' -) -M0_RIGID = ( - f'tests' - + SEP - + 'files' - + SEP - + 'registration' - + SEP - + 'm0_mean-rigid-25degrees.nrrd' -) -PCASL_MTE = f'tests' + SEP + 'files' + SEP + 'pcasl_mte.nii.gz' -M0 = f'tests' + SEP + 'files' + SEP + 'm0.nii.gz' - - -def test_rigid_body_registration_run_sucess(): - img_orig = load_image(M0_ORIG) - img_rot = load_image(M0_RIGID) - - resampled_image, _ = rigid_body_registration(img_orig, img_rot) - - assert resampled_image.shape == img_orig.shape - - -@pytest.mark.parametrize( - 'img_orig', [('invalid_image'), ([1, 2, 3]), (['a', 1, 5.23])] -) -def test_rigid_body_registration_error_fixed_image_is_not_numpy_array( - img_orig, -): - img_rot = load_image(M0_RIGID) - - with pytest.raises(Exception) as e: - rigid_body_registration(img_orig, img_rot) - - assert ( - str(e.value) == 'fixed_image and moving_image must be a numpy array.' - ) - - -@pytest.mark.parametrize( - 'img_rot', [('invalid_image'), ([1, 2, 3]), (['a', 1, 5.23])] -) -def test_rigid_body_registration_error_fixed_image_is_not_numpy_array(img_rot): - img_orig = load_image(M0_ORIG) - - with pytest.raises(Exception) as e: - rigid_body_registration(img_orig, img_rot) - - assert ( - str(e.value) == 'fixed_image and moving_image must be a numpy array.' 
- ) - - -def test_rigid_body_registration_output_registration_matrix_success(): - img_orig = load_image(M0_ORIG) - img_rot = load_image(M0_RIGID) - - _, trans_matrix = rigid_body_registration(img_orig, img_rot) - - assert isinstance(trans_matrix, np.ndarray) - assert trans_matrix.shape == (4, 4) - - -def test_head_movement_correction_build_asldata_success(): - pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) - - asldata, _ = head_movement_correction(pcasl_orig) - - assert asldata.shape == pcasl_orig('pcasl').shape - - -def test_head_movement_correction_error_input_is_not_ASLData_object(): - with pytest.raises(TypeError) as e: - head_movement_correction('invalid_input') - - assert str(e.value) == 'Input must be an ASLData object.' - - -def test_head_movement_correction_error_ref_vol_is_not_int(): - pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) - - with pytest.raises(Exception) as e: - head_movement_correction(pcasl_orig, ref_vol='invalid_ref_vol') - - assert ( - str(e.value) - == 'ref_vol must be an positive integer based on the total asl data volumes.' 
- ) - - -def test_head_movement_correction_success(): - pcasl_orig = ASLData(pcasl=PCASL_MTE, m0=M0) - - pcasl_corrected, trans_mtxs = head_movement_correction( - pcasl_orig, verbose=True - ) - - assert pcasl_corrected.shape == pcasl_orig('pcasl').shape - assert any(not np.array_equal(mtx, np.eye(4)) for mtx in trans_mtxs) diff --git a/tests/test_smooth.py b/tests/test_smooth.py index 2957aa9..eaadccd 100644 --- a/tests/test_smooth.py +++ b/tests/test_smooth.py @@ -5,7 +5,7 @@ from asltk.smooth.gaussian import isotropic_gaussian from asltk.smooth.median import isotropic_median -from asltk.utils import load_image +from asltk.utils.io import load_image SEP = os.sep PCASL_MTE = f'tests' + SEP + 'files' + SEP + 'pcasl_mte.nii.gz' diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/utils/test_image_manipulation.py b/tests/utils/test_image_manipulation.py new file mode 100644 index 0000000..03197b2 --- /dev/null +++ b/tests/utils/test_image_manipulation.py @@ -0,0 +1,167 @@ +import os +import tempfile + +import numpy as np +import pytest +import SimpleITK as sitk + +from asltk import asldata +from asltk.models import signal_dynamic +from asltk.utils.image_manipulation import ( + collect_data_volumes, + select_reference_volume, +) +from asltk.utils.io import load_image + +SEP = os.sep +T1_MRI = f'tests' + SEP + 'files' + SEP + 't1-mri.nrrd' +PCASL_MTE = f'tests' + SEP + 'files' + SEP + 'pcasl_mte.nii.gz' +M0 = f'tests' + SEP + 'files' + SEP + 'm0.nii.gz' +M0_BRAIN_MASK = f'tests' + SEP + 'files' + SEP + 'm0_brain_mask.nii.gz' + + +def test_asl_model_buxton_return_sucess_list_of_values(): + buxton_values = signal_dynamic.asl_model_buxton( + tau=[1, 2, 3], w=[10, 20, 30], m0=1000, cbf=450, att=1500 + ) + assert len(buxton_values.tolist()) == 3 + assert type(buxton_values) == np.ndarray + + +@pytest.mark.parametrize( + 'input', [(['a', 'b', 'c']), (['a', 'b', 2]), ([100.1, 200.0, 'text'])] +) +def 
test_asl_model_buxton_tau_raise_errors_with_wrong_inputs(input): + with pytest.raises(Exception) as e: + buxton_values = signal_dynamic.asl_model_buxton( + tau=input, w=[10, 20, 30], m0=1000, cbf=450, att=1500 + ) + assert e.value.args[0] == 'tau list must contain float or int values' + + +@pytest.mark.parametrize('input', [('a'), (2), (100.1)]) +def test_asl_model_buxton_tau_raise_errors_with_wrong_inputs_type(input): + with pytest.raises(Exception) as e: + buxton_values = signal_dynamic.asl_model_buxton( + tau=input, w=[10, 20, 30], m0=1000, cbf=450, att=1500 + ) + assert ( + e.value.args[0] == 'tau parameter must be a list or tuple of values.' + ) + + +@pytest.mark.parametrize('input', [(['a']), (['2']), (['100.1'])]) +def test_asl_model_buxton_tau_raise_errors_with_wrong_inputs_values(input): + with pytest.raises(Exception) as e: + buxton_values = signal_dynamic.asl_model_buxton( + tau=input, w=[10, 20, 30], m0=1000, cbf=450, att=1500 + ) + assert e.value.args[0] == 'tau list must contain float or int values' + + +@pytest.mark.parametrize( + 'input', [(['a', 'b', 'c']), (['a', 'b', 2]), ([100.1, 200.0, np.ndarray])] +) +def test_asl_model_buxton_w_raise_errors_with_wrong_inputs(input): + with pytest.raises(Exception) as e: + buxton_values = signal_dynamic.asl_model_buxton( + tau=[10, 20, 30], w=input, m0=1000, cbf=450, att=1500 + ) + assert e.value.args[0] == 'w list must contain float or int values' + + +@pytest.mark.parametrize('input', [('a'), (1), (100.1), (np.ndarray)]) +def test_asl_model_buxton_w_raise_errors_with_wrong_inputs_not_list(input): + with pytest.raises(Exception) as e: + buxton_values = signal_dynamic.asl_model_buxton( + tau=[10, 20, 30], w=input, m0=1000, cbf=450, att=1500 + ) + assert e.value.args[0] == 'w parameter must be a list or tuple of values.' 
+ + +def test_asl_model_buxton_runs_with_inner_if_clauses(): + buxton_values = signal_dynamic.asl_model_buxton( + tau=[170.0, 270.0, 370.0, 520.0, 670.0, 1070.0, 1870.0], + w=[100.0, 100.0, 150.0, 150.0, 400.0, 800.0, 1800.0], + m0=3761480.0, + cbf=0.00001, + att=1500, + ) + assert len(buxton_values.tolist()) == 7 + assert type(buxton_values) == np.ndarray + + +def test_asl_model_multi_te_return_sucess_list_of_values(): + multite_values = signal_dynamic.asl_model_multi_te( + tau=[170.0, 270.0, 370.0, 520.0, 670.0, 1070.0, 1870.0], + w=[100.0, 100.0, 150.0, 150.0, 400.0, 800.0, 1800.0], + te=[13.56, 67.82, 122.08, 176.33, 230.59, 284.84, 339.100, 393.36], + m0=3761480.0, + cbf=0.00001, + att=1500, + ) + assert len(multite_values) == 7 + assert type(multite_values) == np.ndarray + + +def test_collect_data_volumes_return_correct_list_of_volumes_4D_data(): + data = np.ones((2, 30, 40, 15)) + data[0, :, :, :] = data[0, :, :, :] * 10 + data[1, :, :, :] = data[1, :, :, :] * 20 + collected_volumes, _ = collect_data_volumes(data) + assert len(collected_volumes) == 2 + assert collected_volumes[0].shape == (30, 40, 15) + assert np.mean(collected_volumes[0]) == 10 + assert np.mean(collected_volumes[1]) == 20 + + +def test_collect_data_volumes_return_correct_list_of_volumes_5D_data(): + data = np.ones((2, 2, 30, 40, 15)) + data[0, 0, :, :, :] = data[0, 0, :, :, :] * 10 + data[0, 1, :, :, :] = data[0, 1, :, :, :] * 10 + data[1, 0, :, :, :] = data[1, 0, :, :, :] * 20 + data[1, 1, :, :, :] = data[1, 1, :, :, :] * 20 + collected_volumes, _ = collect_data_volumes(data) + assert len(collected_volumes) == 4 + assert collected_volumes[0].shape == (30, 40, 15) + assert np.mean(collected_volumes[0]) == 10 + assert np.mean(collected_volumes[1]) == 10 + assert np.mean(collected_volumes[2]) == 20 + assert np.mean(collected_volumes[3]) == 20 + + +def test_collect_data_volumes_error_if_input_is_not_numpy_array(): + data = [1, 2, 3] + with pytest.raises(Exception) as e: + collected_volumes, _ 
= collect_data_volumes(data) + assert 'data is not a numpy array' in e.value.args[0] + + +def test_collect_data_volumes_error_if_input_is_less_than_3D(): + data = np.ones((30, 40)) + with pytest.raises(Exception) as e: + collected_volumes, _ = collect_data_volumes(data) + assert 'data is a 3D volume or higher dimensions' in e.value.args[0] + + +@pytest.mark.parametrize('method', ['snr', 'mean']) +def test_select_reference_volume_returns_correct_volume_and_index_with_sample_images( + method, +): + asl = asldata.ASLData(pcasl=PCASL_MTE, m0=M0) + + ref_volume, idx = select_reference_volume(asl, method=method) + + assert ref_volume.shape == asl('pcasl')[0][0].shape + assert idx != 0 + + +@pytest.mark.parametrize( + 'method', [('invalid_method'), (123), (['mean']), ({'method': 'snr'})] +) +def test_select_reference_volume_raise_error_invalid_method(method): + asl = asldata.ASLData(pcasl=PCASL_MTE, m0=M0) + + with pytest.raises(Exception) as e: + select_reference_volume(asl, method=method) + assert 'Invalid method' in e.value.args[0] diff --git a/tests/utils/test_image_statistics.py b/tests/utils/test_image_statistics.py new file mode 100644 index 0000000..2d8f913 --- /dev/null +++ b/tests/utils/test_image_statistics.py @@ -0,0 +1,183 @@ +import os + +import numpy as np +import pytest + +from asltk.utils.image_statistics import ( + analyze_image_properties, + calculate_mean_intensity, + calculate_snr, +) +from asltk.utils.io import load_image + +SEP = os.sep +T1_MRI = f'tests{SEP}files{SEP}t1-mri.nrrd' +PCASL_MTE = f'tests{SEP}files{SEP}pcasl_mte.nii.gz' +M0 = f'tests{SEP}files{SEP}m0.nii.gz' +M0_BRAIN_MASK = f'tests{SEP}files{SEP}m0_brain_mask.nii.gz' + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_analyze_image_properties_returns_dict(image_path): + """Test that analyze_image_properties returns a dictionary with expected keys.""" + img = load_image(image_path) + props = analyze_image_properties(img) + assert isinstance(props, dict) + 
assert 'shape' in props + assert 'intensity_stats' in props + assert 'center_of_mass' in props + assert 'min' in props['intensity_stats'] + assert 'max' in props['intensity_stats'] + assert 'mean' in props['intensity_stats'] + assert 'std' in props['intensity_stats'] + + +@pytest.mark.parametrize( + 'input', + ['invalid/path/to/image.nii', 1, -2.4, (1, 2), {'wrong': 1, 'input': 2}], +) +def test_analyze_image_properties_invalid_path(input): + """Test that an invalid path raises an exception.""" + with pytest.raises(Exception) as error: + analyze_image_properties(input) + + assert len(str(error.value)) > 0 + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_calculate_snr_returns_float(image_path): + """Test that calculate_snr returns a float for valid images.""" + img = load_image(image_path) + snr = calculate_snr(img) + assert isinstance(snr, float) + assert snr >= 0 + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_calculate_snr_returns_float_using_valid_roi(image_path): + """Test that calculate_snr returns a float for valid images.""" + img = load_image(image_path) + roi = np.ones(img.shape, dtype=bool) # Create a valid ROI + snr = calculate_snr(img, roi=roi) + assert isinstance(snr, float) + assert snr >= 0 + + +def test_calculate_snr_make_zero_division_with_same_image_input(): + """Test that calculate_snr handles zero division with same image input.""" + img = np.ones((10, 10, 10)) # Create a simple image + roi = np.ones(img.shape, dtype=bool) # Create a valid ROI + snr = calculate_snr(img, roi=roi) + + assert isinstance(snr, float) + assert snr == float('inf') # SNR should be infinite for uniform image + + +@pytest.mark.parametrize( + 'input', [np.zeros((10, 10)), np.ones((5, 5, 5)), np.full((3, 3), 7)] +) +def test_calculate_snr_known_arrays(input): + """Test calculate_snr with known arrays.""" + snr = calculate_snr(input) + assert isinstance(snr, float) + + +def test_calculate_snr_invalid_input(): + 
"""Test that calculate_snr raises an error for invalid input.""" + with pytest.raises(Exception) as error: + calculate_snr('invalid_input') + + assert len(str(error.value)) > 0 + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_calculate_snr_raise_error_roi_different_shape(image_path): + """Test that calculate_snr raises an error for ROI of different shape.""" + img = load_image(image_path) + # Add an extra dimension to img and create a mismatched ROI + img = np.expand_dims(img, axis=0) + roi = np.ones( + img.shape[1:], dtype=bool + ) # ROI shape does not match img shape + with pytest.raises(ValueError) as error: + calculate_snr(img, roi=roi) + + assert 'ROI shape must match image shape' in str(error.value) + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_calculate_snr_raise_error_roi_not_numpy_array(image_path): + """Test that calculate_snr raises an error for ROI not being a numpy array.""" + img = load_image(image_path) + roi = 'invalid_roi' + with pytest.raises(ValueError) as error: + calculate_snr(img, roi=roi) + + assert 'ROI must be a numpy array' in str(error.value) + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_calculate_mean_intensity_returns_float(image_path): + """Test that calculate_mean_intensity returns a float for valid images.""" + img = load_image(image_path) + mean_intensity = calculate_mean_intensity(img) + assert isinstance(mean_intensity, float) + assert mean_intensity >= 0 + + +@pytest.mark.parametrize('image_path', [T1_MRI, PCASL_MTE, M0]) +def test_calculate_mean_intensity_with_valid_roi(image_path): + """Test that calculate_mean_intensity returns a float for valid ROI.""" + img = load_image(image_path) + roi = np.ones(img.shape, dtype=bool) + mean_intensity = calculate_mean_intensity(img, roi=roi) + assert isinstance(mean_intensity, float) + assert mean_intensity >= 0 + + +def test_calculate_mean_intensity_known_arrays(): + """Test 
calculate_mean_intensity with known arrays.""" + arr = np.ones((5, 5, 5)) + mean_intensity = calculate_mean_intensity(arr) + assert mean_intensity == 1.0 + + arr = np.full((3, 3), 7) + mean_intensity = calculate_mean_intensity(arr) + assert mean_intensity == 7.0 + + arr = np.array([[1, 2], [3, 4]]) + mean_intensity = calculate_mean_intensity(arr) + assert mean_intensity == 2.5 + + +def test_calculate_mean_intensity_with_roi_mask(): + """Test calculate_mean_intensity with ROI mask.""" + arr = np.array([[1, 2], [3, 4]]) + roi = np.array([[0, 1], [1, 0]]) + mean_intensity = calculate_mean_intensity(arr, roi=roi) + assert mean_intensity == 2.5 # mean of [2, 3] + + +def test_calculate_mean_intensity_invalid_input(): + """Test that calculate_mean_intensity raises an error for invalid input.""" + with pytest.raises(ValueError) as error: + calculate_mean_intensity('invalid_input') + assert 'Input must be a numpy array' in str(error.value) + + +def test_calculate_mean_intensity_roi_not_numpy_array(): + """Test that calculate_mean_intensity raises an error for ROI not being a numpy array.""" + arr = np.ones((5, 5)) + roi = 'invalid_roi' + with pytest.raises(ValueError) as error: + calculate_mean_intensity(arr, roi=roi) + assert 'ROI must be a numpy array' in str(error.value) + + +def test_calculate_mean_intensity_roi_shape_mismatch(): + """Test that calculate_mean_intensity raises an error for ROI shape mismatch.""" + arr = np.ones((5, 5)) + roi = np.ones((4, 4), dtype=bool) + with pytest.raises(ValueError) as error: + calculate_mean_intensity(arr, roi=roi) + assert 'ROI shape must match image shape' in str(error.value) diff --git a/tests/test_utils.py b/tests/utils/test_io.py similarity index 77% rename from tests/test_utils.py rename to tests/utils/test_io.py index 27f134d..5fbf706 100644 --- a/tests/test_utils.py +++ b/tests/utils/test_io.py @@ -5,8 +5,9 @@ import pytest import SimpleITK as sitk -from asltk import asldata, utils +from asltk import asldata from 
asltk.models import signal_dynamic +from asltk.utils.io import load_asl_data, load_image, save_asl_data, save_image SEP = os.sep T1_MRI = f'tests' + SEP + 'files' + SEP + 't1-mri.nrrd' @@ -16,15 +17,25 @@ def test_load_image_pcasl_type_update_object_image_reference(): - img = utils.load_image(PCASL_MTE) + img = load_image(PCASL_MTE) assert isinstance(img, np.ndarray) def test_load_image_m0_type_update_object_image_reference(): - img = utils.load_image(M0) + img = load_image(M0) assert isinstance(img, np.ndarray) +def test_load_image_m0_with_average_m0_option(tmp_path): + multi_M0 = np.stack([load_image(M0), load_image(M0)], axis=0) + tmp_file = tmp_path / 'temp_m0.nii.gz' + save_image(multi_M0, str(tmp_file)) + img = load_image(str(tmp_file), average_m0=True) + + assert isinstance(img, np.ndarray) + assert len(img.shape) == 3 + + @pytest.mark.parametrize( 'input', [ @@ -35,7 +46,7 @@ def test_load_image_m0_type_update_object_image_reference(): ) def test_load_image_attest_fullpath_is_valid(input): with pytest.raises(Exception) as e: - utils.load_image(input) + load_image(input) assert 'does not exist.' 
in e.value.args[0] @@ -43,9 +54,9 @@ def test_load_image_attest_fullpath_is_valid(input): 'input', [('out.nrrd'), ('out.nii'), ('out.mha'), ('out.tif')] ) def test_save_image_success(input, tmp_path): - img = utils.load_image(T1_MRI) + img = load_image(T1_MRI) full_path = tmp_path.as_posix() + os.sep + input - utils.save_image(img, full_path) + save_image(img, full_path) assert os.path.exists(full_path) read_file = sitk.ReadImage(full_path) assert read_file.GetSize() == sitk.ReadImage(T1_MRI).GetSize() @@ -55,10 +66,10 @@ def test_save_image_success(input, tmp_path): 'input', [('out.nrr'), ('out.n'), ('out.m'), ('out.zip')] ) def test_save_image_throw_error_invalid_formatt(input, tmp_path): - img = utils.load_image(T1_MRI) + img = load_image(T1_MRI) full_path = tmp_path.as_posix() + os.sep + input with pytest.raises(Exception) as e: - utils.save_image(img, full_path) + save_image(img, full_path) def test_asl_model_buxton_return_sucess_list_of_values(): @@ -154,7 +165,7 @@ def test_asl_model_multi_te_return_sucess_list_of_values(): def test_save_asl_data_data_sucess(input_data, filename, tmp_path): obj = asldata.ASLData(pcasl=input_data) out_file = tmp_path.as_posix() + os.sep + filename - utils.save_asl_data(obj, out_file) + save_asl_data(obj, out_file) assert os.path.exists(out_file) @@ -173,7 +184,7 @@ def test_save_asl_data_raise_error_filename_not_pkl( obj = asldata.ASLData(pcasl=PCASL_MTE) out_file = tmp_path.as_posix() + os.sep + filename with pytest.raises(Exception) as e: - utils.save_asl_data(obj, out_file) + save_asl_data(obj, out_file) assert e.value.args[0] == 'Filename must be a pickle file (.pkl)' @@ -186,8 +197,8 @@ def test_save_asl_data_raise_error_filename_not_pkl( def test_load_asl_data_sucess(input_data, filename, tmp_path): obj = asldata.ASLData(pcasl=input_data) out_file = tmp_path.as_posix() + os.sep + filename - utils.save_asl_data(obj, out_file) - loaded_obj = utils.load_asl_data(out_file) + save_asl_data(obj, out_file) + loaded_obj = 
load_asl_data(out_file) assert isinstance(loaded_obj, asldata.ASLData) assert loaded_obj('pcasl').shape == obj('pcasl').shape @@ -202,7 +213,7 @@ def test_load_asl_data_sucess(input_data, filename, tmp_path): ], ) def test_load_image_using_BIDS_input_sucess(input_bids, sub, sess, mod, suff): - loaded_obj = utils.load_image( + loaded_obj = load_image( full_path=input_bids, subject=sub, session=sess, @@ -218,7 +229,7 @@ def test_load_image_using_BIDS_input_sucess(input_bids, sub, sess, mod, suff): ) def test_load_image_using_not_valid_BIDS_input_raise_error(input_data): with pytest.raises(Exception) as e: - loaded_obj = utils.load_image(input_data) + loaded_obj = load_image(input_data) assert 'is missing' in e.value.args[0] @@ -234,7 +245,7 @@ def test_load_image_raise_FileNotFoundError_not_matching_image_file( input_bids, sub, sess, mod, suff ): with pytest.raises(Exception) as e: - loaded_obj = utils.load_image( + loaded_obj = load_image( full_path=input_bids, subject=sub, session=sess, @@ -244,46 +255,6 @@ def test_load_image_raise_FileNotFoundError_not_matching_image_file( assert 'ASL image file is missing' in e.value.args[0] -def test_collect_data_volumes_return_correct_list_of_volumes_4D_data(): - data = np.ones((2, 30, 40, 15)) - data[0, :, :, :] = data[0, :, :, :] * 10 - data[1, :, :, :] = data[1, :, :, :] * 20 - collected_volumes, _ = utils.collect_data_volumes(data) - assert len(collected_volumes) == 2 - assert collected_volumes[0].shape == (30, 40, 15) - assert np.mean(collected_volumes[0]) == 10 - assert np.mean(collected_volumes[1]) == 20 - - -def test_collect_data_volumes_return_correct_list_of_volumes_5D_data(): - data = np.ones((2, 2, 30, 40, 15)) - data[0, 0, :, :, :] = data[0, 0, :, :, :] * 10 - data[0, 1, :, :, :] = data[0, 1, :, :, :] * 10 - data[1, 0, :, :, :] = data[1, 0, :, :, :] * 20 - data[1, 1, :, :, :] = data[1, 1, :, :, :] * 20 - collected_volumes, _ = utils.collect_data_volumes(data) - assert len(collected_volumes) == 4 - assert 
collected_volumes[0].shape == (30, 40, 15) - assert np.mean(collected_volumes[0]) == 10 - assert np.mean(collected_volumes[1]) == 10 - assert np.mean(collected_volumes[2]) == 20 - assert np.mean(collected_volumes[3]) == 20 - - -def test_collect_data_volumes_error_if_input_is_not_numpy_array(): - data = [1, 2, 3] - with pytest.raises(Exception) as e: - collected_volumes, _ = utils.collect_data_volumes(data) - assert 'data is not a numpy array' in e.value.args[0] - - -def test_collect_data_volumes_error_if_input_is_less_than_3D(): - data = np.ones((30, 40)) - with pytest.raises(Exception) as e: - collected_volumes, _ = utils.collect_data_volumes(data) - assert 'data is a 3D volume or higher dimensions' in e.value.args[0] - - def test_load_image_from_bids_structure_returns_valid_array(): bids_root = 'tests/files/bids-example/asl001' subject = 'Sub103' @@ -291,7 +262,7 @@ def test_load_image_from_bids_structure_returns_valid_array(): modality = 'asl' suffix = None # m0 is deleted, because it does not exist - img = utils.load_image( + img = load_image( full_path=bids_root, subject=subject, session=session,