diff --git a/asltk/data/brain_atlas/__init__.py b/asltk/data/brain_atlas/__init__.py index 8bf3bb1..cc4d9cf 100644 --- a/asltk/data/brain_atlas/__init__.py +++ b/asltk/data/brain_atlas/__init__.py @@ -7,8 +7,6 @@ import kagglehub -# TODO Fix the t1_data loading because the brain atlases will have the 1mm and 2mm options -# TODO Ajust each kagglehub dataset to have the 2mm resolution option class BrainAtlas: ATLAS_JSON_PATH = os.path.join(os.path.dirname(__file__)) diff --git a/asltk/reconstruction/multi_dw_mapping.py b/asltk/reconstruction/multi_dw_mapping.py index 8a30170..6cb6258 100644 --- a/asltk/reconstruction/multi_dw_mapping.py +++ b/asltk/reconstruction/multi_dw_mapping.py @@ -326,7 +326,7 @@ def create_map( y_axis = self._asl_data('m0').get_as_numpy().shape[1] # width z_axis = self._asl_data('m0').get_as_numpy().shape[0] # depth - # TODO Fix + # TODO Fix the reconstruction method when ASL-DWI acquisition works properly print('multiDW-ASL processing...') for i in range(x_axis): for j in range(y_axis): diff --git a/asltk/reconstruction/multi_te_mapping.py b/asltk/reconstruction/multi_te_mapping.py index 58c4191..696157d 100644 --- a/asltk/reconstruction/multi_te_mapping.py +++ b/asltk/reconstruction/multi_te_mapping.py @@ -1,3 +1,4 @@ +import warnings from multiprocessing import Array, Pool, cpu_count import numpy as np @@ -190,6 +191,7 @@ def create_map( cores=cpu_count(), smoothing=None, smoothing_params=None, + suppress_warnings=True, ): """Create multi-TE ASL maps including T1 blood-gray matter exchange (T1blGM). @@ -236,6 +238,8 @@ def create_map( smoothing_params (dict, optional): Parameters for the smoothing filter. For 'gaussian': {'sigma': float} (default: 1.0) For 'median': {'size': int} (default: 3) + suppress_warnings (bool, optional): Whether to suppress warnings during + processing. Defaults to True. 
Returns: dict: Dictionary containing: @@ -299,98 +303,116 @@ def create_map( set_att_map(): Provide pre-computed ATT map CBFMapping: For basic CBF/ATT mapping """ - self._basic_maps.set_brain_mask(ImageIO(image_array=self._brain_mask)) + # Use context manager to suppress warnings if requested + with warnings.catch_warnings(): + if suppress_warnings: + # Filter common warnings that might appear during fitting and processing + warnings.filterwarnings('ignore', category=RuntimeWarning) + warnings.filterwarnings('ignore', category=UserWarning) + warnings.filterwarnings( + 'ignore', category=getattr(np, 'VisibleDeprecationWarning', UserWarning) + ) - basic_maps = {'cbf': self._cbf_map, 'att': self._att_map} - if np.mean(self._cbf_map) == 0 or np.mean(self._att_map) == 0: - # If the CBF/ATT maps are zero (empty), then a new one is created - print( - '[blue][INFO] The CBF/ATT map were not provided. Creating these maps before next step...' + self._basic_maps.set_brain_mask( + ImageIO(image_array=self._brain_mask) ) - basic_maps = self._basic_maps.create_map() - self._cbf_map = basic_maps['cbf'].get_as_numpy() - self._att_map = basic_maps['att'].get_as_numpy() - - global asl_data, brain_mask, cbf_map, att_map, t2bl, t2gm - asl_data = self._asl_data - brain_mask = self._brain_mask - cbf_map = self._cbf_map - att_map = self._att_map - ld_arr = self._asl_data.get_ld() - pld_arr = self._asl_data.get_pld() - te_arr = self._asl_data.get_te() - t2bl = self.T2bl - t2gm = self.T2gm - - x_axis = self._asl_data('m0').get_as_numpy().shape[2] # height - y_axis = self._asl_data('m0').get_as_numpy().shape[1] # width - z_axis = self._asl_data('m0').get_as_numpy().shape[0] # depth - - tblgm_map_shared = Array('d', z_axis * y_axis * x_axis, lock=False) - - with Pool( - processes=cores, - initializer=_multite_init_globals, - initargs=( - cbf_map, - att_map, - brain_mask, - asl_data, - ld_arr, - pld_arr, - te_arr, - tblgm_map_shared, - t2bl, - t2gm, - ), - ) as pool: - with Progress() as progress: - task = 
progress.add_task( - 'multiTE-ASL processing...', total=x_axis + + basic_maps = {'cbf': self._cbf_map, 'att': self._att_map} + if np.mean(self._cbf_map) == 0 or np.mean(self._att_map) == 0: + # If the CBF/ATT maps are zero (empty), then a new one is created + print( + '[blue][INFO] The CBF/ATT map were not provided. Creating these maps before next step...' ) - results = [ - pool.apply_async( - _tblgm_multite_process_slice, - args=(i, x_axis, y_axis, z_axis, par0, lb, ub), - callback=lambda _: progress.update(task, advance=1), + basic_maps = self._basic_maps.create_map() + self._cbf_map = basic_maps['cbf'].get_as_numpy() + self._att_map = basic_maps['att'].get_as_numpy() + + global asl_data, brain_mask, cbf_map, att_map, t2bl, t2gm + asl_data = self._asl_data + brain_mask = self._brain_mask + cbf_map = self._cbf_map + att_map = self._att_map + ld_arr = self._asl_data.get_ld() + pld_arr = self._asl_data.get_pld() + te_arr = self._asl_data.get_te() + t2bl = self.T2bl + t2gm = self.T2gm + + x_axis = self._asl_data('m0').get_as_numpy().shape[2] # height + y_axis = self._asl_data('m0').get_as_numpy().shape[1] # width + z_axis = self._asl_data('m0').get_as_numpy().shape[0] # depth + + tblgm_map_shared = Array('d', z_axis * y_axis * x_axis, lock=False) + + with Pool( + processes=cores, + initializer=_multite_init_globals, + initargs=( + cbf_map, + att_map, + brain_mask, + asl_data, + ld_arr, + pld_arr, + te_arr, + tblgm_map_shared, + t2bl, + t2gm, + ), + ) as pool: + with Progress() as progress: + task = progress.add_task( + 'multiTE-ASL processing...', total=x_axis ) - for i in range(x_axis) - ] - for result in results: - result.wait() + results = [ + pool.apply_async( + _tblgm_multite_process_slice, + args=(i, x_axis, y_axis, z_axis, par0, lb, ub), + callback=lambda _: progress.update( + task, advance=1 + ), + ) + for i in range(x_axis) + ] + for result in results: + result.wait() - self._t1blgm_map = np.frombuffer(tblgm_map_shared).reshape( - z_axis, y_axis, x_axis - ) 
+ self._t1blgm_map = np.frombuffer(tblgm_map_shared).reshape( + z_axis, y_axis, x_axis + ) - # Adjusting output image boundaries - self._t1blgm_map = self._adjust_image_limits(self._t1blgm_map, par0[0]) + # Adjusting output image boundaries + self._t1blgm_map = self._adjust_image_limits( + self._t1blgm_map, par0[0] + ) - # Prepare output maps - cbf_map_image = ImageIO(self._asl_data('m0').get_image_path()) - cbf_map_image.update_image_data(self._cbf_map) + # Prepare output maps + cbf_map_image = ImageIO(self._asl_data('m0').get_image_path()) + cbf_map_image.update_image_data(self._cbf_map) - cbf_map_norm_image = ImageIO(self._asl_data('m0').get_image_path()) - cbf_map_norm_image.update_image_data(self._cbf_map * (60 * 60 * 1000)) + cbf_map_norm_image = ImageIO(self._asl_data('m0').get_image_path()) + cbf_map_norm_image.update_image_data( + self._cbf_map * (60 * 60 * 1000) + ) - att_map_image = ImageIO(self._asl_data('m0').get_image_path()) - att_map_image.update_image_data(self._att_map) + att_map_image = ImageIO(self._asl_data('m0').get_image_path()) + att_map_image.update_image_data(self._att_map) - t1blgm_map_image = ImageIO(self._asl_data('m0').get_image_path()) - t1blgm_map_image.update_image_data(self._t1blgm_map) + t1blgm_map_image = ImageIO(self._asl_data('m0').get_image_path()) + t1blgm_map_image.update_image_data(self._t1blgm_map) - # Create output maps dictionary - output_maps = { - 'cbf': cbf_map_image, - 'cbf_norm': cbf_map_norm_image, - 'att': att_map_image, - 't1blgm': t1blgm_map_image, - } + # Create output maps dictionary + output_maps = { + 'cbf': cbf_map_image, + 'cbf_norm': cbf_map_norm_image, + 'att': att_map_image, + 't1blgm': t1blgm_map_image, + } - # Apply smoothing if requested - return _apply_smoothing_to_maps( - output_maps, smoothing, smoothing_params - ) + # Apply smoothing if requested + return _apply_smoothing_to_maps( + output_maps, smoothing, smoothing_params + ) def _adjust_image_limits(self, map, init_guess): img = 
sitk.GetImageFromArray(map) diff --git a/asltk/reconstruction/t2_mapping.py b/asltk/reconstruction/t2_mapping.py index 7a9e2f6..03f96c5 100644 --- a/asltk/reconstruction/t2_mapping.py +++ b/asltk/reconstruction/t2_mapping.py @@ -184,7 +184,7 @@ def create_map( ) # Prepare output maps - # TODO At the moment, the T2 maps and mean T2 maps are as ImageIO object, however, the Spacing, Dimension are not given as a 4D array. The m0 image is 3D... check if this is a problem for the T2 image properties + # TODO At the moment, the T2 maps and mean T2 maps are stored as ImageIO objects; however, the Spacing and Dimension are not given as a 4D array. Check whether they can be inherited from the m0 image, which is 3D. t2_maps_image = ImageIO( image_array=np.array( [ diff --git a/asltk/registration/asl_normalization.py b/asltk/registration/asl_normalization.py index 0ee27c3..054b3ba 100644 --- a/asltk/registration/asl_normalization.py +++ b/asltk/registration/asl_normalization.py @@ -206,9 +206,9 @@ def head_movement_correction( If True, prints progress messages. Defaults to False. Raises: - TypeError: _description_ - ValueError: _description_ - RuntimeError: _description_ + TypeError: If the input is not an ASLData object. + ValueError: If no valid reference volume is provided. + RuntimeError: If the normalization fails. Returns: tuple: ASLData object with corrected volumes and a list of transformation matrices. 
@@ -286,10 +286,6 @@ def __apply_array_normalization( vol, ref_vol ) - # Adjust the transformation matrix - # if len(trans_m) > 1: - # # Non-linear transformation is being applied - trans_path = trans_m[-1] t_matrix = ants.read_transform(trans_path) if trans_proportions is None: @@ -318,10 +314,6 @@ def __apply_array_normalization( trans_mtx.append(trans_m) progress.update(task, advance=1) - # Rebuild the original ASLData object with the corrected volumes - # orig_shape = orig_shape[1:4] - # corrected_vols = np.stack(corrected_vols).reshape(orig_shape) - if isinstance(trans_mtx[0], list): # If the transformation list has a inner list, then take the first one trans_mtx = trans_mtx[0]