def _generate_report(self):
    """Compose the visual report for an anatomical image.

    Optionally masks the anatomical image and overlays a contour; cut
    coordinates are derived from the contour when no mask was given,
    otherwise from the mask.
    """
    from niworkflows.viz.utils import compose_view, plot_registration

    NIWORKFLOWS_LOG.info('Generating visual report')

    anat = load_img(self._anat_file)
    contour_nii = load_img(self._contour) if self._contour is not None else None

    if self._mask_file:
        # Zero out everything outside the mask before plotting
        anat = unmask(apply_mask(anat, self._mask_file), self._mask_file)
        mask_nii = load_img(self._mask_file)
    else:
        mask_nii = threshold_img(anat, 1e-3)

    n_cuts = 7
    # BUGFIX: compare the contour against None explicitly -- truth-value
    # testing of nibabel images is unsupported (raises in nibabel >= 3)
    if not self._mask_file and contour_nii is not None:
        cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
    else:
        cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

    # Call composer
    compose_view(
        plot_registration(anat, 'fixed-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          contour=contour_nii,
                          compress=self.inputs.compress_report),
        [],
        out_file=self._out_report,
    )
def _run_interface(self, runtime):
    """Delegate to the base interface's run method, then generate reports.

    A ``NotImplementedError`` from the superclass means this class derives
    directly from ``BaseInterface`` and there is no inner interface to run.
    """
    if not self._mock_run:
        try:
            runtime = super(ReportCapableInterface, self)._run_interface(runtime)
        except NotImplementedError:
            pass  # the interface is derived from BaseInterface
    else:
        runtime.returncode = 0

    # leave early if there's nothing to do
    if not self.inputs.generate_report:
        return runtime

    self._out_report = os.path.abspath(self.inputs.out_report)
    self._post_run_hook(runtime)

    # check exit code and act consequently
    NIWORKFLOWS_LOG.debug('Running report generation code')

    if hasattr(runtime, 'returncode') and runtime.returncode not in [0, None]:
        # ``runtime`` is a Bunch, not a dict; the attribute is guaranteed
        # to exist here by the ``hasattr`` guard, so read it directly
        # instead of the previous dict-style ``runtime.get(...)`` call.
        self._generate_error_report(errno=runtime.returncode)
    else:
        self._generate_report()
        NIWORKFLOWS_LOG.info('Successfully created report (%s)',
                             self._out_report)

    return runtime
def _generate_report(self):
    """Generate a reportlet showing reference and fieldmap panels."""
    NIWORKFLOWS_LOG.info('Generating visual report')

    refnii = load_img(self.inputs.reference)
    fmapnii = load_img(self.inputs.fieldmap)
    contour_nii = load_img(self.inputs.mask) if isdefined(
        self.inputs.mask) else None
    mask_nii = threshold_img(refnii, 1e-3)
    # BUGFIX: avoid ``contour_nii or mask_nii`` -- truth-value testing of
    # nibabel images is unsupported (raises in nibabel >= 3)
    cuts = cuts_from_bbox(contour_nii if contour_nii is not None else mask_nii,
                          cuts=self._n_cuts)
    fmapdata = fmapnii.get_fdata()
    # Symmetric color range about zero for the fieldmap overlay
    vmax = max(fmapdata.max(), abs(fmapdata.min()))

    # Call composer
    compose_view(
        plot_registration(refnii, 'fixed-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          label='reference',
                          contour=contour_nii,
                          compress=False),
        plot_registration(fmapnii, 'moving-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          label='fieldmap (Hz)',
                          contour=contour_nii,
                          compress=False,
                          plot_params={
                              'cmap': coolwarm_transparent(),
                              'vmax': vmax,
                              'vmin': -vmax,
                          }),
        out_file=self._out_report,
    )
def _post_run_hook(self, runtime):
    """Record the fixed/moving images (and optional WM contour) to report."""
    self._fixed_image = self.inputs.reference
    self._moving_image = self.aggregate_outputs().out_file
    self._contour = None
    if isdefined(self.inputs.wm_seg):
        self._contour = self.inputs.wm_seg
    NIWORKFLOWS_LOG.info(
        'Report - setting fixed (%s) and moving (%s) images',
        self._fixed_image, self._moving_image)
def _post_run_hook(self, runtime):
    """Stash MELODIC's output directory so the report can be generated."""
    self._melodic_dir = self.aggregate_outputs().out_dir
    NIWORKFLOWS_LOG.info('Generating report for MELODIC')
def _post_run_hook(self, runtime):
    """Point the report at FreeSurfer's brainmask (fixed) and the
    registered file (moving), with the ribbon as contour."""
    subj_mri = os.path.join(
        self.inputs.subjects_dir, self.inputs.subject_id, 'mri')
    self._moving_image = self.aggregate_outputs().registered_file
    self._fixed_image = os.path.join(subj_mri, 'brainmask.mgz')
    self._contour = os.path.join(subj_mri, 'ribbon.mgz')
    NIWORKFLOWS_LOG.info(
        'Report - setting fixed (%s) and moving (%s) images',
        self._fixed_image, self._moving_image)
def _post_run_hook(self, runtime):
    """Pull the report images out of the internal ants.Registration."""
    # We need to dig into the internal ants.Registration interface;
    # take the first fixed image only
    self._fixed_image = self.norm.inputs.fixed_image[0]
    if isdefined(self.norm.inputs.fixed_image_mask):
        self._fixed_image_mask = self.norm.inputs.fixed_image_mask
    self._moving_image = self.aggregate_outputs().warped_image
    NIWORKFLOWS_LOG.info(
        'Report - setting fixed (%s) and moving (%s) images',
        self._fixed_image, self._moving_image)
def _post_run_hook(self, runtime):
    """Record the corrected (after) and warped (before) images for the
    report, with an optional WM-segmentation contour."""
    self._fixed_image_label = "after"
    self._moving_image_label = "before"
    self._fixed_image = self.aggregate_outputs().unwarped_file
    self._moving_image = self.inputs.in_file
    self._contour = None
    if isdefined(self.inputs.wm_seg):
        self._contour = self.inputs.wm_seg
    NIWORKFLOWS_LOG.info(
        'Report - setting corrected (%s) and warped (%s) images',
        self._fixed_image, self._moving_image)
def _generate_error_report(self, errno=None):
    """Save an HTML snippet reporting that report generation failed.

    Parameters
    ----------
    errno : int, optional
        Exit code returned by the wrapped interface; included in the
        snippet when provided (and nonzero).
    """
    # as of now we think this will be the same for every interface
    # NOTE: ``Logger.warn`` is a deprecated alias of ``warning``
    NIWORKFLOWS_LOG.warning('Report was not generated')

    errorstr = '<div><span class="error">Failed to generate report!</span>.\n'
    if errno:
        errorstr += (' <span class="error">Interface returned exit '
                     'code %d</span>.\n') % errno
    errorstr += '</div>\n'

    # Python 2 needs binary mode to write the (byte) string
    with open(self._out_report, 'w' if PY3 else 'wb') as outfile:
        outfile.write(errorstr)
def _post_run_hook(self, runtime):
    """Set up the inputs needed to render the aCompCor ROI reportlet."""
    roi_mask = self.inputs.mask_file
    self._anat_file = self.inputs.realigned_file
    self._mask_file = roi_mask
    self._seg_files = [roi_mask]
    self._masked = False
    self._report_title = 'aCompCor ROI'
    NIWORKFLOWS_LOG.info(
        'Generating report for aCompCor. file "%s", mask "%s"',
        self.inputs.realigned_file, self._mask_file)
def _generate_report(self):
    """Generate a reportlet of the fieldmap overlaid on moving/fixed images."""
    NIWORKFLOWS_LOG.info('Generating visual report')

    movnii = refnii = load_img(self.inputs.reference)
    fmapnii = load_img(self.inputs.fieldmap)
    if isdefined(self.inputs.moving):
        movnii = load_img(self.inputs.moving)

    contour_nii = mask_nii = None
    if isdefined(self.inputs.mask):
        contour_nii = load_img(self.inputs.mask)
        maskdata = contour_nii.get_fdata() > 0
    else:
        mask_nii = threshold_img(refnii, 1e-3)
        maskdata = mask_nii.get_fdata() > 0
    # BUGFIX: avoid ``contour_nii or mask_nii`` -- truth-value testing of
    # nibabel images is unsupported (raises in nibabel >= 3)
    cuts = cuts_from_bbox(contour_nii if contour_nii is not None else mask_nii,
                          cuts=self._n_cuts)
    fmapdata = fmapnii.get_fdata()
    # Symmetric color range from robust (0.2-99.8%) percentiles in-mask
    vmax = max(abs(np.percentile(fmapdata[maskdata], 99.8)),
               abs(np.percentile(fmapdata[maskdata], 0.2)))

    fmap_overlay = [{
        'overlay': fmapnii,
        'overlay_params': {
            'cmap': coolwarm_transparent(max_alpha=self.inputs.max_alpha),
            'vmax': vmax,
            'vmin': -vmax,
        }
    }] * 2

    # ``show`` selects which panel carries the overlay
    if self.inputs.show != 'both':
        fmap_overlay[not self.inputs.show] = {}

    # Call composer
    compose_view(
        plot_registration(movnii, 'moving-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          label=self.inputs.moving_label,
                          contour=contour_nii,
                          compress=False,
                          **fmap_overlay[1]),
        plot_registration(refnii, 'fixed-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          label=self.inputs.reference_label,
                          contour=contour_nii,
                          compress=False,
                          **fmap_overlay[0]),
        out_file=self._out_report,
    )
def _config_ants(self, ants_settings):
    """Instantiate the internal ants.Registration from a settings file.

    Masks are applied either explicitly (zeroing the images outside the
    mask) or passed through to ANTs; when no reference image is given,
    the packaged MNI template at the requested resolution is used.
    """
    NIWORKFLOWS_LOG.info('Loading settings from file %s.', ants_settings)
    self.norm = Registration(
        moving_image=self.inputs.moving_image,
        num_threads=self.inputs.num_threads,
        from_file=ants_settings,
        terminal_output='file',
        write_composite_transform=True
    )

    if isdefined(self.inputs.moving_mask):
        if self.inputs.explicit_masking:
            # Zero out the moving image outside its mask
            self.norm.inputs.moving_image = mask(
                self.inputs.moving_image[0],
                self.inputs.moving_mask,
                "moving_masked.nii.gz")
        else:
            self.norm.inputs.moving_image_mask = self.inputs.moving_mask

    if isdefined(self.inputs.reference_image):
        self.norm.inputs.fixed_image = self.inputs.reference_image
        if isdefined(self.inputs.reference_mask):
            if self.inputs.explicit_masking:
                # BUGFIX: previously read the nonexistent trait
                # ``mreference_mask`` (typo for ``reference_mask``)
                self.norm.inputs.fixed_image = mask(
                    self.inputs.reference_image[0],
                    self.inputs.reference_mask,
                    "fixed_masked.nii.gz")
            else:
                self.norm.inputs.fixed_image_mask = \
                    self.inputs.reference_mask
    else:
        get_template = getattr(
            getters, 'get_{}'.format(self.inputs.template))
        mni_template = get_template()
        if self.inputs.orientation == 'LAS':
            raise NotImplementedError

        resolution = self.inputs.template_resolution
        if self.inputs.testing:
            # Lower resolution for faster test runs
            resolution = 2

        if self.inputs.explicit_masking:
            self.norm.inputs.fixed_image = mask(
                op.join(mni_template, '%dmm_%s.nii.gz' % (
                    resolution, self.inputs.reference)),
                op.join(mni_template, '%dmm_brainmask.nii.gz' % resolution),
                "fixed_masked.nii.gz")
        else:
            self.norm.inputs.fixed_image = op.join(
                mni_template, '%dmm_%s.nii.gz' % (
                    resolution, self.inputs.reference))
            self.norm.inputs.fixed_image_mask = op.join(
                mni_template, '%dmm_brainmask.nii.gz' % resolution)
def _get_settings(self):
    """Return the list of ANTs settings files to try, in sorted order.

    User-provided settings take precedence over the packaged defaults.
    """
    if isdefined(self.inputs.settings):
        NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults')
        return self.inputs.settings

    prefix = '{}-mni_registration_'.format(self.inputs.moving.lower())
    if self.inputs.testing:
        prefix += 'testing_'

    names = sorted(
        name for name in pkgr.resource_listdir('niworkflows', 'data')
        if name.startswith(prefix) and name.endswith('.json'))
    return [pkgr.resource_filename('niworkflows.data', name)
            for name in names]
def _post_run_hook(self, runtime):
    """Set up the inputs needed to render the tCompCor reportlet."""
    hvm = self.aggregate_outputs().high_variance_mask
    self._anat_file = self.inputs.realigned_file
    self._mask_file = hvm
    self._seg_files = [hvm]
    self._masked = False
    self._report_title = 'tCompCor - high variance voxels'
    NIWORKFLOWS_LOG.info(
        'Generating report for tCompCor. file "%s", mask "%s"',
        self.inputs.realigned_file, hvm)
def _post_run_hook(self, runtime):
    """Prepare a report overlaying BET's binary brain mask.

    Shows slices from each axis of an arbitrary volume of ``in_file``
    with the resulting binary brain mask overlaid.
    """
    brain_mask = self.aggregate_outputs().mask_file
    self._report_title = "BET: brain mask over anatomical input"
    self._anat_file = self.inputs.in_file
    self._mask_file = brain_mask
    self._seg_files = [brain_mask]
    self._masked = self.inputs.mask
    NIWORKFLOWS_LOG.info(
        'Generating report for BET. file "%s", and mask file "%s"',
        self._anat_file, self._mask_file)
def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, initial_size=0, total_size=None, verbose=1): """Download a file chunk by chunk and show advancement :param urllib.response.addinfourl response: response to the download request in order to get file size :param str local_file: hard disk file where data should be written :param int chunk_size: size of downloaded chunks. Default: 8192 :param bool report_hook: whether or not to show downloading advancement :param int initial_size: if resuming, indicate the initial size of the file :param int total_size: Expected final size of download (None means it is unknown). :param int verbose: verbosity level (0 means no message). :returns: the downloaded file path. :rtype: string """ try: if total_size is None: total_size = response.info().get('Content-Length').strip() total_size = int(total_size) + initial_size except Exception as exc: if verbose > 2: NIWORKFLOWS_LOG.warn('Total size of chunk could not be determined') if verbose > 3: NIWORKFLOWS_LOG.warn("Full stack trace: %s", str(exc)) total_size = None bytes_so_far = initial_size t_0 = time_last_display = time.time() while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) time_last_read = time.time() if (report_hook and # Refresh report every half second or when download is # finished. (time_last_read > time_last_display + 0.5 or not chunk)): _chunk_report_(bytes_so_far, total_size, initial_size, t_0) time_last_display = time_last_read if chunk: local_file.write(chunk) else: break return
def _post_run_hook(self, runtime):
    """Prepare a report overlaying the ANTs brain-extraction mask."""
    bem = self.aggregate_outputs().BrainExtractionMask
    self._anat_file = self.inputs.anatomical_image
    self._mask_file = bem
    self._seg_files = [bem]
    self._masked = False
    self._report_title = \
        'ANTS BrainExtraction: brain mask over anatomical input'
    NIWORKFLOWS_LOG.info(
        'Generating report for ANTS BrainExtraction. file "%s", mask "%s"',
        self._anat_file, self._mask_file)
def _post_run_hook(self, runtime):
    """Prepare a ReconAll report: ribbon contours over the brain volume."""
    outputs = self.aggregate_outputs()
    subj_mri = os.path.join(outputs.subjects_dir, outputs.subject_id, 'mri')
    self._anat_file = os.path.join(subj_mri, 'brain.mgz')
    self._contour = os.path.join(subj_mri, 'ribbon.mgz')
    self._masked = False
    self._report_title = "ReconAll: segmentation over anatomical"
    NIWORKFLOWS_LOG.info('Generating report for ReconAll (subject %s)',
                         outputs.subject_id)
def _post_run_hook(self, runtime):
    """Prepare a FAST report: tissue segmentation over the anatomical.

    Shows nine slices, three per axis, of an arbitrary volume of
    ``in_files`` with the resulting segmentation overlaid.
    """
    outputs = self.aggregate_outputs()
    # BUGFIX: a stray trailing comma previously made this a 1-tuple
    # instead of a filename string
    self._anat_file = self.inputs.in_files[0]
    self._mask_file = outputs.tissue_class_map
    self._seg_files = outputs.tissue_class_files
    self._masked = False
    self._report_title = "FAST: segmentation over anatomical"
    NIWORKFLOWS_LOG.info(
        'Generating report for FAST (in_files %s, '
        'segmentation %s, individual tissue classes %s).',
        self.inputs.in_files,
        outputs.tissue_class_map,
        outputs.tissue_class_files)
def _generate_report(self):
    """Compose the visual report comparing fixed and moving images.

    When a fixed-image mask is available both images are masked with it
    (the moving image is already in the fixed image's space); an optional
    contour is overlaid on both panels.
    """
    from niworkflows.viz.utils import compose_view, plot_registration

    NIWORKFLOWS_LOG.info('Generating visual report')

    fixed_image_nii = load_img(self._fixed_image)
    moving_image_nii = load_img(self._moving_image)
    contour_nii = load_img(
        self._contour) if self._contour is not None else None

    if self._fixed_image_mask:
        fixed_image_nii = unmask(
            apply_mask(fixed_image_nii, self._fixed_image_mask),
            self._fixed_image_mask)
        # since the moving image is already in the fixed image space we
        # should apply the same mask
        moving_image_nii = unmask(
            apply_mask(moving_image_nii, self._fixed_image_mask),
            self._fixed_image_mask)
        mask_nii = load_img(self._fixed_image_mask)
    else:
        mask_nii = threshold_img(fixed_image_nii, 1e-3)

    n_cuts = 7
    # BUGFIX: compare the contour against None explicitly -- truth-value
    # testing of nibabel images is unsupported (raises in nibabel >= 3)
    if not self._fixed_image_mask and contour_nii is not None:
        cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
    else:
        cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

    # Call composer
    compose_view(
        plot_registration(fixed_image_nii, 'fixed-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          label=self._fixed_image_label,
                          contour=contour_nii,
                          compress=self.inputs.compress_report),
        plot_registration(moving_image_nii, 'moving-image',
                          estimate_brightness=True,
                          cuts=cuts,
                          label=self._moving_image_label,
                          contour=contour_nii,
                          compress=self.inputs.compress_report),
        out_file=self._out_report,
    )
def _run_interface(self, runtime):
    """Build the report directly -- there is no inner interface to run."""
    self._out_report = os.path.abspath(self.inputs.out_report)
    self._fixed_image_label = "after"
    self._moving_image_label = "before"
    self._fixed_image = self.inputs.after
    self._moving_image = self.inputs.before
    self._contour = None
    if isdefined(self.inputs.wm_seg):
        self._contour = self.inputs.wm_seg
    NIWORKFLOWS_LOG.info(
        'Report - setting before (%s) and after (%s) images',
        self._fixed_image, self._moving_image)

    self._generate_report()
    NIWORKFLOWS_LOG.info('Successfully created report (%s)',
                         self._out_report)
    return runtime
def _run_interface(self, runtime):
    """Run spatial normalization, retrying over all available settings.

    Each settings file returned by ``_get_settings`` is tried in order;
    the first successful ANTs run wins. Raises ``RuntimeError`` when
    every configuration fails.
    """
    settings_files = self._get_settings()

    for ants_settings in settings_files:
        interface_result = None
        self._config_ants(ants_settings)
        NIWORKFLOWS_LOG.info(
            'Retry #%d, commandline: \n%s', self.retry, self.norm.cmdline)
        try:
            interface_result = self.norm.run()
        except Exception as exc:
            # NOTE: ``Logger.warn`` is a deprecated alias of ``warning``
            NIWORKFLOWS_LOG.warning(
                'Retry #%d failed: %s.', self.retry, exc)
            # Keep the nipype logs of the failed attempt for debugging
            errfile = op.join(runtime.cwd, 'stderr.nipype')
            outfile = op.join(runtime.cwd, 'stdout.nipype')
            shutil.move(errfile, errfile + '.%03d' % self.retry)
            shutil.move(outfile, outfile + '.%03d' % self.retry)

        if interface_result is not None:
            runtime.returncode = 0
            self._results.update(interface_result.outputs.get())
            NIWORKFLOWS_LOG.info(
                'Successful spatial normalization (retry #%d).', self.retry)
            return runtime

        self.retry += 1

    raise RuntimeError(
        'Robust spatial normalization failed after %d retries.'
        % (self.retry - 1))
def extract_svg(display_object, dpi=300, compress='auto'):
    """Remove the preamble of the svg files generated with nilearn.

    Strips the fixed height/width attributes so the SVG scales, injects
    a ``preserveAspectRatio`` attribute, and returns only the
    ``<svg>...</svg>`` span of the document.
    """
    image_svg = svg2str(display_object, dpi)
    if compress is True or compress == 'auto':
        image_svg = svg_compress(image_svg, compress)
    image_svg = re.sub(' height="[0-9]+[a-z]*"', '', image_svg, count=1)
    image_svg = re.sub(' width="[0-9]+[a-z]*"', '', image_svg, count=1)
    # BUGFIX: the attribute was misspelled ('preseveAspectRation'),
    # which made renderers ignore it entirely
    image_svg = re.sub(' viewBox',
                       ' preserveAspectRatio="xMidYMid meet" viewBox',
                       image_svg, count=1)
    start_tag = '<svg '
    start_idx = image_svg.find(start_tag)
    end_tag = '</svg>'
    end_idx = image_svg.rfind(end_tag)
    # BUGFIX: use ``==`` -- ``is`` on int literals relies on interning
    # and emits a SyntaxWarning on modern Pythons
    if start_idx == -1 or end_idx == -1:
        NIWORKFLOWS_LOG.info('svg tags not found in extract_svg')
    # rfind gives the start index of the substr. We want this substr
    # included in our return value so we add its length to the index.
    end_idx += len(end_tag)
    return image_svg[start_idx:end_idx]
def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, verbose=1): """ Create if necessary and returns data directory of given dataset. :param str dataset_name: The unique name of the dataset. :param str data_dir: Path of the data directory. Used to force data storage in a specified location. :param list(str) default_paths: Default system paths in which the dataset may already have been installed by a third party software. They will be checked first. :param int verbose: verbosity level (0 means no message). :returns: the path of the given dataset directory. :rtype: str .. note:: This function retrieves the datasets directory (or data directory) using the following priority : 1. defaults system paths 2. the keyword argument data_dir 3. the global environment variable CRN_SHARED_DATA 4. the user environment variable CRN_DATA 5. ~/.cache/stanford-crn in the user home folder """ # We build an array of successive paths by priority # The boolean indicates if it is a pre_dir: in that case, we won't add the # dataset name to the path. 
paths = [] # Check data_dir which force storage in a specific location if data_dir is not None: paths.extend([(d, False) for d in data_dir.split(os.pathsep)]) # Search possible system paths if default_paths is not None: for default_path in default_paths: paths.extend([(d, True) for d in default_path.split(os.pathsep)]) # If data_dir has not been specified, then we crawl default locations if data_dir is None: global_data = os.getenv('CRN_SHARED_DATA') if global_data is not None: paths.extend([(d, False) for d in global_data.split(os.pathsep)]) local_data = os.getenv('CRN_DATA') if local_data is not None: paths.extend([(d, False) for d in local_data.split(os.pathsep)]) paths.append((NIWORKFLOWS_CACHE_DIR, False)) if verbose > 2: NIWORKFLOWS_LOG.info('Dataset search paths: %s', str(paths)) # Check if the dataset exists somewhere for path, is_pre_dir in paths: if not is_pre_dir: path = op.join(path, dataset_name) if op.islink(path): # Resolve path path = readlinkabs(path) if op.exists(path) and op.isdir(path): if verbose > 1: NIWORKFLOWS_LOG.info('Dataset already cached in %s', path) return path # If not, create a folder in the first writeable directory errors = [] for (path, is_pre_dir) in paths: if not is_pre_dir: path = op.join(path, dataset_name) if not op.exists(path): try: os.makedirs(path) if verbose > 0: NIWORKFLOWS_LOG.info('Dataset created in %s', path) return path except Exception as exc: short_error_message = getattr(exc, 'strerror', str(exc)) errors.append('\n -{0} ({1})'.format(path, short_error_message)) raise OSError('niworkflows tried to store the dataset in the following ' 'directories, but:' + ''.join(errors))
def plot_registration(anat_nii, div_id, plot_params=None,
                      order=('z', 'x', 'y'),
                      cuts=None, estimate_brightness=False, label=None,
                      contour=None, compress='auto'):
    """Plot the foreground and background views.

    Default order is: axial, coronal, sagittal. Returns a list of SVG
    strings (one per cut axis) with uniquified figure ids, or ``None``
    if any generated SVG fails to parse.
    """
    plot_params = {} if plot_params is None else plot_params

    # Use default MNI cuts if none defined
    if cuts is None:
        raise NotImplementedError  # TODO

    out_files = []
    if estimate_brightness:
        plot_params = robust_set_limits(anat_nii.get_data().reshape(-1),
                                        plot_params)

    # A contour with exactly the labels {0, 2, 3, 41, 42} is taken to be a
    # FreeSurfer ribbon.mgz
    ribbon = contour is not None and \
        np.array_equal(np.unique(contour.get_data()), [0, 2, 3, 41, 42])

    if ribbon:
        # Fold the right-hemisphere labels (41, 42) onto (2, 3)
        contour_data = contour.get_data() % 39
        white = nlimage.new_img_like(contour, contour_data == 2)
        pial = nlimage.new_img_like(contour, contour_data >= 2)

    # Plot each cut axis
    for i, mode in enumerate(list(order)):
        plot_params['display_mode'] = mode
        plot_params['cut_coords'] = cuts[mode]
        # Only the first panel carries the label
        plot_params['title'] = label if i == 0 else None

        # Generate nilearn figure
        display = plot_anat(anat_nii, **plot_params)
        if ribbon:
            kwargs = {'levels': [0.5], 'linewidths': 0.5}
            display.add_contours(white, colors='b', **kwargs)
            display.add_contours(pial, colors='r', **kwargs)
        elif contour is not None:
            display.add_contours(contour, colors='b', levels=[0.5],
                                 linewidths=0.5)

        svg = extract_svg(display, compress=compress)
        display.close()

        # Find and replace the figure_1 id.
        try:
            xml_data = etree.fromstring(svg)
        except etree.XMLSyntaxError as e:
            NIWORKFLOWS_LOG.info(e)
            # NOTE(review): aborting here discards panels rendered so far
            # and returns None -- confirm callers tolerate this
            return
        find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS)
        find_text(xml_data)[0].set('id', '%s-%s-%s' % (div_id, mode, uuid4()))

        out_files.append(etree.tostring(xml_data))

    return out_files
def _post_run_hook(self, runtime):
    """Record the reference (fixed) and resampled (moving) images."""
    self._moving_image = self.aggregate_outputs().output_image
    self._fixed_image = self.inputs.reference_image
    NIWORKFLOWS_LOG.info(
        'Report - setting fixed (%s) and moving (%s) images',
        self._fixed_image, self._moving_image)
def _fetch_file(url, dataset_dir, filetype=None, resume=True, overwrite=False,
                md5sum=None, username=None, password=None, retry=0,
                verbose=1, temp_downloads=None):
    """Load requested file, downloading it if needed or requested.

    :param str url: contains the url of the file to be downloaded.
    :param str dataset_dir: path of the data directory. Used for data
        storage in the specified location.
    :param bool resume: if true, try to resume partially downloaded files
    :param overwrite: if bool true and file already exists, delete it.
    :param str md5sum: MD5 sum of the file. Checked if download of the file
        is required
    :param str username: username used for basic HTTP authentication
    :param str password: password used for basic HTTP authentication
    :param int verbose: verbosity level (0 means no message).
    :returns: absolute path of downloaded file
    :rtype: str

    ..note::

      If, for any reason, the download procedure fails, all downloaded files
      are removed.

    """
    # Skip download entirely when the dataset dir is already populated
    if not overwrite and os.listdir(dataset_dir):
        return True

    data_dir, _ = op.split(dataset_dir)

    if temp_downloads is None:
        temp_downloads = op.join(NIWORKFLOWS_CACHE_DIR, 'downloads')

    # Determine data path
    if not op.exists(temp_downloads):
        os.makedirs(temp_downloads)

    # Determine filename using URL
    parse = urlparse(url)
    file_name = op.basename(parse.path)
    if file_name == '':
        # URL path carries no basename; derive one from its hash
        file_name = _md5_hash(parse.path)
        if filetype is not None:
            file_name += filetype

    temp_full_name = op.join(temp_downloads, file_name)
    temp_part_name = temp_full_name + ".part"

    if overwrite:
        shutil.rmtree(dataset_dir, ignore_errors=True)

    if op.exists(temp_full_name):
        if overwrite:
            os.remove(temp_full_name)

    t_0 = time.time()
    local_file = None
    initial_size = 0

    # Download data
    request = Request(url)
    request.add_header('Connection', 'Keep-Alive')
    if username is not None and password is not None:
        if not url.startswith('https'):
            raise ValueError(
                'Authentication was requested on a non secured URL ({0!s}).'
                'Request has been blocked for security reasons.'.format(url))
        # Note: HTTPBasicAuthHandler is not fitted here because it relies
        # on the fact that the server will return a 401 error with proper
        # www-authentication header, which is not the case of most
        # servers.
        encoded_auth = base64.b64encode(
            (username + ':' + password).encode())
        request.add_header(b'Authorization', b'Basic ' + encoded_auth)

    if verbose > 0:
        displayed_url = url.split('?')[0] if verbose == 1 else url
        NIWORKFLOWS_LOG.info('Downloading data from %s ...', displayed_url)

    if resume and op.exists(temp_part_name):
        # Download has been interrupted, we try to resume it.
        local_file_size = op.getsize(temp_part_name)
        # If the file exists, then only download the remainder
        request.add_header("Range", "bytes={}-".format(local_file_size))
        try:
            data = urlopen(request)
            content_range = data.info().get('Content-Range')
            if (content_range is None or not content_range.startswith(
                    'bytes {}-'.format(local_file_size))):
                raise IOError('Server does not support resuming')
        except Exception:
            # A wide number of errors can be raised here. HTTPError,
            # URLError... I prefer to catch them all and rerun without
            # resuming.
            # NOTE: ``Logger.warn`` is a deprecated alias of ``warning``
            if verbose > 0:
                NIWORKFLOWS_LOG.warning(
                    'Resuming failed, try to download the whole file.')
            return _fetch_file(
                url, dataset_dir, resume=False, overwrite=overwrite,
                md5sum=md5sum, username=username, password=password,
                verbose=verbose)
        local_file = open(temp_part_name, "ab")
        initial_size = local_file_size
    else:
        try:
            data = urlopen(request)
        except (HTTPError, URLError):
            if retry < MAX_RETRIES:
                if verbose > 0:
                    NIWORKFLOWS_LOG.warning(
                        'Download failed, retrying (attempt %d)', retry + 1)
                time.sleep(5)
                return _fetch_file(
                    url, dataset_dir, resume=False, overwrite=overwrite,
                    md5sum=md5sum, username=username, password=password,
                    verbose=verbose, retry=retry + 1)
            else:
                raise
        local_file = open(temp_part_name, "wb")

    _chunk_read_(data, local_file, report_hook=(verbose > 0),
                 initial_size=initial_size, verbose=verbose)

    # temp file must be closed prior to the move
    if not local_file.closed:
        local_file.close()
    shutil.move(temp_part_name, temp_full_name)

    delta_t = time.time() - t_0
    if verbose > 0:
        # Complete the reporting hook
        sys.stderr.write(
            ' ...done. ({0:.0f} seconds, {1:.0f} min)\n'.format(
                delta_t, delta_t // 60))

    if md5sum is not None:
        if _md5_sum_file(temp_full_name) != md5sum:
            # BUGFIX: the message previously interpolated the closed file
            # *object* (``local_file``) instead of the file's path
            raise ValueError(
                "File {} checksum verification has failed."
                " Dataset fetching aborted.".format(temp_full_name))

    if filetype is None:
        # Infer the file type from the extension, folding e.g. ``.tar.gz``
        fname, filetype = op.splitext(op.basename(temp_full_name))
        if filetype == '.gz':
            fname, ext = op.splitext(fname)
            filetype = ext + filetype

    if filetype.startswith('.'):
        filetype = filetype[1:]

    if filetype == 'tar':
        sp.check_call(['tar', 'xf', temp_full_name], cwd=data_dir)
        os.remove(temp_full_name)
        return True
    return True
def _generate_report(self):
    """Generate a reportlet of the fieldmap, rotated to canonical space."""
    NIWORKFLOWS_LOG.info("Generating visual report")

    movnii = load_img(self.inputs.reference)
    canonical_r = rotation2canonical(movnii)
    movnii = refnii = rotate_affine(movnii, rot=canonical_r)

    fmapnii = nb.squeeze_image(
        rotate_affine(load_img(self.inputs.fieldmap), rot=canonical_r))

    if fmapnii.dataobj.ndim == 4:
        # Pick the first nonzero timepoint of a 4D fieldmap
        for i, tstep in enumerate(nb.four_to_three(fmapnii)):
            if np.any(np.asanyarray(tstep.dataobj) != 0):
                fmapnii = tstep
                break

    if isdefined(self.inputs.moving):
        movnii = rotate_affine(load_img(self.inputs.moving),
                               rot=canonical_r)

    contour_nii = mask_nii = None
    if isdefined(self.inputs.mask):
        contour_nii = rotate_affine(load_img(self.inputs.mask),
                                    rot=canonical_r)
        maskdata = contour_nii.get_fdata() > 0
    else:
        mask_nii = threshold_img(refnii, 1e-3)
        maskdata = mask_nii.get_fdata() > 0
    # BUGFIX: avoid ``contour_nii or mask_nii`` -- truth-value testing of
    # nibabel images is unsupported (raises in nibabel >= 3)
    cuts = cuts_from_bbox(contour_nii if contour_nii is not None else mask_nii,
                          cuts=self._n_cuts)
    fmapdata = fmapnii.get_fdata()
    # Symmetric color range from robust (0.2-99.8%) percentiles in-mask
    vmax = max(
        abs(np.percentile(fmapdata[maskdata], 99.8)),
        abs(np.percentile(fmapdata[maskdata], 0.2)),
    )
    if self.inputs.apply_mask:
        fmapdata[~maskdata] = 0
        fmapnii = fmapnii.__class__(fmapdata, fmapnii.affine,
                                    fmapnii.header)

    fmap_overlay = [{
        "overlay": fmapnii,
        "overlay_params": {
            "cmap": coolwarm_transparent(max_alpha=self.inputs.max_alpha),
            "vmax": vmax,
            "vmin": -vmax,
        },
    }] * 2

    # ``show`` selects which panel carries the overlay
    if self.inputs.show != "both":
        fmap_overlay[not self.inputs.show] = {}

    # Call composer
    compose_view(
        plot_registration(movnii, "moving-image",
                          estimate_brightness=True,
                          cuts=cuts,
                          label=self.inputs.moving_label,
                          contour=contour_nii,
                          compress=False,
                          **fmap_overlay[1]),
        plot_registration(refnii, "fixed-image",
                          estimate_brightness=True,
                          cuts=cuts,
                          label=self.inputs.reference_label,
                          contour=contour_nii,
                          compress=False,
                          **fmap_overlay[0]),
        out_file=self._out_report,
    )