def check_params_multiedge(base_feature_list, input_dir, atlas, smoothing_param,
                           node_size, out_dir, return_results):
    """Validation of parameters and appropriate type casting if necessary.

    Verifies features and atlas choice, ensures the input directory exists,
    and makes sure the results have somewhere to go (disk or caller).
    """

    check_features(base_feature_list)
    check_atlas(atlas)

    if not pexists(input_dir):
        raise IOError('Input directory at {} does not exist.'.format(input_dir))

    # results must be either written out or handed back to the caller
    nowhere_to_go = (out_dir is None) and (return_results is False)
    if nowhere_to_go:
        raise ValueError('Results are neither saved to disk, '
                         'nor being received when returned.\n'
                         'Specify out_dir (not None) or make return_results=True')

    if (out_dir is not None) and (not pexists(out_dir)):
        os.mkdir(out_dir)

    # subdivision size is not validated yet, as it is not implemented
    return
def check_input_dir_T1(fs_dir, user_dir):
    """Ensures proper input is specified.

    Exactly one of fs_dir / user_dir must be given and must exist on disk.
    Returns the chosen directory and the corresponding feature type
    ('freesurfer' or 'generic').
    """

    if fs_dir is None and user_dir is None:
        raise ValueError('At least one of --fs_dir or --user_dir must be specified.')

    if fs_dir is not None and user_dir is not None:
        raise ValueError('Only one of --fs_dir or --user_dir can be specified.')

    if user_dir is None:
        # Freesurfer-organized input
        if not pexists(fs_dir):
            raise IOError('Freesurfer directory specified does not exist!')
        in_dir, type_of_features = fs_dir, 'freesurfer'
    else:
        # generic user-organized input
        if not pexists(user_dir):
            raise IOError('User-specified input directory does not exist!')
        in_dir, type_of_features = user_dir, 'generic'

    # defensive re-check of the resolved directory
    if not pexists(in_dir):
        raise IOError('Invalid specification - check proper combination of --fs_dir and --user_dir')

    return in_dir, type_of_features
def check_id_list_with_regex(id_list_in, in_dir, name_pattern):
    """Checks to ensure each subject listed has the required files
    and returns only those that can be processed.

    Parameters
    ----------
    id_list_in : str or None
        Path to a text file of IDs; when None, all subfolders of in_dir are used.
    in_dir : str
        Root folder containing one folder per subject/unit.
    name_pattern : str
        Regex/pattern handed to expand_regex_paths to locate files per subject.

    Returns
    -------
    id_list_out : numpy.ndarray
        Usable (possibly expanded) IDs.
    images_for_id : dict
        Maps each usable ID to its existing file path.

    Raises
    ------
    IOError
        If the ID list path does not exist or cannot be read.
    ValueError
        If no subject has the required files.
    """

    if id_list_in is not None:
        if not pexists(id_list_in):
            raise IOError('Given ID list does not exist!')
        try:
            id_list = read_id_list(id_list_in)
        except Exception as exc:
            # narrow catch + chaining so the root cause is not lost
            # (previously a bare except swallowed it)
            raise IOError('unable to read the ID list.') from exc
    else:
        # get all IDs in the given folder
        id_list = [folder for folder in os.listdir(in_dir)
                   if os.path.isdir(pjoin(in_dir, folder))]

    id_list_out = list()
    id_list_err = list()
    invalid_list = list()

    # this dict contains existing files for each ID
    # useful to open external programs like tkmedit
    images_for_id = dict()

    for subject_id in id_list:
        results = expand_regex_paths(in_dir, subject_id, name_pattern)
        if len(results) < 1:
            print('No results for {} - skipping it.'.format(subject_id))
            continue

        for dp in results:
            if not pexists(dp) or os.path.getsize(dp) <= 0:
                id_list_err.append(subject_id)
                invalid_list.append(dp)
            else:
                # derive a unique ID per matched file, prefixed by subject when needed
                new_id = splitext(basename(dp))[0]
                if subject_id not in new_id:
                    new_id = '{}_{}'.format(subject_id, new_id)
                id_list_out.append(new_id)
                images_for_id[new_id] = dp

    if len(id_list_err) > 0:
        warnings.warn('The following subjects do NOT have all the required files '
                      'or some are empty - skipping them!')
        print('\n'.join(id_list_err))
        print('\n\nThe following files do not exist or empty: \n {} \n\n'.format(
            '\n'.join(invalid_list)))

    if len(id_list_out) < 1:
        raise ValueError('All the subject IDs do not have the required files - unable to proceed.')

    print('{} subjects/sessions/units are usable for review.'.format(len(id_list_out)))

    return np.array(id_list_out), images_for_id
def get_ratings(out_dir, id_list):
    """Creates a separate folder for ratings, backing up any previous sessions.

    Returns the restored ratings dict, the ratings folder, the IDs still to be
    reviewed, and the set of IDs already rated.
    """

    ratings = dict()
    prev_done = []  # IDs rated in an earlier session
    incomplete_list = list(id_list)  # working copy

    ratings_dir = pjoin(out_dir, suffix_ratings_dir)
    if not pexists(ratings_dir):
        makedirs(ratings_dir, exist_ok=True)
    else:
        prev_ratings = pjoin(ratings_dir, file_name_ratings)
        if pexists(prev_ratings):
            ratings = load_ratings_csv(prev_ratings)
            # back up the previous session before it gets overwritten
            copyfile(prev_ratings, pjoin(ratings_dir, file_name_ratings_backup))
            # figure out what remains to be reviewed
            prev_done = set(ratings.keys())
            incomplete_list = list(set(id_list) - prev_done)

    if len(prev_done) > 0:
        print('Ratings for {} subjects were restored from previous backup'.format(len(prev_done)))
    print('To be reviewed : {}'.format(len(incomplete_list)))

    return ratings, ratings_dir, incomplete_list, prev_done
def check_input_dir(fs_dir, user_dir, vis_type):
    """Ensures proper input is specified.

    Exactly one of fs_dir / user_dir must be provided; Freesurfer-dependent
    vis_types additionally require fs_dir and a working Freesurfer install.
    Returns the resolved input directory.
    """

    in_dir = fs_dir
    if fs_dir is None and user_dir is None:
        raise ValueError('At least one of --fs_dir or --user_dir must be specified.')

    if fs_dir is not None:
        if user_dir is not None:
            raise ValueError('Only one of --fs_dir or --user_dir can be specified.')

        # NOTE(review): the Freesurfer-install check is nested under the
        # fs_dir branch here, i.e. providing fs_dir implies Freesurfer
        # functionality is requested -- confirm against callers.
        if not freesurfer_installed():
            raise EnvironmentError(
                'Freesurfer functionality is requested(e.g. visualizing annotations), but is not installed!')

    # Freesurfer-specific visualizations cannot work without fs_dir
    if fs_dir is None and vis_type in freesurfer_vis_types:
        raise ValueError('vis_type depending on Freesurfer organization is specified, but --fs_dir is not provided.')

    if user_dir is None:
        if not pexists(fs_dir):
            raise IOError('Freesurfer directory specified does not exist!')
        else:
            in_dir = fs_dir
    elif fs_dir is None:
        if not pexists(user_dir):
            raise IOError('User-specified input directory does not exist!')
        else:
            in_dir = user_dir

    # final sanity check on the resolved directory
    if not pexists(in_dir):
        raise IOError('Invalid specification - check proper combination of --fs_dir and --user_dir')

    return in_dir
def check_atlas(atlas):
    """Validation of atlas input.

    Accepts either a predefined atlas name, a Freesurfer-organized atlas
    folder, or a path to an image volume (loaded via nibabel).

    Returns
    -------
    atlas : str or nibabel image
        The (possibly lower-cased) name, the folder path, or the loaded image.

    Raises
    ------
    ValueError
        On an unrecognized name, a folder without annot files, or an
        unreadable volume.
    NotImplementedError
        When atlas is not a string.
    """

    # when its a name for pre-defined atlas
    if isinstance(atlas, str):
        if not pexists(atlas):  # just a name
            atlas = atlas.lower()
            if atlas not in cfg.atlas_list:
                raise ValueError('Invalid choice of atlas. Accepted : {}'.format(cfg.atlas_list))
        elif os.path.isdir(atlas):  # cortical atlas in Freesurfer org
            if not check_atlas_annot_exist(atlas):
                raise ValueError('Given atlas folder does not contain Freesurfer label annot files. '
                                 'Needed : given_atlas_dir/label/?h.aparc.annot')
        elif pexists(atlas):  # may be a volumetric atlas?
            try:
                atlas = nibabel.load(atlas)
            except Exception as exc:
                # narrow catch + chaining (was a bare except that lost the cause)
                traceback.print_exc()
                raise ValueError('Unable to read the provided image volume. '
                                 'Must be a nifti 2d volume, readable by nibabel.') from exc
        else:
            raise ValueError('Unable to decipher or use the given atlas.')
    else:
        raise NotImplementedError('Atlas must be a string, providing a name or '
                                  'path to Freesurfer folder or a 3D nifti volume.')

    return atlas
def check_params_single_edge(base_features, in_dir, atlas, smoothing_param,
                             node_size, out_dir, return_results):
    """Validates parameters for single-edge graph extraction.

    Checks features, atlas, input dir existence, output destination,
    and the node_size (min vertices per patch) choice.
    """

    check_features(base_features)
    check_atlas(atlas)

    if not pexists(in_dir):
        raise IOError('Input directory at {} does not exist.'.format(in_dir))

    # results must go to disk, the caller, or both
    missing_output_spec = (out_dir is None) and (return_results is False)
    if missing_output_spec:
        raise ValueError('Results are neither saved to disk, '
                         'nor being received when returned!\n'
                         'Specify out_dir (not None) or make return_results=True')

    if (out_dir is not None) and (not pexists(out_dir)):
        os.mkdir(out_dir)

    if node_size not in cfg.allowed_mvpp:
        raise ValueError('Invalid min_vtx_per_patch. Choose one of {}'
                         ''.format(cfg.allowed_mvpp))

    return
def check_id_list(id_list_in, in_dir, vis_type, mri_name, seg_name=None, in_dir_type=None):
    """Checks to ensure each subject listed has the required files
    and returns only those that can be processed.

    Parameters
    ----------
    id_list_in : str or None
        Path to an ID list file; when None, all subfolders of in_dir are used.
    in_dir : str
        Root input folder.
    vis_type, in_dir_type : str
        Passed through to get_path_for_subject to resolve per-subject paths.
    mri_name, seg_name : str
        Required file names; seg is optional.

    Returns
    -------
    (numpy.ndarray, dict)
        Usable IDs and, per ID, the dict of resolved file paths.
    """

    if id_list_in is not None:
        if not pexists(id_list_in):
            raise IOError('Given ID list does not exist!')
        try:
            id_list = read_id_list(id_list_in)
        except Exception as exc:
            # narrow catch + chaining preserves the root cause (was a bare except)
            raise IOError('unable to read the ID list.') from exc
    else:
        # get all IDs in the given folder
        id_list = [folder for folder in os.listdir(in_dir)
                   if os.path.isdir(pjoin(in_dir, folder))]

    if seg_name is not None:
        required_files = {'mri': mri_name, 'seg': seg_name}
    else:
        required_files = {'mri': mri_name}

    id_list_out = list()
    id_list_err = list()
    invalid_list = list()

    # this dict contains existing files for each ID
    # useful to open external programs like tkmedit
    images_for_id = dict()

    for subject_id in id_list:
        path_list = {img: get_path_for_subject(in_dir, subject_id, name, vis_type, in_dir_type)
                     for img, name in required_files.items()}
        invalid = [pfile for pfile in path_list.values()
                   if not pexists(pfile) or os.path.getsize(pfile) <= 0]
        if len(invalid) > 0:
            id_list_err.append(subject_id)
            invalid_list.extend(invalid)
        else:
            id_list_out.append(subject_id)
            images_for_id[subject_id] = path_list

    if len(id_list_err) > 0:
        warnings.warn('The following subjects do NOT have all the required files or some are empty - skipping them!')
        print('\n'.join(id_list_err))
        print('\n\nThe following files do not exist or empty: \n {} \n\n'.format(
            '\n'.join(invalid_list)))

    if len(id_list_out) < 1:
        raise ValueError('All the subject IDs do not have the required files - unable to proceed.')

    print('{} subjects are usable for review.'.format(len(id_list_out)))

    return np.array(id_list_out), images_for_id
def save_per_subject_graph(graph_nx, out_dir, subject, str_suffix=None):
    """Saves the given networkx graph to disk in GraphML format.

    When out_dir is None, nothing is written. Failures to write are
    reported (best effort) rather than raised.
    """

    if out_dir is not None:
        # per-subject output folder
        out_subject_dir = pjoin(out_dir, subject)
        if not pexists(out_subject_dir):
            os.mkdir(out_subject_dir)

        if str_suffix is not None:
            out_file_name = '{}_graynet.graphml'.format(str_suffix)
        else:
            out_file_name = 'graynet.graphml'

        out_weights_path = pjoin(out_subject_dir, out_file_name)

        try:
            # NOTE: a discarded nx.info(graph_nx) call was removed here --
            # its return value was unused, and nx.info no longer exists in
            # networkx>=3.0, where it would raise and (being inside this try)
            # silently prevent the graph from ever being written.
            nx.write_graphml(graph_nx, out_weights_path, encoding='utf-8')
            print('\nSaved the graph to \n{}'.format(out_weights_path))
        except Exception:
            # best-effort save: report and continue
            print('\nUnable to save graph to \n{}'.format(out_weights_path))
            traceback.print_exc()

    return
def save_summary_stats(roi_values, roi_labels, stat_name, out_dir, subject, str_suffix=None):
    """Saves the ROI summary stats (e.g. medians) to disk as a small CSV.

    Writes one 'label,value' line per ROI, preceded by a '#roi,<stat_name>'
    header. When out_dir is None, nothing is written. Write failures are
    reported (best effort) rather than raised.
    """

    if out_dir is not None:
        # per-subject output folder
        out_subject_dir = pjoin(out_dir, subject)
        if not pexists(out_subject_dir):
            os.mkdir(out_subject_dir)

        if str_suffix is not None:
            out_file_name = '{}_roi_stats.csv'.format(str_suffix)
        else:
            out_file_name = 'roi_stats.csv'

        out_weights_path = pjoin(out_subject_dir, out_file_name)

        try:
            with open(out_weights_path, 'w') as of:
                of.write('#roi,{}\n'.format(stat_name))
                for name, value in zip(roi_labels, roi_values):
                    of.write('{},{}\n'.format(name, value))
            print('\nSaved roi stats to \n{}'.format(out_weights_path))
        except Exception:
            # was a bare except; narrowed, still best-effort
            print('\nUnable to save extracted features to {}'.format(out_weights_path))
            traceback.print_exc()

    return
def save(weight_vec, out_dir, subject, str_suffix=None):
    """Saves the feature vector to disk as a CSV (one value per line, %.5f).

    When out_dir is None, nothing is written. Write failures are reported
    (best effort) rather than raised.
    """

    if out_dir is not None:
        # per-subject output folder
        out_subject_dir = pjoin(out_dir, subject)
        if not pexists(out_subject_dir):
            os.mkdir(out_subject_dir)

        if str_suffix is not None:
            out_file_name = '{}_graynet.csv'.format(str_suffix)
        else:
            out_file_name = 'graynet.csv'

        out_weights_path = pjoin(out_subject_dir, out_file_name)

        try:
            np.savetxt(out_weights_path, weight_vec, fmt='%.5f')
            print('\nSaved the features to \n{}'.format(out_weights_path))
        except Exception:
            # was a bare except; narrowed, still best-effort
            print('\nUnable to save features to {}'.format(out_weights_path))
            traceback.print_exc()

    return
def check_bids_dir(dir_path):
    """Checks if its a BIDS folder or not.

    Requires a readable dataset_description.json with a BIDSVersion field
    at the root of dir_path.

    Returns
    -------
    (str, str)
        The realpath of the folder, and a 'BIDSVersion:<ver>' tag.

    Raises
    ------
    ValueError
        If the description file is missing.
    IOError
        If the file is unreadable or lacks the BIDSVersion field.
    """

    descr_file_name = 'dataset_description.json'
    descr_path = pjoin(dir_path, descr_file_name)
    if not pexists(descr_path):
        raise ValueError('There is no {} file at the root\n '
                         'Ensure folder is formatted according to BIDS spec.'.format(descr_file_name))

    try:
        import json
        with open(descr_path) as df:
            descr = json.load(df)
    except Exception as exc:
        # narrow catch + chaining preserves the root cause (was a bare except)
        raise IOError('{} could not be read'.format(descr_path)) from exc

    ver_tag = 'BIDSVersion'
    # consistency fix: use ver_tag here instead of repeating the literal
    if ver_tag not in descr:
        raise IOError('There is no field {} in \n\t {}'.format(ver_tag, descr_path))

    in_dir = realpath(dir_path)
    dir_type = 'BIDSVersion:' + descr['BIDSVersion']

    return in_dir, dir_type
def check_subjects(subjects_info):
    """Ensure subjects are provided and their data exist.

    Accepts either a path to a text file (one ID per line) or an iterable
    of IDs.

    Returns
    -------
    (ndarray, int, int, int)
        ID array, number of subjects, widest ID length, and the number of
        digits needed to print the subject count.
    """

    # bug fix: collections.Iterable was removed in Python 3.10;
    # the abc module is the supported location
    from collections.abc import Iterable

    if isinstance(subjects_info, str):
        if not pexists(subjects_info):
            raise IOError('path to subject list does not exist: {}'.format(subjects_info))
        subjects_list = np.genfromtxt(subjects_info, dtype=str)
    elif isinstance(subjects_info, Iterable):
        if len(subjects_info) < 1:
            raise ValueError('Empty subject list.')
        subjects_list = subjects_info
    else:
        raise ValueError('Invalid value provided for subject list. \n '
                         'Must be a list of paths, or '
                         'path to a file containing one path per line for each subject.')

    subject_id_list = np.atleast_1d(subjects_list)
    num_subjects = subject_id_list.size
    if num_subjects < 1:
        raise ValueError('Input subject list is empty.')

    # widths used by callers for aligned console output
    num_digits_id_size = len(str(num_subjects))
    max_id_width = max(map(len, subject_id_list))

    return subject_id_list, num_subjects, max_id_width, num_digits_id_size
def read_image(img_spec, error_msg='image', num_dims=3):
    """Reads an image from path (via nibabel, reoriented to closest canonical)
    or accepts an ndarray directly; validates dimensionality and casts
    non-float64 data to float32.

    Parameters
    ----------
    img_spec : str or numpy.ndarray
        Path to an image file, or the image data itself.
    error_msg : str
        Noun used in the not-found error message.
    num_dims : int
        Expected dimensionality; only 3 or 4 are allowed.
    """

    if isinstance(img_spec, str):
        if pexists(realpath(img_spec)):
            hdr = nib.load(img_spec)
            # trying to stick to an orientation
            hdr = nib.as_closest_canonical(hdr)
            img = hdr.get_data()
        else:
            raise IOError('Given path to {} does not exist!\n\t{}'.format(error_msg, img_spec))
    elif isinstance(img_spec, np.ndarray):
        img = img_spec
    else:
        raise ValueError('Invalid input specified! '
                         'Input either a path to image data, or provide 3d Matrix directly.')

    if num_dims == 3:
        img = check_image_is_3d(img)
    elif num_dims == 4:
        check_image_is_4d(img)
    else:
        # bug fix: the message previously lacked .format(num_dims), so the
        # '{}' placeholder was printed literally
        raise ValueError('Requested check for {} dims - allowed: 3 or 4!'.format(num_dims))

    # cast anything that is not float64 (ints, float32, ...) to float32
    if not np.issubdtype(img.dtype, np.float64):
        img = img.astype('float32')

    return img
def restore_previous_ratings(qcw): """Creates a separate folder for ratings, backing up any previous sessions.""" # making a copy incomplete_list = list(qcw.id_list) prev_done = [] # empty list ratings_file, backup_name_ratings = get_ratings_path_info(qcw) if pexists(ratings_file): ratings, notes = load_ratings_csv(ratings_file) # finding the remaining prev_done = set(ratings.keys()) incomplete_list = list(set(qcw.id_list) - prev_done) else: ratings = dict() notes = dict() if len(prev_done) > 0: print('\nRatings for {}/{} subjects were restored.'.format( len(prev_done), len(qcw.id_list))) if len(incomplete_list) < 1: print('No subjects to review/rate - exiting.') sys.exit(0) else: print('To be reviewed : {}\n'.format(len(incomplete_list))) return ratings, notes, incomplete_list
def check_atlas(atlas_spec): """Validation of atlas input.""" # when its a name for pre-defined atlas if isinstance(atlas_spec, str): if not pexists(atlas_spec): # just a name atlas_spec = atlas_spec.lower() if atlas_spec not in cfg.atlas_list: raise ValueError('Invalid choice of atlas {}.' ' Accepted : {}'.format( atlas_spec, cfg.atlas_list)) atlas_name = atlas_spec elif os.path.isdir(atlas_spec): # cortical atlas in Freesurfer org if not check_atlas_annot_exist(atlas_spec): raise ValueError( 'Given atlas folder does not contain Freesurfer label annot files. ' 'Needed : given_atlas_dir/label/?h.aparc.annot') atlas_name = filename_without_ext(atlas_spec) elif pexists(atlas_spec): # may be a volumetric atlas? atlas_name = filename_without_ext(atlas_spec) try: atlas_spec = nibabel.load(atlas_spec) except: traceback.print_exc() raise ValueError( 'Unable to read the provided image volume. ' 'Must be a nifti 2d volume, readable by nibabel.') else: raise ValueError('Unable to decipher or use the given atlas.') elif is_image(atlas_spec): if not is_image_3D(atlas_spec): raise ValueError('An image is supplied for atlas. ' 'But is not 3D, ' 'or one/more dimensions seem to be empty') if atlas_spec.__class__ in nibabel.all_image_classes: atlas_name = filename_without_ext(atlas_spec.get_filename()) else: # when the input is an ndarray w/o a way to specify a name atlas_name = 'UnnamedAtlas' else: raise NotImplementedError( 'Atlas must be a string, providing a name or ' 'path to Freesurfer folder or a 3D nifti volume.') return atlas_spec, atlas_name
def check_input_dir_alignment(in_dir):
    """Validates the input folder for alignment review.

    Returns the folder along with the 'generic' feature-type tag.
    """

    dir_is_usable = (in_dir is not None) and pexists(in_dir)
    if not dir_is_usable:
        raise IOError('Invalid dir is None or does not exist!')

    return in_dir, 'generic'
def load_ratings_csv(prev_ratings):
    """Reads a two-column (id,rating) CSV into a dict.

    Returns an empty dict when the file does not exist.
    """

    if pexists(prev_ratings):
        # context manager ensures the handle is closed (was leaked before)
        with open(prev_ratings) as pf:
            info_dict = dict([line.strip().split(',') for line in pf.readlines()])
    else:
        info_dict = dict()

    return info_dict
def outlier_advisory(qcw):
    """
    Performs outlier detection based on chosen types of data and technique.

    Returns
    -------
    outliers_by_sample : dict
        Keyed in by sample id, each element is a list of features that
        identified a given ID as a possible outlier.
    outliers_by_feature : dict
        Keyed in by feature, each element is a list of IDs that feature
        identified as possible outliers.
    """

    if not pexists(qcw.out_dir):
        makedirs(qcw.out_dir)

    outliers_by_feature = dict()
    outliers_by_sample = dict()

    if qcw.disable_outlier_detection:
        print('outlier detection: disabled, as requested.')
        return outliers_by_sample, outliers_by_feature

    for feature_type in qcw.outlier_feat_types:
        print('\nRunning outlier detection based on {} measures:'.format(feature_type))
        features = gather_freesurfer_data(qcw, feature_type)
        out_file = pjoin(qcw.out_dir,
                         '{}_{}_{}.txt'.format(cfg.outlier_list_prefix,
                                               qcw.outlier_method, feature_type))
        outliers_by_feature[feature_type] = detect_outliers(
            features, qcw.id_list,
            method=qcw.outlier_method,
            out_file=out_file,
            fraction_of_outliers=qcw.outlier_fraction)

    # re-organizing the identified outliers by sample
    for sid in qcw.id_list:
        # each id contains a list of all feature types that flagged it as an outlier
        outliers_by_sample[sid] = [feat for feat in qcw.outlier_feat_types
                                   if sid in outliers_by_feature[feat]]

    # dropping the IDs that were not flagged by any feature,
    # so a simple membership test in this dict reveals whether an ID was
    # ever suspected as an outlier
    # (loop variable renamed: previously shadowed the builtin `id`)
    outliers_by_sample = {sid: flag_list
                          for sid, flag_list in outliers_by_sample.items()
                          if flag_list}

    return outliers_by_sample, outliers_by_feature
def check_id_list(id_list_in, in_dir, vis_type, mri_name, seg_name):
    """Checks to ensure each subject listed has the required files
    and returns only those that can be processed.

    Both mri_name and seg_name are required for every subject.

    Returns
    -------
    list
        IDs whose required files exist and are non-empty.
    """

    if id_list_in is not None:
        if not pexists(id_list_in):
            raise IOError('Given ID list does not exist!')
        try:
            # read all lines and strip them of newlines/spaces;
            # `with` closes the handle (it was previously leaked)
            with open(id_list_in) as lf:
                id_list = [line.strip('\n ') for line in lf]
        except Exception as exc:
            # narrow catch + chaining preserves the root cause (was a bare except)
            raise IOError('unable to read the ID list.') from exc
    else:
        # get all IDs in the given folder
        id_list = [folder for folder in os.listdir(in_dir)
                   if os.path.isdir(pjoin(in_dir, folder))]

    required_files = (mri_name, seg_name)

    id_list_out = list()
    id_list_err = list()
    invalid_list = list()
    for subject_id in id_list:
        path_list = [get_path_for_subject(in_dir, subject_id, req_file, vis_type)
                     for req_file in required_files]
        invalid = [this_file for this_file in path_list
                   if not pexists(this_file) or os.path.getsize(this_file) <= 0]
        if len(invalid) > 0:
            id_list_err.append(subject_id)
            invalid_list.extend(invalid)
        else:
            id_list_out.append(subject_id)

    if len(id_list_err) > 0:
        warnings.warn('The following subjects do NOT have all the required files or some are empty - skipping them!')
        print('\n'.join(id_list_err))
        print('\n\nThe following files do not exist or empty: \n {} \n\n'.format('\n'.join(invalid_list)))

    if len(id_list_out) < 1:
        raise ValueError('All the subject IDs do not have the required files - unable to proceed.')

    print('{} subjects are usable for review.'.format(len(id_list_out)))

    return id_list_out
def check_input_dir_T1(fs_dir, user_dir, bids_dir):
    """Ensures exactly one of the three possible input folders is specified
    and exists; resolves and returns it with its feature-type tag
    ('BIDS', 'freesurfer' or 'generic').
    """

    if fs_dir is None and user_dir is None and bids_dir is None:
        raise ValueError('At least one of --bids_dir or --fs_dir or --user_dir must '
                         'be specified, and only one can be specified.')

    if bids_dir is not None:
        # BIDS excludes the other two input styles
        if fs_dir is not None or user_dir is not None:
            raise ValueError('fs_dir and user_dir can NOT be specified when '
                             'specifying BIDS')
        bids_root = realpath(bids_dir)
        if not pexists(bids_root):
            raise IOError('BIDS directory specified does not exist!')
        return bids_root, 'BIDS'

    if fs_dir is not None and user_dir is not None:
        raise ValueError('--user_dir can not be specified when --fs_dir is ')

    if user_dir is None:
        # Freesurfer-organized input
        if not pexists(fs_dir):
            raise IOError('Freesurfer directory specified does not exist!')
        in_dir, type_of_features = fs_dir, 'freesurfer'
    else:
        # generic user-organized input
        if not pexists(user_dir):
            raise IOError('User-specified input directory does not exist!')
        in_dir, type_of_features = user_dir, 'generic'

    # defensive re-check of the resolved directory
    if not pexists(in_dir):
        raise IOError('Invalid specification: check proper combination of '
                      '--bids_dir, --fs_dir, --user_dir')

    return realpath(in_dir), type_of_features
def save_ratings(ratings, out_dir):
    """Save ratings before closing shop.

    Backs up any existing ratings file before overwriting it with the
    current session's 'id,rating' lines.

    Raises
    ------
    IOError
        If the ratings file cannot be written (with the original error
        chained, and a pointer to the backup).
    """

    ratings_dir = pjoin(out_dir, suffix_ratings_dir)
    if not pexists(ratings_dir):
        makedirs(ratings_dir)

    ratings_file = pjoin(ratings_dir, file_name_ratings)
    prev_ratings_backup = pjoin(ratings_dir, file_name_ratings_backup)
    if pexists(ratings_file):
        # keep the previous session recoverable
        copyfile(ratings_file, prev_ratings_backup)

    lines = '\n'.join(['{},{}'.format(sid, rating) for sid, rating in ratings.items()])
    try:
        with open(ratings_file, 'w') as cf:
            cf.write(lines)
    except Exception as exc:
        # chain the original error instead of discarding it (was a bare except)
        raise IOError(
            'Error in saving ratings to file!!\nBackup might be helpful at:\n\t{}'.format(
                prev_ratings_backup)) from exc

    return
def check_atlas_annot_exist(atlas_dir, hemi_list=None):
    " Checks for the presence of atlas annotations "

    def _annot_usable(hemi):
        # non-empty ?h.aparc.annot under the atlas label folder
        annot_path = pjoin(atlas_dir, 'label', '{}.aparc.annot'.format(hemi))
        return pexists(annot_path) and os.path.getsize(annot_path) > 0

    hemis = ('lh', 'rh') if hemi_list is None else hemi_list
    return all(_annot_usable(hemi) for hemi in hemis)
def check_params_single_edge(base_features, in_dir, atlas, smoothing_param,
                             node_size, out_dir, return_results):
    """Validates parameters for single-edge graph extraction:
    features, atlas, input folder and output destination.
    """

    check_features(base_features)
    check_atlas(atlas)

    if not pexists(in_dir):
        raise IOError('Input directory at {} does not exist.'.format(in_dir))

    # results must go to disk, the caller, or both
    nothing_to_do = (out_dir is None) and (return_results is False)
    if nothing_to_do:
        raise ValueError('Results are neither saved to disk or being received when returned.\n'
                         'Specify out_dir (not None) or make return_results=True')

    if (out_dir is not None) and (not pexists(out_dir)):
        os.mkdir(out_dir)

    # subdivision size is not validated yet, as it is not implemented
    return
def get_ratings_path_info(qcw):
    """Common routine to construct the same names:
    the ratings CSV path and its backup path (creating the folder if needed).
    """

    ratings_dir = pjoin(qcw.out_dir, cfg.suffix_ratings_dir)
    if not pexists(ratings_dir):
        makedirs(ratings_dir)

    base_name = '{}_{}_{}'.format(qcw.vis_type, qcw.suffix, cfg.file_name_ratings)
    ratings_file = pjoin(ratings_dir, base_name)
    backup_file = pjoin(ratings_dir, '{}_{}'.format(cfg.prefix_backup, base_name))

    return ratings_file, backup_file
def load_ratings_csv(prev_ratings_file):
    """Reads ratings from a CSV file into two dicts (ratings, notes).

    Format expected in each line: subject_id,ratings,notes

    Returns empty dicts when the file does not exist. Blank lines are
    skipped; notes may themselves contain commas.
    """

    ratings = dict()
    notes = dict()
    if pexists(prev_ratings_file):
        # `with` closes the handle (it was previously leaked);
        # maxsplit=2 keeps commas inside the notes intact -- the writer
        # (save_ratings_to_disk) does not escape them, so a plain split(',')
        # used to truncate such notes on reload
        with open(prev_ratings_file) as pf:
            csv_values = [line.strip().split(',', 2) for line in pf if line.strip()]
        ratings = {item[0]: item[1] for item in csv_values}
        notes = {item[0]: item[2] for item in csv_values}

    return ratings, notes
def save_ratings_to_disk(ratings, notes, qcw):
    """Save ratings before closing shop.

    Backs up any existing ratings file, then writes one
    'id,rating,note' line per rated subject.

    Raises
    ------
    IOError
        If the ratings file cannot be written (original error chained).
    """

    ratings_file, prev_ratings_backup = get_ratings_path_info(qcw)

    if pexists(ratings_file):
        # keep the previous session recoverable
        copyfile(ratings_file, prev_ratings_backup)

    lines = '\n'.join(['{},{},{}'.format(sid, rating, notes[sid])
                       for sid, rating in ratings.items()])
    try:
        with open(ratings_file, 'w') as cf:
            cf.write(lines)
    except Exception as exc:
        # chain the original error instead of discarding it (was a bare except)
        raise IOError(
            'Error in saving ratings to file!!\nBackup might be helpful at:\n\t{}'.format(
                prev_ratings_backup)) from exc

    return
def load_image_from_disk(img_spec):
    """Vanilla image loader."""

    # already in memory: pass through untouched
    if isinstance(img_spec, np.ndarray):
        return img_spec

    if isinstance(img_spec, str):
        if not pexists(realpath(img_spec)):
            raise IOError('Given path to image does not exist!')
        # trying to stick to an orientation
        hdr = nib.as_closest_canonical(nib.load(img_spec))
        return hdr.get_data()

    raise ValueError('Invalid input specified! '
                     'Input either a path to image data, or provide 3d Matrix directly.')
def make_output_path_graph(out_dir, subject, str_prefixes):
    "Constructs path to save a multigraph to disk."

    if out_dir is None:
        return None

    # per-subject output folder, created on demand
    subject_dir = pjoin(out_dir, subject)
    if not pexists(subject_dir):
        os.mkdir(subject_dir)

    if isinstance(str_prefixes, str):
        str_prefixes = [str_prefixes, ]

    file_name = '{}_graynet.graphml'.format('_'.join(str_prefixes))
    return pjoin(subject_dir, file_name)
def check_inputs_defacing(in_dir, defaced_name, mri_name, render_name, id_list_in):
    """Validates the integrity of the inputs for defacing review.

    Ensures the input folder and per-subject original/defaced scans exist
    and are non-empty, and gathers all rendered screenshots per subject.

    Returns
    -------
    (str, ndarray, dict, str, str, str)
        Resolved input dir, usable IDs, per-ID file paths (original,
        defaced, render list), and the validated file names.
    """

    import fnmatch  # hoisted here: was previously re-imported inside the loop

    in_dir = realpath(in_dir)
    if not pexists(in_dir):
        raise ValueError('user_dir does not exist : {}'.format(in_dir))

    defaced_name = check_string_is_nonempty(defaced_name, 'defaced MRI scan')
    mri_name = check_string_is_nonempty(mri_name, 'original MRI scan')
    render_name = check_string_is_nonempty(render_name, '3D rendered image')

    if id_list_in is not None:
        if not pexists(id_list_in):
            raise IOError('Given ID list does not exist @ \n'
                          ' {}'.format(id_list_in))
        try:
            id_list = read_id_list(id_list_in)
        except Exception as exc:
            # narrow catch + chaining preserves the root cause (was a bare except)
            raise IOError('unable to read the ID list @ {}'.format(id_list_in)) from exc
    else:
        # get all IDs in the given folder
        id_list = [folder for folder in os.listdir(in_dir)
                   if os.path.isdir(pjoin(in_dir, folder))]

    required_files = {'original': mri_name, 'defaced': defaced_name}  # 'render': render_name

    id_list_out = list()
    id_list_err = list()
    invalid_list = list()

    # this dict contains existing files for each ID
    # useful to open external programs like tkmedit
    images_for_id = dict()

    for subject_id in id_list:
        path_list = {img_type: pjoin(in_dir, subject_id, name)
                     for img_type, name in required_files.items()}

        # finding all non-empty rendered screenshots for this subject
        rendered_images = fnmatch.filter(os.listdir(pjoin(in_dir, subject_id)),
                                         '{}*'.format(render_name))
        rendered_images = [pjoin(in_dir, subject_id, img) for img in rendered_images]
        rendered_images = [path for path in rendered_images
                           if pexists(path) and os.path.getsize(path) > 0]

        invalid = [pfile for pfile in path_list.values()
                   if not pexists(pfile) or os.path.getsize(pfile) <= 0]
        if len(invalid) > 0:
            id_list_err.append(subject_id)
            invalid_list.extend(invalid)
        else:
            id_list_out.append(subject_id)
            if len(rendered_images) < 1:
                raise ValueError('Atleast 1 non-empty rendered image is required!')
            path_list['render'] = rendered_images
            images_for_id[subject_id] = path_list

    if len(id_list_err) > 0:
        warnings.warn('The following subjects do NOT have all the required files, '
                      'or some are empty - skipping them!')
        print('\n'.join(id_list_err))
        print('\n\nThe following files do not exist or empty: \n {} \n\n'.format(
            '\n'.join(invalid_list)))

    if len(id_list_out) < 1:
        raise ValueError('All the subject IDs do not have the required files - '
                         'unable to proceed.')

    print('{} subjects are usable for review.'.format(len(id_list_out)))

    return in_dir, np.array(id_list_out), images_for_id, \
        defaced_name, mri_name, render_name