def get_training_shape(type='tuple', idx=False):
    """Return the training scale size from the anatomical model config.

    With ``idx`` set, ``type`` is used as an integer index into the
    scale-size list; otherwise ``type`` selects the string formatting
    ('tuple' or 'triple') of the returned shape.
    """
    workflow_config = json_to_dict('data/config.json')
    anat_model_config = json_to_dict(
        workflow_config['masking_config']['masking_config_anat']
        ['model_config_path'])
    scale_size = anat_model_config['augmentation']['mlebe']['scale_size']
    if idx:
        return scale_size[type]
    elif type == 'tuple':
        return '({}, {})'.format(scale_size[0], scale_size[1])
    elif type == 'triple':
        return '({}, {}, {})'.format(scale_size[0], scale_size[1],
                                     scale_size[2])
    else:
        return scale_size[0]
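# A minimal usage sketch for get_training_shape, assuming the referenced
# anat model config holds a scale_size of [64, 64, 48] (illustrative values):
get_training_shape()                   # -> '(64, 64)'
get_training_shape(type='triple')      # -> '(64, 64, 48)'
get_training_shape(type=2, idx=True)   # -> 48; with idx set, type is an index
get_training_shape(type='other')       # -> 64, the first dimension only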
def print_dice():
    """Return the anatomical model Dice score for the current workflow uid,
    rounded to three decimals."""
    reg_results_df = pd.read_csv('prepare/classifier/reg_results.csv')
    config = json_to_dict('data/config.json')
    uid = config['workflow_config']['uid']
    dice_score = reg_results_df.loc[reg_results_df['uid'] == uid,
                                    'anat_model_dice'].item()
    return np.round(float(dice_score), 3)
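# For context, reg_results.csv is assumed to hold one row per workflow run,
# keyed by 'uid'. Only 'uid' and 'anat_model_dice' are read above; the uids
# and scores below are hypothetical:
import pandas as pd

reg_results_df = pd.DataFrame({
    'uid': ['3f2a9c', '7d41be'],
    'anat_model_dice': [0.912, 0.887],
    'func_model_dice': [0.845, 0.861],
})
print(reg_results_df.loc[reg_results_df['uid'] == '3f2a9c',
                         'anat_model_dice'].item())  # -> 0.912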
def get_masking_opts(masking_config_path: Optional[str], input_type: str):
    """Read the json config from the masking_config_path and fill the
    defaults with a schema."""
    config = json_to_dict(
        masking_config_path)['masking_config'] if masking_config_path else {}
    return get_masking_opts_defaults(
        config, input_type)[f'masking_config_{input_type}']
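# A minimal call sketch (path illustrative); get_masking_opts_defaults,
# defined elsewhere in the package, merges the user config into the schema
# defaults:
anat_opts = get_masking_opts('data/masking_config.json', input_type='anat')
# With no config path, the schema defaults alone are returned:
func_opts = get_masking_opts(None, input_type='func')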
def verify_config_paths(anat_config, func_config):
    """Assert that both model configs and their referenced pre-trained model
    paths exist, rewriting stale 'three_D/' path prefixes on the way."""
    assert os.path.exists(
        anat_config), 'model_config_path ' + anat_config + ' does not exist'
    assert os.path.exists(
        func_config), 'model_config_path ' + func_config + ' does not exist'
    for model_config in [anat_config, func_config]:
        model_config_dict = json_to_dict(model_config)
        # attempt to fix path (wrapper for old paths)
        if not os.path.exists(
                model_config_dict['model']['path_pre_trained_model']):
            new_path = model_config_dict['model'][
                'path_pre_trained_model'].replace('three_D/', '')
            if os.path.exists(new_path):
                write_to_jsonfile(model_config,
                                  [('model.path_pre_trained_model',
                                    new_path)])
                model_config_dict = json_to_dict(model_config)
        assert os.path.exists(
            model_config_dict['model']['path_pre_trained_model']), \
            'model path ' + model_config_dict['model'][
                'path_pre_trained_model'] + ' does not exist'
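# Usage sketch (paths illustrative): both configs must exist and reference a
# valid pre-trained model, otherwise an AssertionError is raised; a stale
# 'three_D/' prefix in the stored model path is silently rewritten first.
verify_config_paths('configs/anat_model_config.json',
                    'configs/func_model_config.json')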
def test_masker():
    """Smoke test: run predict_mask on a dummy volume, then clean up."""
    test_dir = os.path.join(os.path.dirname(mlebe.__file__),
                            'masking/tests/temp/')
    os.makedirs(test_dir, exist_ok=True)
    test_config = json_to_dict('configs/test_config.json')
    with open(os.path.join(test_dir, 'test_config.json'), 'w') as jsonfile:
        json.dump(test_config, jsonfile, indent=4)
    # dummy input volume with the expected anatomical dimensions
    test_in_file = np.ones((63, 96, 48))
    test_in_file = nib.Nifti1Image(test_in_file, np.eye(4))
    test_in_file_dir = os.path.join(test_dir, 'test_in_file.nii.gz')
    nib.save(test_in_file, test_in_file_dir)
    _, _, _ = predict_mask(test_in_file_dir,
                           os.path.join(test_dir, 'test_config.json'))
    shutil.rmtree(test_dir)
def prepare_config(json_config_path: Path, scratch_dir: Path,
                   additional_args: list = None):
    """Copy the workflow config to the scratch directory, stamp it with a
    fresh uid and write any additional (dotted_key, value) parameters."""
    mkdir(os.path.expanduser(scratch_dir))
    config = json_to_dict(json_config_path)
    # copy the json configuration file to the scratch directory
    new_config_path = scratch_dir / 'config.json'
    verify_config_path(new_config_path)
    copyfile(json_config_path, new_config_path)
    workflow_uid = uuid.uuid4().hex
    parameters = [('workflow_config.uid', workflow_uid)]
    if additional_args:
        parameters.extend(additional_args)
    # write workflow uid and model dice scores to new workflow config
    write_to_jsonfile(new_config_path, parameters)
    return config, workflow_uid
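# A minimal usage sketch; the additional parameter is a hypothetical example
# of the (dotted_key, value) pairs accepted by write_to_jsonfile:
from pathlib import Path

config, uid = prepare_config(
    Path('configs/noBiascorr_noCrop.json'),
    Path('~/.scratch/mlebe').expanduser(),
    additional_args=[('workflow_config.data_path', '/mnt/data')],
)
# uid is a fresh 32-character hex string, also written into the scratch copy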
"""Creates a dataselection dataframe.""" import os import pandas as pd from mlebe.training.configs.utils import json_to_dict from make_config import CONFIG_PATH as config_path config = json_to_dict(config_path) data_dir = config['workflow_config']['data_path'] studies = ["drlfom", "mgtdbs", "opfvta", "ztau", "hendrik_nvcz", 'irsabi'] def make_dataselection_anat(data_dir, studies): data_selection = pd.DataFrame() for o in os.listdir(data_dir): if (not studies or o in studies) and not o.startswith('.') and not o.endswith( '.xz'): # i.e. if o in studies or if studies empty data_set = o for x in os.listdir(os.path.join(data_dir, o)): if (x.endswith('preprocessed') or x.startswith('preprocess') or x.endswith('preprocessing') ) and not x.endswith('work'): for root, dirs, files in os.walk( os.path.join(data_dir, o, x)): for file in files: if not file.startswith('.') and ( file.endswith("_T2w.nii.gz") or file.endswith("_T1w.nii.gz")):
def get_epochs():
    # todo is this needed?
    reg_results_df = pd.read_csv('prepare/classifier/reg_results.csv')
    config = json_to_dict('data/config.json')
    uid = config['workflow_config']['uid']
    # note: the looked-up dice score is currently neither returned nor used
    dice_score = reg_results_df.loc[reg_results_df['uid'] == uid,
                                    'anat_model_dice'].item()
model_results_table_anat = model_results_table.loc[
    model_results_table['data_type'] == 'anat'].sort_values(
        'Overall_Dice', ascending=False)[:nbr_tries]
model_results_table_func = model_results_table.loc[
    model_results_table['data_type'] == 'func'].sort_values(
        'Overall_Dice', ascending=False)[:nbr_tries]
config_dict_path = 'configs/noBiascorr_noCrop.json'
scratch_dir = Path('~/.scratch/mlebe').expanduser()

for index in range(nbr_tries):
    norby.send_msg(f'Starting workflow preparation nbr {index}.',
                   add_loc_name=True)
    # load workflow results table
    workflow_results = pd.read_csv('classifier/reg_results.csv')
    # load config for the workflow
    config_dict = json_to_dict(config_dict_path)
    anat_model = model_results_table_anat.iloc[index]['config_path']
    func_model = model_results_table_func.iloc[index]['config_path']
    try:
        verify_config_paths(anat_model, func_model)
    except Exception as e:
        norby.send_msg(str(e))
        continue
    anat_model_uid = model_results_table_anat.iloc[index]['uid']
    func_model_uid = model_results_table_func.iloc[index]['uid']
    # check if this combination of anat_model and func_model has not been
    # tried before, matching on the model uids:
    if not ((workflow_results['anat_model_uid'] == anat_model_uid)
            & (workflow_results['func_model_uid'] == func_model_uid)).any():
        ...  # excerpt ends here in the source