def random_sample_for_oai_inter_txt(txt_path,num_patient,num_pair_per_patient, output_path,mod,switcher):
    """Randomly build an inter-image pair list for OAI data restricted to one modality.

    :param txt_path: txt file whose lines are [img1, img2, label1, label2]
    :param num_patient: total number of patients to draw; num_patient/2 pair lines are sampled
    :param num_pair_per_patient: number of targets paired with each sampled source
    :param output_path: txt file the augmented pair list is written to
    :param mod: substring identifying the modality; only lines whose first path contains it are kept
    :param switcher: (old, new) substring pair applied to every output path
    """
    pair_list = read_txt_into_list(txt_path)
    num_per_m = int(num_patient/2)
    random.shuffle(pair_list)
    sampled_list = []
    num_s = 0
    # BUGFIX: the original nested a `while num_s < num_per_m` inside the for-loop,
    # which appended the SAME pair num_per_m times when it matched, and spun
    # forever (the `continue` re-entered the while with the same index) when it
    # did not.  Iterate the shuffled list once and stop at the quota instead.
    for pair in pair_list:
        if num_s >= num_per_m:
            break
        if mod in pair[0]:
            sampled_list.append([pair[0], pair[2]])
            sampled_list.append([pair[1], pair[3]])
            num_s += 1
    aug_pair_list = []
    for i, sampled_source in enumerate(sampled_list):
        index = list(range(len(sampled_list)))
        index.remove(i)  # never pair an image with itself
        sampled_index = random.sample(index, num_pair_per_patient)
        for j in range(num_pair_per_patient):
            sampled_target = sampled_list[sampled_index[j]]
            aug_pair_list.append([sampled_source[0], sampled_target[0],
                                  sampled_source[1], sampled_target[1]])
    aug_pair_list = [[pth.replace(*switcher) for pth in pths] for pths in aug_pair_list]
    write_list_into_txt(output_path, aug_pair_list)
def get_pair_txt_for_color_net(atlas_path,atlas_label_path,inv_warped_folder,inv_w_type,output_txt):
    """the image label path is not needed for training, we use atlas_label_path to meet input format"""
    inv_warped_file_list = glob(os.path.join(inv_warped_folder, inv_w_type))
    # one line per inversely-warped file: [atlas, warped, atlas_label, atlas_label]
    pair_list = [[atlas_path, warped_file, atlas_label_path, atlas_label_path]
                 for warped_file in inv_warped_file_list]
    write_list_into_txt(output_txt, pair_list)
def generate_moving_momentum_txt(pair_path_list_txt, momentum_path, output_path_txt_path, output_name_txt_path, pair_name_list_txt, affine_path):
    """Write one line per moving image: [moving, label, momenta..., (affine params...)],
    plus a matching filename-list txt."""
    moving_target_dict = generate_moving_target_dict(pair_path_list_txt, pair_name_list_txt)
    path_lines = []
    name_lines = []
    for moving_name, record in moving_target_dict.items():
        # fall back to the literal string "None" when no label path is recorded
        label_path = record['l_pth'] if record['l_pth'] is not None else "None"
        name_lines.append([record["name"][0][1]] + [entry[2] for entry in record["name"]])
        momentum_paths = [
            os.path.join(momentum_path, entry[0] + "_0000_Momentum.nii.gz")
            for entry in record["name"]
        ]
        affine_paths = []
        if affine_path is not None:
            affine_paths = [
                os.path.join(affine_path, entry[0] + "_affine_param.npy")
                for entry in record["name"]
            ]
        path_lines.append([record['m_pth'], label_path] + momentum_paths + affine_paths)
    write_list_into_txt(output_path_txt_path, path_lines)
    write_list_into_txt(output_name_txt_path, name_lines)
def get_pair_list_txt_by_line(file_txt, name_txt, output_path, pair_num_limit=-1, per_num_limit=-1):
    """Generate intra-subject pair path/name txt files under output_path.

    If pair_path_list.txt already exists the generation step is skipped; the
    two txt paths are returned either way.
    """
    pair_list_path = os.path.join(output_path, "pair_path_list.txt")
    pair_name_path = os.path.join(output_path, "pair_name_list.txt")
    if os.path.isfile(pair_list_path):
        print("the file {} has already exist, now read it".format(pair_list_path))
    else:
        img_label_list = read_img_label_into_list(file_txt)
        # each line holds the image paths followed by the same number of label paths
        img_list = []
        label_list = []
        for entry in img_label_list:
            set_size = len(entry) // 2
            img_list.append(entry[:set_size])
            label_list.append(entry[set_size:set_size * 2])
        fname_list = None
        if name_txt is not None:
            fname_list = read_fname_list_from_pair_fname_txt(name_txt, detail=True)
        pair_list, pair_name_list = gen_intra_pair_list(
            img_list, fname_list, label_list, pair_num_limit, per_num_limit)
        write_list_into_txt(pair_list_path, pair_list)
        write_list_into_txt(pair_name_path, pair_name_list)
    return pair_list_path, pair_name_path
def transfer_txt_file_to_altas_txt_file(txt_path, atlas_path,output_txt,atlas_label_path,sever_switcher=("","")):
    """we would remove the seg info here"""
    img_label_list = read_txt_into_list(txt_path)
    img_label_list = [[pth.replace(*sever_switcher) for pth in pths] for pths in img_label_list]
    img_atlas_list = []
    # image -> atlas direction first
    for img_label in img_label_list:
        img_atlas_list.append([img_label[0], atlas_path, img_label[1], atlas_label_path])
    # then atlas -> image direction
    for img_label in img_label_list:
        img_atlas_list.append([atlas_path, img_label[0], atlas_label_path, img_label[1]])
    write_list_into_txt(output_txt, img_atlas_list)
def generate_atlas_set(original_txt_path,atlas_path,l_atlas_path, output_path,phase='train',test_phase_path_list=None, test_phase_l_path_list=None):
    """Pair every image with the atlas and write pair_path_list/pair_name_list txts
    under output_path/phase.

    :param original_txt_path: source txt to load image lists from (non-test phases)
    :param atlas_path: atlas image path used as the target of every pair
    :param l_atlas_path: atlas label path
    :param output_path: root output folder; files go to output_path/phase
    :param phase: 'train'/'val'/'test'...; in 'test' the explicit lists below are used
    :param test_phase_path_list: image paths for the test phase
    :param test_phase_l_path_list: optional label paths for the test phase (may be None)
    """
    if phase != "test":
        source_path_list, target_path_list, l_source_path_list, l_target_path_list = \
            loading_img_list_from_files(original_txt_path)
    else:
        source_path_list = test_phase_path_list
        l_source_path_list = test_phase_l_path_list
        target_path_list = []
        l_target_path_list = []
    source_path_list = source_path_list + target_path_list
    file_num = len(source_path_list)
    # BUGFIX: in the test phase the label list may legitimately be None; the
    # original unconditionally did `l_source_path_list + l_target_path_list`,
    # raising TypeError before the None check below could ever run.
    if l_source_path_list is not None:
        l_source_path_list = l_source_path_list + l_target_path_list
    target_path_list = [atlas_path for _ in range(file_num)]
    l_target_path_list = [l_atlas_path for _ in range(file_num)]
    if l_source_path_list is not None and l_target_path_list is not None:
        assert len(source_path_list) == len(l_source_path_list)
        file_list = [[source_path_list[i], target_path_list[i],
                      l_source_path_list[i], l_target_path_list[i]] for i in range(file_num)]
    else:
        file_list = [[source_path_list[i], target_path_list[i]] for i in range(file_num)]
    output_phase_path = os.path.join(output_path, phase)
    os.makedirs(output_phase_path, exist_ok=True)
    pair_txt_path = os.path.join(output_phase_path, 'pair_path_list.txt')
    fn_txt_path = os.path.join(output_phase_path, 'pair_name_list.txt')
    fname_list = [generate_pair_name([file_list[i][0], file_list[i][1]]) for i in range(file_num)]
    write_list_into_txt(pair_txt_path, file_list)
    write_list_into_txt(fn_txt_path, fname_list)
def get_pair_txt_for_oai_reg_net(train_txt_path,warped_folder,warped_type, num_train,output_txt):
    """Sample per-patient training pairs from original + warped images and write them to a txt.

    Original images are inserted with extra weight so they are drawn more often
    than warped ones; pairs of the same file or the same visit are rejected.
    """
    train_pair_list = read_txt_into_list(train_txt_path)
    warped_file_list = glob(os.path.join(warped_folder, warped_type))
    patient_ids = {get_file_name(pair[0]).split("_")[0] for pair in train_pair_list}
    name_file_dict = {patient_id: [] for patient_id in patient_ids}
    extra_weight = 2  # each original image appears twice in the candidate pool
    for pair in train_pair_list:
        patient_id = get_file_name(pair[0]).split("_")[0]
        for _ in range(extra_weight):
            name_file_dict[patient_id].append(pair[0])
            name_file_dict[patient_id].append(pair[1])
    for warped_file in warped_file_list:
        patient_id = get_file_name(warped_file).split("_")[0]
        name_file_dict[patient_id].append(warped_file)
    num_per_patient = int(num_train / len(patient_ids))
    train_list = []
    for patient_id, candidates in name_file_dict.items():
        num_sample = 0
        while num_sample < num_per_patient:
            pair = random.sample(candidates, 2)
            same_file = get_file_name(pair[0]) == get_file_name(pair[1])
            same_visit = get_file_name(pair[0]).split("_")[1] == get_file_name(pair[1]).split("_")[1]
            if not (same_file or same_visit):
                train_list.append(pair)
                num_sample += 1
    write_list_into_txt(output_txt, train_list)
def split_txt(input_txt,num_split, output_folder):
    """Split the lines of a pair txt into num_split chunk files p0.txt .. p{n-1}.txt.

    :param input_txt: txt file whose lines are distributed across the chunks
    :param num_split: number of chunk files to produce
    :param output_folder: folder the p{i}.txt files are written into (created if missing)
    """
    os.makedirs(output_folder, exist_ok=True)
    pairs = read_txt_into_list(input_txt)
    # BUGFIX/generalization: np.split raises ValueError unless len(pairs) is an
    # exact multiple of num_split; np.array_split accepts any length and is
    # identical in the evenly-divisible case.
    output_splits = np.array_split(np.arange(len(pairs)), num_split)
    for i, split_indices in enumerate(output_splits):
        split = [pairs[ind] for ind in split_indices]
        write_list_into_txt(os.path.join(output_folder, 'p{}.txt'.format(i)), split)
def get_test_file_for_brainstorm_color(test_path,transfer_path,output_txt):
    """Rewrite each test image path to its brainstorm warped counterpart, keeping labels."""
    # expected warped naming: atlas_image_9023193_image_test_iter_0_warped.nii.gz
    file_label_list = read_txt_into_list(test_path)
    warped_name = lambda fname: "atlas_image_" + fname + "_test_iter_0_warped.nii.gz"
    new_file_label_list = []
    for entry in file_label_list:
        img_path, label_path = entry[0], entry[1]
        transferred = os.path.join(transfer_path, warped_name(get_file_name(img_path)))
        new_file_label_list.append([transferred, label_path])
    write_list_into_txt(output_txt, new_file_label_list)
def random_sample_from_txt(txt_path,num, output_path,switcher):
    """Sample num pairs (each also added in the reversed direction) and rewrite paths.

    With num <= 0 the full list is kept unchanged (no reversal added).
    """
    pair_list = read_txt_into_list(txt_path)
    if num > 0:
        sampled_list = []
        for sample in random.sample(pair_list, num):
            sampled_list.append([sample[0], sample[1], sample[2], sample[3]])
            # also register the opposite direction
            sampled_list.append([sample[1], sample[0], sample[3], sample[2]])
    else:
        sampled_list = pair_list
    sampled_list = [[pth.replace(*switcher) for pth in pths] for pths in sampled_list]
    write_list_into_txt(output_path, sampled_list)
def compose_file_to_file_reg(source_txt, target_txt,output_txt,sever_switcher=('','')):
    """Write the full cross product of source x target [img_s, img_t, lab_s, lab_t] pairs."""
    source_img_label = read_txt_into_list(source_txt)
    target_img_label = read_txt_into_list(target_txt)
    pair = []
    for src in source_img_label:
        for tgt in target_img_label:
            line = [src[0], tgt[0], src[1], tgt[1]]
            pair.append([item.replace(*sever_switcher) for item in line])
    write_list_into_txt(output_txt, pair)
def get_img_label_txt_file(path, ftype,switcher, output_txt=None):
    """Pair each matched file with the label path derived via switcher, report missing
    files, delete images whose derived label file is missing, and write the list.

    :param path: folder to search
    :param ftype: glob pattern (may use ** with recursive matching)
    :param switcher: (old, new) substring pair mapping an image path to its label path
    :param output_txt: txt file the [image, label] list is written to
    """
    f_pth = os.path.join(path, ftype)
    file_list = glob(f_pth, recursive=True)
    file_list = [[f, f.replace(*switcher)] for f in file_list]
    for pair in file_list:
        if not os.path.isfile(pair[0]):
            print(pair[0])
        if not os.path.isfile(pair[1]):
            print(pair[1])
            # BUGFIX: the original spawned `rm <path>` through a shell, which
            # breaks on paths with spaces and is shell-injection-prone;
            # os.remove is the safe, portable equivalent.
            os.remove(pair[0])
    write_list_into_txt(output_txt, file_list)
def split_txt(input_txt, num_split, output_folder, sub_fname="p"):
    """Split an image/label list txt into ceil(len/num_split)-sized chunk files.

    None entries are serialized as the string "None".  Returns the number of
    chunk files actually written.
    """
    import math
    os.makedirs(output_folder, exist_ok=True)
    pairs = read_img_label_into_list(input_txt)
    if len(pairs):
        if isinstance(pairs[0], list):
            pairs = [["None" if item is None else item for item in pair] for pair in pairs]
        else:
            pairs = ["None" if item is None else item for item in pairs]
    num_pairs = len(pairs)
    chunk_size = max(math.ceil(num_pairs / num_split), 1)
    num_written = 0
    for i, start in enumerate(range(0, num_pairs, chunk_size)):
        chunk = pairs[start:start + chunk_size]
        write_list_into_txt(
            os.path.join(output_folder, '{}{}.txt'.format(sub_fname, i)), chunk)
        num_written += 1
    return num_written
def get_input_file(refer_folder, output_txt):
    """Collect EXP/INSP image+label quadruples under refer_folder and write them to a txt."""
    source_image_path_list = glob.glob(
        os.path.join(refer_folder, "**", "*EXP*img*"))
    file_list = []
    for source_image in source_image_path_list:
        # labels share the image path with a different suffix; INSP mirrors EXP
        source_label = source_image.replace("_img.nii.gz", "_label.nii.gz")
        target_image = source_image.replace("_EXP_", "_INSP_")
        target_label = target_image.replace("_img.nii.gz", "_label.nii.gz")
        file_list.append([source_image, target_image, source_label, target_label])
    write_list_into_txt(output_txt, file_list)
def init_test_env(setting_path, output_path, file_list, fname_list):
    """
    Create the segmentation test environment.  The file list is saved into
    output_path/seg/test/file_path_list.txt and a corresponding (auto-parsed if
    not given) filename list into output_path/seg/test/file_name_list.txt.

    BUGFIX (docs): the original docstring described 'reg' output paths and
    parameters (image_path_list/l_path_list) that do not match this function;
    the code itself writes under 'seg' and takes file_list/fname_list.

    :param setting_path: the path to load 'cur_task_setting.json' and
      'cur_data_setting.json' (optional if the related settings are in cur_task_setting)
    :param output_path: the output path of the task
    :param file_list: the file list; each item is either an image path or an
      [image_path, label_path] pair
    :param fname_list: optional filename list; parsed from file_list when None
    :return: tuple of ParameterDict, datapro (optional, None when the data json
      is absent) and tsk_set
    """
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path), "task setting not exists"
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    file_num = len(file_list)
    os.makedirs(os.path.join(output_path, 'seg/test'), exist_ok=True)
    os.makedirs(os.path.join(output_path, 'seg/res'), exist_ok=True)
    file_txt_path = os.path.join(output_path, 'seg/test/file_path_list.txt')
    fn_txt_path = os.path.join(output_path, 'seg/test/file_name_list.txt')
    has_label = len(file_list[0]) == 2
    if fname_list is None:
        if has_label:
            fname_list = [get_file_name(file_list[i][0]) for i in range(file_num)]
        else:
            fname_list = [get_file_name(file_list[i]) for i in range(file_num)]
    write_list_into_txt(file_txt_path, file_list)
    write_list_into_txt(fn_txt_path, fname_list)
    data_task_name = 'seg'
    cur_task_name = 'res'
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = cur_task_name
    tsm.task_par['tsk_set']['output_root_path'] = os.path.join(output_path, data_task_name)
    return dm, tsm
def generate_file_for_xu():
    """Build source/target pair and name txts for the r21 ct-cbct OG->EM and OG->SM cases."""
    folder_path = "/playpen-raid1/xhs400/Research/data/r21/data/ct-cbct/images/"
    outpath = "/playpen-raid1/zyshen/debug/xu/"
    paths = glob(os.path.join(folder_path, "**", "image_normalized.nii.gz"), recursive=True)
    og_paths = [p for p in paths if "_OG" in p]
    # name = <patient folder>_<scan folder>; label shares the folder of its image
    name_of = lambda pth: pth.split("/")[-4] + "_" + pth.split("/")[-2]
    label_of = lambda pth: pth.replace("image_normalized.nii.gz", "SmBowel_label.nii.gz")
    pair_path_list = []
    fname_list = []
    # OG->EM pairs first, then OG->SM, matching the name-list ordering
    for tag in ("_EM", "_SM"):
        counterparts = [og.replace("_OG", tag) for og in og_paths]
        for og, other in zip(og_paths, counterparts):
            pair_path_list.append([og, other, label_of(og), label_of(other)])
            fname_list.append([name_of(og), name_of(other)])
    os.makedirs(outpath, exist_ok=True)
    pair_outpath = os.path.join(outpath, "source_target_set.txt")
    fname_outpath = os.path.join(outpath, "source_target_name.txt")
    write_list_into_txt(pair_outpath, pair_path_list)
    write_list_into_txt(fname_outpath, fname_list)
def init_env(output_path, source_path_list, target_path_list, l_source_path_list=None, l_target_path_list=None):
    """
    Initialize a registration test environment: write pair path and name txts
    under output_path/reg/test.

    :param output_path: output root of the task
    :param source_path_list: list of source image paths
    :param target_path_list: list of target image paths (same length as sources)
    :param l_source_path_list: optional list of source label paths
    :param l_target_path_list: optional list of target label paths
    :return: the task root path (identical to output_path)
    """
    file_num = len(source_path_list)
    assert len(source_path_list) == len(target_path_list)
    if l_source_path_list is not None and l_target_path_list is not None:
        assert len(source_path_list) == len(l_source_path_list)
        file_list = [[source_path_list[i], target_path_list[i],
                      l_source_path_list[i], l_target_path_list[i]]
                     for i in range(file_num)]
    else:
        file_list = [[source_path_list[i], target_path_list[i]] for i in range(file_num)]
    os.makedirs(os.path.join(output_path, 'reg/test'), exist_ok=True)
    os.makedirs(os.path.join(output_path, 'reg/res'), exist_ok=True)
    pair_txt_path = os.path.join(output_path, 'reg/test/pair_path_list.txt')
    fn_txt_path = os.path.join(output_path, 'reg/test/pair_name_list.txt')
    fname_list = [generate_pair_name([file_list[i][0], file_list[i][1]])
                  for i in range(file_num)]
    write_list_into_txt(pair_txt_path, file_list)
    write_list_into_txt(fn_txt_path, fname_list)
    # NOTE: the original also set unused locals (data_task_name, cur_task_name);
    # they had no effect and were removed.
    return output_path
def get_pair_list_txt_by_file(file_txt, name_txt, output_path, pair_num_limit=-1, per_num_limit=-1):
    """Generate inter-subject pair path/name txt files under output_path.

    If pair_path_list.txt already exists the generation step is skipped; the
    two txt paths are returned either way.
    """
    pair_list_path = os.path.join(output_path, "pair_path_list.txt")
    pair_name_path = os.path.join(output_path, "pair_name_list.txt")
    if os.path.isfile(pair_list_path):
        print("the file {} has already exist, now read it".format(pair_list_path))
    else:
        img_label_list = read_img_label_into_list(file_txt)
        img_list = [entry[0] for entry in img_label_list]
        label_list = [entry[1] for entry in img_label_list]
        fname_list = read_fname_list_from_pair_fname_txt(name_txt) if name_txt is not None else None
        pair_list, pair_name_list = gen_inter_pair_list(
            img_list, fname_list, label_list, pair_num_limit, per_num_limit)
        write_list_into_txt(pair_list_path, pair_list)
        write_list_into_txt(pair_name_path, pair_name_list)
    return pair_list_path, pair_name_path
def get_txt_file(path, ftype, output_txt):
    """Recursively collect files matching ftype under path and write them one per line."""
    pattern = os.path.join(path, "**", ftype)
    matched = glob(pattern, recursive=True)
    write_list_into_txt(output_txt, [[match] for match in matched])
def get_file_txt_from_pair_txt(txt_path, output_path):
    """Flatten a pair txt into an image/label file txt: all sources first, then all targets."""
    pair_list = read_txt_into_list(txt_path)
    file_list = [[entry[0], entry[2]] for entry in pair_list]
    file_list.extend([entry[1], entry[3]] for entry in pair_list)
    write_list_into_txt(output_path, file_list)
def init_test_env(setting_path, output_path, registration_pair_list, pair_name_list=None):
    """
    create test environment, the pair list would be saved into output_path/reg/test/pair_path_list.txt,
    a corresponding auto-parsed filename list would also be saved in output/path/reg/test/pair_name_list.txt
    :param setting_path: the path to load 'cur_task_setting.json' and 'cur_data_setting.json' (optional if the related settings are in cur_task_setting)
    :param output_path: the output path of the task
    :param registration_pair_list: including source_path_list, target_path_list, l_source_path_list, l_target_path_list
    :return: tuple of ParameterDict, datapro (optional) and tsk_set
    """
    source_list, target_list, l_source_list, l_target_list = registration_pair_list
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path), "task setting {} not exists".format(tsm_json_path)
    # the data setting is optional; the task setting is mandatory
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    file_num = len(source_list)
    if l_source_list is not None and l_target_list is not None:
        file_list = [[source_list[i], target_list[i], l_source_list[i], l_target_list[i]]
                     for i in range(file_num)]
    else:
        file_list = [[source_list[i], target_list[i]] for i in range(file_num)]
    os.makedirs(os.path.join(output_path, 'reg/test'), exist_ok=True)
    os.makedirs(os.path.join(output_path, 'reg/res'), exist_ok=True)
    pair_txt_path = os.path.join(output_path, 'reg/test/pair_path_list.txt')
    fn_txt_path = os.path.join(output_path, 'reg/test/pair_name_list.txt')
    if pair_name_list is None:
        pair_name_list = [generate_pair_name([pair[0], pair[1]], detail=True)
                          for pair in file_list]
    write_list_into_txt(pair_txt_path, file_list)
    write_list_into_txt(fn_txt_path, pair_name_list)
    data_task_name = 'reg'
    cur_task_name = 'res'
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = cur_task_name
    tsm.task_par['tsk_set']['output_root_path'] = os.path.join(output_path, data_task_name)
    # point the mermaid setting files at the given setting folder, per model type
    if tsm.task_par['tsk_set']['model'] == 'reg_net':
        tsm.task_par['tsk_set']['reg']['mermaid_net']['mermaid_net_json_pth'] = \
            os.path.join(setting_path, 'mermaid_nonp_settings.json')
    if tsm.task_par['tsk_set']['model'] == 'mermaid_iter':
        tsm.task_par['tsk_set']['reg']['mermaid_iter']['mermaid_affine_json'] = \
            os.path.join(setting_path, 'mermaid_affine_settings.json')
        tsm.task_par['tsk_set']['reg']['mermaid_iter']['mermaid_nonp_json'] = \
            os.path.join(setting_path, 'mermaid_nonp_settings.json')
    return dm, tsm
# Build the training file list (image + derived label path) for the k2
# augmented brain_35 segmentation set.
train_aug_output_path = "/playpen-raid1/zyshen/data/brain_35/non_resize/data_aug_train"
train_aug_output_full_path = train_aug_output_path + "/aug"
output_folder = "/playpen-raid1/zyshen/data/brain_35/non_resize/seg_aug_train_k2/train"
os.makedirs(output_folder, exist_ok=True)
output_path = os.path.join(output_folder, "file_path_list.txt")
train_aug_img_list = get_file_list(train_aug_output_full_path, "*_image.nii.gz")
# labels sit next to their images with a _label suffix
train_aug_label_list = [img.replace("_image.nii.gz", "_label.nii.gz")
                        for img in train_aug_img_list]
img_label_path_list = [[img, label]
                       for img, label in zip(train_aug_img_list, train_aug_label_list)]
write_list_into_txt(output_path, img_label_path_list)
#
#
# train_aug_output_path = "/playpen-raid1/zyshen/data/brain_35/non_resize/data_aug_train_random"
# train_aug_output_full_path = train_aug_output_path+"/aug"
# output_folder = "/playpen-raid1/zyshen/data/brain_35/non_resize/seg_aug_train_random/train"
# os.makedirs(output_folder,exist_ok=True)
# output_path = os.path.join(output_folder,"file_path_list.txt")
# train_aug_img_list = get_file_list(train_aug_output_full_path,"*_image.nii.gz")
# train_aug_label_list = [path.replace("_image.nii.gz","_label.nii.gz") for path in train_aug_img_list]
# img_label_path_list = [[img_path, label_path] for img_path, label_path in zip(train_aug_img_list,train_aug_label_list)]
# write_list_into_txt(output_path,img_label_path_list)
#
#
#
# train_aug_output_path = "/playpen-raid1/zyshen/data/brain_35/non_resize/data_aug_train_bspline"
# train_aug_output_full_path = train_aug_output_path+"/aug"
output_path=out_path_list, fixed_sz=desired_sz) indexes = list(range(len(raw_path_list))) number_of_workers = 8 boundary_list = [] config = dict() index_partitions = np.array_split(indexes, number_of_workers) if process_img: with Pool(processes=number_of_workers) as pool: pool.map(f, index_partitions) if generate_txt: source_list = [f['source'] for f in out_path_list] target_list = [f['target'] for f in out_path_list] lsource_list = [f['lsource'] for f in out_path_list] ltarget_list = [f['ltarget'] for f in out_path_list] file_num = len(source_list) file_list = [[ source_list[i], target_list[i], lsource_list[i], ltarget_list[i] ] for i in range(file_num)] os.makedirs(os.path.join(output_txt_path, 'reg/test'), exist_ok=True) os.makedirs(os.path.join(output_txt_path, 'reg/res'), exist_ok=True) pair_txt_path = os.path.join(output_txt_path, 'reg/test/pair_path_list.txt') fn_txt_path = os.path.join(output_txt_path, 'reg/test/pair_name_list.txt') fname_list = [ generate_pair_name([file_list[i][0], file_list[i][1]]) for i in range(file_num) ] write_list_into_txt(pair_txt_path, file_list) write_list_into_txt(fn_txt_path, fname_list)
test_path_list = read_txt_into_list(test_file_path) train_name_list = read_txt_into_list(train_name_path) test_name_list = read_txt_into_list(test_name_path) test_img_path_list = [path[0] for path in test_path_list] test_label_path_list = [path[1] for path in test_path_list] if isinstance(train_path_list[0],list): train_img_path_list = [path[0] for path in train_path_list] train_label_path_list = [path[1] for path in train_path_list] else: train_img_path_list = train_path_list train_label_path_list = None img_pair_list, pair_name_list = gen_post_aug_pair_list(test_img_path_list,train_img_path_list, test_fname_list=test_name_list,train_fname_list=train_name_list, test_label_path_list=test_label_path_list,train_label_path_list=train_label_path_list, pair_num_limit=-1, per_num_limit=5) pair_name_list = [pair_name[1:] for pair_name in pair_name_list] write_list_into_txt(output_file_path,img_pair_list) write_list_into_txt(output_name_path,pair_name_list) train_aug_output_path = "/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train" train_aug_output_full_path = train_aug_output_path+"/aug" output_folder = "/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_k2/train" os.makedirs(output_folder,exist_ok=True) output_path = os.path.join(output_folder,"file_path_list.txt") train_aug_img_list = get_file_list(train_aug_output_full_path,"*_image.nii.gz") train_aug_label_list = [path.replace("_image.nii.gz","_label.nii.gz") for path in train_aug_img_list] img_label_path_list = [[img_path, label_path] for img_path, label_path in zip(train_aug_img_list,train_aug_label_list)] write_list_into_txt(output_path,img_label_path_list) # train_aug_output_path = "/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train_random"
def remove_label_info(pair_path_txt,output_txt):
    """Strip the label columns from a pair txt, keeping only [source, target]."""
    pair_list = read_txt_into_list(pair_path_txt)
    stripped = [pair[:2] for pair in pair_list]
    write_list_into_txt(output_txt, stripped)