# Optimizer / model configuration for this run.
learning_rate = opt.lr  # default 0.0001
num_channels = 15
# Crop box per piece id, presumably [x0, x1, y0, y1, z0, z1] — TODO confirm against loader.
piece_map = {'1_1_1': [0, 96, 0, 96, 0, 60]}

# Directory layout: paired source/target training data plus test sources.
train_source_dir = os.path.join(train_data_dir, 'source')
train_target_dir = os.path.join(train_data_dir, 'target')
test_source_dir = os.path.join(test_data_dir, 'source')
working_dir = os.path.join(working_root_dir, piece)
out = os.path.join(working_dir, 'finetune_out')
mkdir(out)

# Enumerate subject ids and file paths, then package them the way the loader expects.
train_source_subs, train_source_files = subl.get_sub_list(train_source_dir)
train_target_subs, train_target_files = subl.get_sub_list(train_target_dir)
train_dict = {
    'source_subs': train_source_subs,
    'source_files': train_source_files,
    'target_subs': train_target_subs,
    'target_files': train_target_files,
}

test_source_subs, test_source_files = subl.get_sub_list(test_source_dir)
test_dict = {
    'source_subs': test_source_subs,
    'source_files': test_source_files,
}

# Build the training dataset for this piece.
train_set = torchsrc.imgloaders.pytorch_loader_allpiece(
    train_dict, num_channels=num_channels, piece=piece, piece_map=piece_map)
# train_dict = {} # train_dict['img_subs'] = train_img_subs # train_dict['img_files'] = train_img_files # train_dict['seg_subs'] = train_seg_subs # train_dict['seg_files'] = train_seg_files else: out = os.path.join(working_dir, 'test_out') mkdir(out) # train_img_subs, train_img_files, train_seg_subs, train_seg_files = subl.get_sub_from_txt(train_list_file, trainnii_list_file, label_list_file, labelnii_list_file) #train_dict = {} #train_dict['img_subs'] = train_img_subs #train_dict['img_files'] = train_img_files #train_dict['seg_subs'] = train_seg_subs #train_dict['seg_files'] = train_seg_files test_img_subs, test_img_files = subl.get_sub_list(test_img_dir) test_dict = {} test_dict['img_subs'] = test_img_subs test_dict['img_files'] = test_img_files # load image #train_set = torchsrc.imgloaders.pytorch_loader_allpiece(train_dict, num_labels=lmk_num, piece=piece, piece_map=piece_map) #train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=1) test_set = torchsrc.imgloaders.pytorch_loader_allpiece(test_dict, num_labels=lmk_num, piece=piece, piece_map=piece_map) test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=1)
# Crop boxes for additional pieces, presumably [x0, x1, y0, y1, z0, z1] — TODO confirm.
piece_map['2_2_1'] = [38, 134, 46, 174, 0, 88]
piece_map['2_2_3'] = [38, 134, 46, 174, 68, 156]

# Fixed data locations on the shared filesystem.
train_list_file = '/share4/huoy1/Deep_5000_Brain/sublist/sublist_mni.txt'
working_dir = os.path.join('/share4/huoy1/Deep_5000_Brain/working_dir/', piece)
test_img_dir = '/share4/huoy1/Deep_5000_Brain/testing/mni/T1'
finetune_img_dir = '/share4/huoy1/Deep_5000_Brain/finetune_training/aladin-reg-images-normalized'
finetune_seg_dir = '/share4/huoy1/Deep_5000_Brain/finetune_training/aladin-reg-labels/'

# Build image/segmentation lists for training.
# Fix: `if finetune == True:` -> `if finetune:` (PEP 8: never compare to True with ==).
if finetune:
    out = os.path.join(working_dir, 'finetune_out')
    mkdir(out)
    # Fine-tuning data: registered (aladin) images with matching label volumes.
    train_img_subs, train_img_files = subl.get_sub_list(finetune_img_dir)
    train_seg_subs, train_seg_files = subl.get_sub_list(finetune_seg_dir)
    train_dict = {
        'img_subs': train_img_subs,
        'img_files': train_img_files,
        'seg_subs': train_seg_subs,
        'seg_files': train_seg_files,
    }
else:
    out = os.path.join(working_dir, 'test_out')
    mkdir(out)
    # Full training list read from the subject-list text file.
    train_img_subs, train_img_files, train_seg_subs, train_seg_files = subl.get_sub_from_txt(
        train_list_file)
    train_dict = {}
    train_dict['img_subs'] = train_img_subs
    train_dict['img_files'] = train_img_files
    train_dict['seg_subs'] = train_seg_subs