def main():
    """Entry point: run a NetRunAgent with a customized network dictionary.

    Usage: python coplenet_run.py <stage> <config.cfg>
    """
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print('    python coplenet_run.py test config.cfg')
        exit()
    # Bug fix: the original first did cfg_file = sys.argv[1];
    # parse_config(cfg_file) — i.e. it parsed the stage string ("test"/"train")
    # as a config file — before reading the real arguments. That redundant and
    # incorrect parse is removed.
    stage = str(sys.argv[1])
    cfg_file = str(sys.argv[2])
    config = parse_config(cfg_file)
    # use customized CNN and loss function
    agent = NetRunAgent(config, stage)
    agent.set_network_dict(my_net_dict)
    agent.run()
def main():
    """Read <stage> and <config> from the command line and run a SegmentationAgent."""
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print('    python train_infer.py train config.cfg')
        exit()
    run_stage = str(sys.argv[1])
    config_path = str(sys.argv[2])
    SegmentationAgent(parse_config(config_path), run_stage).run()
def main():
    """Run a SegmentationAgent with a network selected from net_dict.

    Usage: python coplenet_run.py <stage> <config.cfg>

    Raises:
        ValueError: if config['network']['net_type'] is not a key of net_dict.
    """
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print('    python coplenet_run.py test config.cfg')
        exit()
    # Bug fix: removed the redundant parse of sys.argv[1] (the stage string)
    # as a config file that ran before the real arguments were read.
    stage = str(sys.argv[1])
    cfg_file = str(sys.argv[2])
    config = parse_config(cfg_file)
    # use customized CNN and loss function
    agent = SegmentationAgent(config, stage)
    net_name = config['network']['net_type']
    if net_name not in net_dict:
        raise ValueError("undefined network {0:}".format(net_name))
    net = net_dict[net_name](config['network'])
    agent.set_network(net)
    agent.run()
def get_transform_list(trans_config_file):
    """Instantiate the data transforms declared in a configuration file.

    Each transform name listed under config['dataset']['transform'] is looked
    up in TransformDict and constructed with the dataset parameters (with
    'task' forced to 'segmentation').

    Raises:
        ValueError: if a listed transform name is not in TransformDict.
    """
    cfg = parse_config(trans_config_file)
    trans_params = cfg['dataset']
    trans_params['task'] = 'segmentation'
    transforms = []
    for trans_name in cfg['dataset']['transform']:
        print(trans_name)
        if trans_name not in TransformDict:
            raise (ValueError("Undefined transform {0:}".format(trans_name)))
        transforms.append(TransformDict[trans_name](trans_params))
    return transforms
def main():
    """Run a SegmentationAgent with a customized network (MyUNet2D) and losses."""
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print('    python net_run_jsrt.py train config.cfg')
        exit()
    run_stage = str(sys.argv[1])
    config = parse_config(str(sys.argv[2]))
    agent = SegmentationAgent(config, run_stage)
    # plug in the customized CNN and loss function dictionary
    agent.set_network(MyUNet2D(config['network']))
    agent.set_loss_dict(loss_dict)
    agent.run()
def evaluation(config_file):
    """Print classification accuracy from ground-truth and prediction CSV files.

    Both CSVs must list the same samples in the same order (checked on the
    first column); labels are taken from the last column of each file.
    """
    eval_cfg = parse_config(config_file)['evaluation']
    # read for config validation; only accuracy is computed below
    metric = eval_cfg['metric']
    gt_items = pd.read_csv(eval_cfg['ground_truth_csv'])
    lab_items = pd.read_csv(eval_cfg['prediction_csv'])
    assert len(gt_items) == len(lab_items)
    for idx in range(len(gt_items)):
        # sample identifiers must match row by row
        assert gt_items.iloc[idx, 0] == lab_items.iloc[idx, 0]
    gt_data = np.asarray(gt_items.iloc[:, -1])
    lab_data = np.asarray(lab_items.iloc[:, -1])
    acc = ((gt_data == lab_data).sum() + 0.0) / len(gt_items)
    print("accuracy {}".format(acc))
def main():
    """Dispatch to a classification or segmentation agent based on the task type."""
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print('    pymic_net_run train config.cfg')
        exit()
    run_stage = str(sys.argv[1])
    config = parse_config(str(sys.argv[2]))
    task = config['dataset']['task_type']
    assert task in ['cls', 'cls_nexcl', 'seg']
    if task in ('cls', 'cls_nexcl'):
        agent = ClassificationAgent(config, run_stage)
    else:
        agent = SegmentationAgent(config, run_stage)
    agent.run()
def main():
    """Run COPLENet inference ('test' stage) with hard-coded network parameters."""
    if len(sys.argv) < 2:
        print('Number of arguments should be 2. e.g.')
        print('    python net_run.py config.cfg')
        exit()
    config = parse_config(str(sys.argv[1]))
    # parameters of COPLENet
    net_param = {
        "class_num": 2,
        "in_chns": 1,
        "bilinear": True,
        "feature_chns": [32, 64, 128, 256, 512],
        "dropout": [0.0, 0.0, 0.3, 0.4, 0.5],
    }
    config['network'] = net_param
    agent = TrainInferAgent(config, 'test')
    agent.set_network(COPLENet(net_param))
    agent.run()
def main():
    """Run a TrainInferAgent with a customized MyUNet2D and hard-coded parameters."""
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print('    python train_infer.py train config.cfg')
        exit()
    run_stage = str(sys.argv[1])
    config = parse_config(str(sys.argv[2]))
    # parameters of the customized CNN
    net_param = {
        'in_chns': 1,
        'feature_chns': [4, 16, 32, 64, 128],
        'dropout': [0, 0, 0.3, 0.4, 0.5],
        'class_num': 2,
        'bilinear': True,
    }
    config['network'] = net_param
    agent = TrainInferAgent(config, run_stage)
    agent.set_network(MyUNet2D(net_param))
    agent.run()
# NOTE(review): the lines below are the tail of a method (presumably the
# agent's __infer) whose `def` line is outside this view; `class_num`, `prob`,
# `save_prefix`, `save_format`, `root_dir`, `names` and `start_time` are bound
# earlier in that method.
for c in range(0, class_num):
    temp_prob = prob[c]
    prob_save_name = "{0:}_prob_{1:}.{2:}".format(
        save_prefix, c, save_format)
    save_nd_array_as_image(temp_prob, prob_save_name,
                           root_dir + '/' + names[0])
# NOTE(review): `test_loder` looks like a typo for `test_loader`, but the
# attribute may really be spelled this way where it is assigned — confirm
# before renaming.
avg_time = (time.time() - start_time) / len(self.test_loder)
print("average testing time {0:}".format(avg_time))

def run(self):
    """Create the dataset and network, then train or infer depending on stage.

    Bug fix: the original called agent.__create_dataset() and
    agent.__create_network(), referencing the module-level `agent` variable
    instead of this instance (and private name mangling would not resolve on
    it); both now correctly use `self`.
    """
    self.__create_dataset()
    self.__create_network()
    if self.stage == 'train':
        self.__train()
    else:
        self.__infer()

if __name__ == "__main__":
    if len(sys.argv) < 3:
        print('Number of arguments should be 3. e.g.')
        print(sys.argv)
        print('    python train_infer.py train config.cfg')
        exit()
    stage = str(sys.argv[1])
    cfg_file = str(sys.argv[2])
    config = parse_config(cfg_file)
    agent = TrainInferAgent(config, stage)
    agent.run()
def evaluation(config_file):
    """Evaluate label-fused segmentations against ground truth, per folder root.

    For every segmentation folder root in the configuration, each image pair
    from the CSV is loaded, optionally label-converted, the labels in
    `label_list` are fused into a single foreground mask, and the configured
    metric is computed.  Per-image scores go to a CSV and mean/std to text
    files inside the segmentation folder root.
    """
    eval_cfg = parse_config(config_file)['evaluation']
    metric = eval_cfg['metric']
    labels = eval_cfg['label_list']
    organ_name = eval_cfg['organ_name']
    gt_root = eval_cfg['ground_truth_folder_root']
    seg_root = eval_cfg['segmentation_folder_root']
    # a single folder root is promoted to a one-element list
    if not (isinstance(seg_root, tuple) or isinstance(seg_root, list)):
        seg_root = [seg_root]
    image_pair_csv = eval_cfg['evaluation_image_pair']
    gt_convert_src = eval_cfg.get('ground_truth_label_convert_source', None)
    gt_convert_tgt = eval_cfg.get('ground_truth_label_convert_target', None)
    seg_convert_src = eval_cfg.get('segmentation_label_convert_source', None)
    seg_convert_tgt = eval_cfg.get('segmentation_label_convert_target', None)
    image_items = pd.read_csv(image_pair_csv)
    for seg_root_n in seg_root:
        score_all_data = []
        name_score_list = []
        for i in range(len(image_items)):
            gt_name = image_items.iloc[i, 0]
            seg_name = image_items.iloc[i, 1]
            s_dict = load_image_as_nd_array(seg_root_n + '/' + seg_name)
            g_dict = load_image_as_nd_array(gt_root + '/' + gt_name)
            s_volume = s_dict["data_array"]
            s_spacing = s_dict["spacing"]
            g_volume = g_dict["data_array"]
            # NOTE(review): g_spacing is unused; the metric below uses the
            # segmentation spacing only — confirm this is intended.
            g_spacing = g_dict["spacing"]
            if (gt_convert_src is not None) and gt_convert_tgt is not None:
                g_volume = convert_label(g_volume, gt_convert_src,
                                         gt_convert_tgt)
            if (seg_convert_src is not None) and seg_convert_tgt is not None:
                s_volume = convert_label(s_volume, seg_convert_src,
                                         seg_convert_tgt)
            # fuse all requested labels into one binary foreground mask
            s_volume_sub = np.zeros_like(s_volume)
            g_volume_sub = np.zeros_like(g_volume)
            for lab in labels:
                s_volume_sub = s_volume_sub + np.asarray(s_volume == lab, np.uint8)
                g_volume_sub = g_volume_sub + np.asarray(g_volume == lab, np.uint8)
            temp_score = get_evaluation_score(s_volume_sub > 0,
                                              g_volume_sub > 0,
                                              s_spacing, metric)
            score_all_data.append(temp_score)
            name_score_list.append([seg_name, temp_score])
            print(seg_name, temp_score)
        score_all_data = np.asarray(score_all_data)
        score_mean = [score_all_data.mean(axis=0)]
        score_std = [score_all_data.std(axis=0)]
        # save summary statistics as txt and per-image scores as csv
        np.savetxt("{0:}/{1:}_{2:}_mean.txt".format(seg_root_n, organ_name, metric), score_mean)
        np.savetxt("{0:}/{1:}_{2:}_std.txt".format(seg_root_n, organ_name, metric), score_std)
        score_csv = "{0:}/{1:}_{2:}_all.csv".format(seg_root_n, organ_name, metric)
        with open(score_csv, mode='w') as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=',',
                                    quotechar='"', quoting=csv.QUOTE_MINIMAL)
            csv_writer.writerow(['image', metric])
            for item in name_score_list:
                csv_writer.writerow(item)
        print(seg_root_n)
        print("{0:} mean ".format(metric), score_mean)
        print("{0:} std ".format(metric), score_std)
def evaluation(config_file):
    """Evaluate label-fused segmentations named per patient against ground truth.

    Patient names are read from a text file; for each patient the first folder
    in the configured segmentation / ground-truth folder lists that contains
    the file is used.  Scores are printed per patient and saved
    (all / mean / std) as text files.
    """
    eval_cfg = parse_config(config_file)['evaluation']
    metric = eval_cfg['metric']
    labels = eval_cfg['label_list']
    organ_name = eval_cfg['organ_name']
    gt_convert_src = eval_cfg.get('ground_truth_label_convert_source', None)
    gt_convert_tgt = eval_cfg.get('ground_truth_label_convert_target', None)
    seg_convert_src = eval_cfg.get('segmentation_label_convert_source', None)
    seg_convert_tgt = eval_cfg.get('segmentation_label_convert_target', None)
    s_folder_list = eval_cfg['segmentation_folder_list']
    g_folder_list = eval_cfg['ground_truth_folder_list']
    s_format = eval_cfg['segmentation_format']
    g_format = eval_cfg['ground_truth_format']
    s_postfix = eval_cfg.get('segmentation_postfix', None)
    g_postfix = eval_cfg.get('ground_truth_postfix', None)
    # file name suffix: optional "_postfix" followed by ".format"
    s_postfix_long = '.' + s_format
    if s_postfix is not None:
        s_postfix_long = '_' + s_postfix + s_postfix_long
    g_postfix_long = '.' + g_format
    if g_postfix is not None:
        g_postfix_long = '_' + g_postfix + g_postfix_long
    with open(eval_cfg['patient_file_names']) as f:
        patient_names = [x.strip() for x in f.readlines()]
    score_all_data = []
    for patient in patient_names:
        # use the first folder that actually contains this patient's file
        for s_folder in s_folder_list:
            s_name = os.path.join(s_folder, patient + s_postfix_long)
            if os.path.isfile(s_name):
                break
        for g_folder in g_folder_list:
            g_name = os.path.join(g_folder, patient + g_postfix_long)
            if os.path.isfile(g_name):
                break
        s_dict = load_image_as_nd_array(s_name)
        g_dict = load_image_as_nd_array(g_name)
        s_volume = s_dict["data_array"]
        # NOTE(review): s_spacing is unused; the metric uses the ground-truth
        # spacing below — confirm this is intended.
        s_spacing = s_dict["spacing"]
        g_volume = g_dict["data_array"]
        g_spacing = g_dict["spacing"]
        if (gt_convert_src is not None) and gt_convert_tgt is not None:
            g_volume = convert_label(g_volume, gt_convert_src, gt_convert_tgt)
        if (seg_convert_src is not None) and seg_convert_tgt is not None:
            s_volume = convert_label(s_volume, seg_convert_src, seg_convert_tgt)
        # fuse the requested labels into one binary foreground mask
        s_volume_sub = np.zeros_like(s_volume)
        g_volume_sub = np.zeros_like(g_volume)
        for lab in labels:
            s_volume_sub = s_volume_sub + np.asarray(s_volume == lab, np.uint8)
            g_volume_sub = g_volume_sub + np.asarray(g_volume == lab, np.uint8)
        temp_score = get_evaluation_score(s_volume_sub > 0, g_volume_sub > 0,
                                          g_spacing, metric)
        score_all_data.append(temp_score)
        print(patient, temp_score)
    score_all_data = np.asarray(score_all_data)
    score_mean = [score_all_data.mean(axis=0)]
    score_std = [score_all_data.std(axis=0)]
    # NOTE(review): s_folder is whatever the inner search loop last bound,
    # so the result files land in the folder used for the last patient.
    np.savetxt("{0:}/{1:}_{2:}_all.txt".format(s_folder, organ_name, metric),
               score_all_data)
    np.savetxt("{0:}/{1:}_{2:}_mean.txt".format(s_folder, organ_name, metric),
               score_mean)
    np.savetxt("{0:}/{1:}_{2:}_std.txt".format(s_folder, organ_name, metric),
               score_std)
    print("{0:} mean ".format(metric), score_mean)
    print("{0:} std ".format(metric), score_std)
def evaluation(config_file):
    """Evaluate label-fused segmentations from one folder root against ground truth.

    Image pairs come from a CSV (ground-truth name, segmentation name); the
    labels in `label_list` are fused into one foreground mask and the
    configured metric is computed per image.  All / mean / std scores are
    saved as text files in the segmentation folder root.
    """
    eval_cfg = parse_config(config_file)['evaluation']
    metric = eval_cfg['metric']
    labels = eval_cfg['label_list']
    organ_name = eval_cfg['organ_name']
    gt_root = eval_cfg['ground_truth_folder_root']
    seg_root = eval_cfg['segmentation_folder_root']
    image_pair_csv = eval_cfg['evaluation_image_pair']
    gt_convert_src = eval_cfg.get('ground_truth_label_convert_source', None)
    gt_convert_tgt = eval_cfg.get('ground_truth_label_convert_target', None)
    seg_convert_src = eval_cfg.get('segmentation_label_convert_source', None)
    seg_convert_tgt = eval_cfg.get('segmentation_label_convert_target', None)
    image_items = pd.read_csv(image_pair_csv)
    score_all_data = []
    for i in range(len(image_items)):
        gt_name = image_items.iloc[i, 0]
        seg_name = image_items.iloc[i, 1]
        s_dict = load_image_as_nd_array(seg_root + '/' + seg_name)
        g_dict = load_image_as_nd_array(gt_root + '/' + gt_name)
        s_volume = s_dict["data_array"]
        s_spacing = s_dict["spacing"]
        g_volume = g_dict["data_array"]
        # NOTE(review): g_spacing is unused; the metric uses the segmentation
        # spacing below — confirm this is intended.
        g_spacing = g_dict["spacing"]
        if (gt_convert_src is not None) and gt_convert_tgt is not None:
            g_volume = convert_label(g_volume, gt_convert_src, gt_convert_tgt)
        if (seg_convert_src is not None) and seg_convert_tgt is not None:
            s_volume = convert_label(s_volume, seg_convert_src, seg_convert_tgt)
        # fuse the requested labels into one binary foreground mask
        s_volume_sub = np.zeros_like(s_volume)
        g_volume_sub = np.zeros_like(g_volume)
        for lab in labels:
            s_volume_sub = s_volume_sub + np.asarray(s_volume == lab, np.uint8)
            g_volume_sub = g_volume_sub + np.asarray(g_volume == lab, np.uint8)
        temp_score = get_evaluation_score(s_volume_sub > 0, g_volume_sub > 0,
                                          s_spacing, metric)
        score_all_data.append(temp_score)
        print(seg_name, temp_score)
    score_all_data = np.asarray(score_all_data)
    score_mean = [score_all_data.mean(axis=0)]
    score_std = [score_all_data.std(axis=0)]
    np.savetxt("{0:}/{1:}_{2:}_all.txt".format(seg_root, organ_name, metric),
               score_all_data)
    np.savetxt("{0:}/{1:}_{2:}_mean.txt".format(seg_root, organ_name, metric),
               score_mean)
    np.savetxt("{0:}/{1:}_{2:}_std.txt".format(seg_root, organ_name, metric),
               score_std)
    print("{0:} mean ".format(metric), score_mean)
    print("{0:} std ".format(metric), score_std)
def evaluation(config_file):
    """Compute per-class (optionally fused) evaluation scores for segmentations.

    For every segmentation folder root, each image pair from the CSV is loaded
    and optionally label-converted, then a per-class score vector is computed
    with get_multi_class_evaluation_score; with more than one class the class
    average is appended.  All scores plus mean/std rows are written to a CSV
    in the segmentation folder root.
    """
    eval_cfg = parse_config(config_file)['evaluation']
    metric = eval_cfg['metric']
    label_list = eval_cfg['label_list']
    label_fuse = eval_cfg.get('label_fuse', False)
    organ_name = eval_cfg['organ_name']
    gt_root = eval_cfg['ground_truth_folder_root']
    seg_root = eval_cfg['segmentation_folder_root']
    # a single folder root is promoted to a one-element list
    if not (isinstance(seg_root, tuple) or isinstance(seg_root, list)):
        seg_root = [seg_root]
    image_pair_csv = eval_cfg['evaluation_image_pair']
    gt_convert_src = eval_cfg.get('ground_truth_label_convert_source', None)
    gt_convert_tgt = eval_cfg.get('ground_truth_label_convert_target', None)
    seg_convert_src = eval_cfg.get('segmentation_label_convert_source', None)
    seg_convert_tgt = eval_cfg.get('segmentation_label_convert_target', None)
    image_items = pd.read_csv(image_pair_csv)
    for seg_root_n in seg_root:
        score_all_data = []
        name_score_list = []
        for i in range(len(image_items)):
            gt_name = image_items.iloc[i, 0]
            seg_name = image_items.iloc[i, 1]
            s_dict = load_image_as_nd_array(seg_root_n + '/' + seg_name)
            g_dict = load_image_as_nd_array(gt_root + '/' + gt_name)
            s_volume = s_dict["data_array"]
            s_spacing = s_dict["spacing"]
            g_volume = g_dict["data_array"]
            # NOTE(review): g_spacing is unused; the score below is computed
            # with the segmentation spacing only — confirm this is intended.
            g_spacing = g_dict["spacing"]
            if (gt_convert_src is not None) and gt_convert_tgt is not None:
                g_volume = convert_label(g_volume, gt_convert_src,
                                         gt_convert_tgt)
            if (seg_convert_src is not None) and seg_convert_tgt is not None:
                s_volume = convert_label(s_volume, seg_convert_src,
                                         seg_convert_tgt)
            score_vector = get_multi_class_evaluation_score(
                s_volume, g_volume, label_list, label_fuse, s_spacing, metric)
            if len(label_list) > 1:
                # append the average over classes as an extra column
                score_vector.append(np.asarray(score_vector).mean())
            score_all_data.append(score_vector)
            name_score_list.append([seg_name] + score_vector)
            print(seg_name, score_vector)
        score_all_data = np.asarray(score_all_data)
        score_mean = score_all_data.mean(axis=0)
        score_std = score_all_data.std(axis=0)
        name_score_list.append(['mean'] + list(score_mean))
        name_score_list.append(['std'] + list(score_std))
        # save the result as csv
        score_csv = "{0:}/{1:}_{2:}_all.csv".format(seg_root_n, organ_name, metric)
        with open(score_csv, mode='w') as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=',',
                                    quotechar='"', quoting=csv.QUOTE_MINIMAL)
            head = ['image'] + ["class_{0:}".format(lab) for lab in label_list]
            if len(label_list) > 1:
                head = head + ["average"]
            csv_writer.writerow(head)
            for item in name_score_list:
                csv_writer.writerow(item)
        print("{0:} mean ".format(metric), score_mean)
        print("{0:} std ".format(metric), score_std)