def test_accuracy():
    app = QApplication([])
    app.setApplicationName("cidan")
    widget = MainWindow(dev=True, preload=False)
    main_widget = widget.table_widget
    load_new_dataset(main_widget, file_path="test_files/small_dataset1.tif",
                     save_dir_path="test_files/save_dir", load_into_mem=True)
    main_widget.open_dataset_thread.wait()
    time.sleep(10)
    data_handler = main_widget.data_handler
    data_handler.change_filter_param("median_filter", True)
    data_handler.change_roi_extraction_param("roi_circ_threshold", 20)
    main_widget.thread_list[1].run()
    assert main_widget.tabs[1].image_view.magic_wand(70, 127)
    data_handler.export()
    a = neurofinder.load("test_files/roi_list.json")
    b = neurofinder.load("test_files/save_dir/roi_list.json")
    test = neurofinder.match(a, b)
    # neurofinder.centers returns (recall, precision); shapes returns (inclusion, exclusion)
    recall, precision = neurofinder.centers(a, b)
    inclusion, exclusion = neurofinder.shapes(a, b)
    assert precision > .8
    assert recall > .8
    assert exclusion > .6
    assert inclusion > .6
    image_good = io.imread("test_files/embedding_norm_image.png")
    image_test = io.imread(
        "test_files/save_dir/embedding_norm_images/embedding_norm_image.png")
    assert structural_similarity(image_good, image_test) > .90
    data_handler.change_box_param("total_num_spatial_boxes", 4)
    main_widget.thread_list[1].run()
    data_handler.export()
    image_test = io.imread(
        "test_files/save_dir/embedding_norm_images/embedding_norm_image.png")
    assert structural_similarity(image_good, image_test) > .60
    a = neurofinder.load("test_files/roi_list.json")
    b = neurofinder.load("test_files/save_dir/roi_list.json")
    recall, precision = neurofinder.centers(a, b)
    inclusion, exclusion = neurofinder.shapes(a, b)
    assert precision > .4
    assert recall > .4
    assert exclusion > .4
    assert inclusion > .3
    app.quit()
    shutil.rmtree("test_files/save_dir")
def compute_accuracy(gt, predictions, threshold=np.inf):
    """ Compare predicted ROIs against ground-truth ROIs. """
    a = neurofinder.load(json.dumps(
        [{'coordinates': r.coordinates.tolist()} for r in gt]))
    b = neurofinder.load(json.dumps(
        [{'coordinates': r.coordinates.tolist()} for r in predictions]))
    recall, precision = neurofinder.centers(a, b, threshold=threshold)
    inclusion, exclusion = neurofinder.shapes(a, b, threshold=threshold)
    if recall == 0 and precision == 0:
        combined = 0
    else:
        combined = 2 * (recall * precision) / (recall + precision)
    # result = {'combined': round(combined, 4), 'inclusion': round(inclusion, 4),
    #           'precision': round(precision, 4), 'recall': round(recall, 4),
    #           'exclusion': round(exclusion, 4)}
    # print(json.dumps(result))
    print("Combined: %f" % (combined,))
    print("Precision: %f" % (precision,))
    print("Recall: %f" % (recall,))
    print("Inclusion: %f" % (inclusion,))
    print("Exclusion: %f" % (exclusion,))
def compare(model, modelReference, threshold):
    """
    Compare two extraction models.

    Parameters
    ----------
    model : ExtractionModel
        Model for comparison.

    modelReference : ExtractionModel
        Reference model to be compared to; can be ground truth.

    threshold : float
        Distance threshold for matching sources.
    """
    recall, precision = centers(modelReference.regions, model.regions, threshold)
    inclusion, exclusion = shapes(modelReference.regions, model.regions, threshold)
    if recall == 0 and precision == 0:
        combined = 0
    else:
        combined = 2 * (recall * precision) / (recall + precision)
    count = model.regions.count
    return {
        'count': count,
        'combined': combined,
        'recall': recall,
        'precision': precision,
        'inclusion': inclusion,
        'exclusion': exclusion,
        'threshold': threshold
    }
def nf_mask_metrics(m, mp):
    """Computes precision, recall, inclusion, exclusion, and combined (F1) score
    for the given mask (m) and predicted mask (mp). Note that this assumes single
    2D masks and does not account for overlapping neurons.

    # Arguments
        m: ground-truth (height x width) binary numpy mask.
        mp: predicted (height x width) binary numpy mask.

    # Returns
        p, r, i, e, f1: precision, recall, inclusion, exclusion, and F1 scores.
    """
    # Return all zeros if the predicted mask is empty.
    if np.sum(mp.round()) == 0:
        return 0., 0., 0., 0., 0.

    # Convert masks to regional format and compute their metrics.
    m = _mask_to_regional(m)
    mp = _mask_to_regional(mp)
    r, p = centers(m, mp)
    i, e = shapes(m, mp)
    # Guard against division by zero when nothing matches.
    f1 = 0. if (r + p) == 0 else 2. * (r * p) / (r + p)
    return (p, r, i, e, f1)
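# Note: _mask_to_regional is referenced above but not shown in this snippet.
# The following is only a minimal sketch of what such a helper might do,
# assuming each connected component of the binary mask is one ROI and that
# the output should be the regional.many format that neurofinder's
# centers()/shapes() operate on; it is not taken from the original source.
import numpy as np
from regional import many
from scipy import ndimage


def _mask_to_regional(mask):
    # Label connected components: every non-zero blob becomes one candidate ROI.
    labeled, n_rois = ndimage.label(np.asarray(mask).round())
    # Collect the [row, col] pixel coordinates of each component and wrap them
    # in a regional.many object.
    coords = [np.argwhere(labeled == i).tolist() for i in range(1, n_rois + 1)]
    return many(coords)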
def lt_evaluate(file1, file2, threshold=5):
    a = load(file1)
    b = load(file2)
    if a != 0 and b != 0:
        recall, precision = centers(a, b, threshold=threshold)
        inclusion, exclusion = shapes(a, b, threshold=threshold)
        if recall == 0 and precision == 0:
            combined = 0
        else:
            combined = 2 * (recall * precision) / (recall + precision)
        result = {
            'combined': round(combined, 4),
            'inclusion': round(inclusion, 4),
            'precision': round(precision, 4),
            'recall': round(recall, 4),
            'exclusion': round(exclusion, 4)
        }
        return result
    else:
        return {}
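# Hypothetical usage of lt_evaluate (the file names below are placeholders,
# not from the original source). Both files are assumed to be in the
# neurofinder JSON format: a list of objects, each with a "coordinates"
# field holding [row, col] pixel pairs, e.g.
#   [{"coordinates": [[0, 0], [0, 1]]}, {"coordinates": [[10, 10], [10, 11]]}]
metrics = lt_evaluate("ground_truth.json", "predicted.json", threshold=5)
print(metrics)  # {'combined': ..., 'precision': ..., 'recall': ..., ...}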
    # regions_CNMF = cse.utilities.nf_masks_to_json(masks_ws[idcomps], os.path.join('/tmp/regions_CNMF_2.json'))
    # b = load(os.path.join('/tmp/regions_CNMF_2.json'))
    # pl.imshow(np.sum(masks_ws[idcomps], 0), alpha=.3, cmap='hot')
    pl.imshow(np.sum(masks_2, 0), alpha=.3, cmap='hot')
    pl.title('M_2')
    pl.subplot(2, 2, 4)
    # pl.imshow(Cn, cmap='gray')
    # pl.imshow(np.sum(masks_nf, 0) + 2 * np.sum(masks_ws[idcomps], 0))
    pl.imshow(np.sum(masks_nf, 0) + 2 * np.sum(masks_2, 0))
    # pl.imshow(np.sum(masks_2, 0), alpha=.2, cmap='hot')
    pl.title('M_overlap')
    # print
    mtc = match(a, b, threshold=5)
    re, pr = centers(a, b, threshold=5)
    incl, excl = shapes(a, b, threshold=5)
    fscore = 2 * (pr * re) / (pr + re)
    print('Exclusion %.3f\nRecall %.3f\nCombined %.3f\nPrecision %.3f\nInclusion %.3f'
          % (excl, re, fscore, pr, incl))
else:
    print(ref_file + ' DOES NOT EXIST!')

#%%
from neurofinder import load, centers, shapes

results = []
for folder_in_check in folders_in:
    a = load(os.path.join(folder_in_check, 'regions_CNMF.json'))
    dset = '.'.join(folder_in_check[:-1].split('.')[1:])
    print(dset)
    with np.load(os.path.join(folder_in_check, 'regions_CNMF.npz')) as ld:
        masks_ws = ld['masks_ws']
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--task_list", type=str, default='', required=True,
                        help="Path to all datasets")
    parser.add_argument(
        "-odir",
        "--output_dir",
        type=str,
        default='',
        required=True,
        help="Where to output all eigen vector images and csv data",
    )
    parser.add_argument("--task_log", type=str, default='', required=True)
    args = parser.parse_args()

    task_list = []
    with open(args.task_list, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for row in reader:
            task_list.append(row[0])

    parameters_to_search = {
        "median_filter": [True, False],
        "hist_eq": [True, False],
        "pca": [True, False],
        "total_num_spatial_boxes": [1, 4, 9],
        "num_eig": [50],
        "trial_split": [True],
        "z_score": [True, False],
        "trial_length": [250, 500, 1000],
        "localSpatialDenoising": [True],
        "knn": [7],
        "normalize_w_k": [1E-2, 1E-3, 1E-4, 1E-5]
    }
    total_parameters_combinations = reduce(
        lambda x, y: x * y,
        [len(parameters_to_search[x]) for x in parameters_to_search])
    print(total_parameters_combinations)

    parameter_keys = list(parameters_to_search)
    # parameter_remainders[i] is the product of the value-list lengths for keys
    # 0..i; it is used below to decode a linear task index into one value per
    # parameter.
    parameter_remainders = []
    for num, key in enumerate(parameter_keys):
        remainder = 1
        for x in range(num + 1):
            remainder *= len(parameters_to_search[parameter_keys[x]])
        parameter_remainders.append(remainder)

    rows = []
    for num, path in enumerate([os.path.dirname(x) for x in task_list]):
        if os.path.isfile(os.path.join(path, "roi_list.json")):
            a = neurofinder.load(os.path.join(path, "roi_true.json"))
            b = neurofinder.load(os.path.join(path, "roi_list.json"))
            # neurofinder.centers returns (recall, precision)
            recall, precision = neurofinder.centers(a, b)
            inclusion, exclusion = neurofinder.shapes(a, b)
        else:
            precision, recall, inclusion, exclusion = -1, -1, -1, -1
        current_row = [num + 1, precision, recall, inclusion, exclusion]
        for remainder, key in zip(parameter_remainders, parameter_keys):
            val = parameters_to_search[key][
                num % remainder // (remainder // len(parameters_to_search[key]))]
            current_row.append(val)
        rows.append(current_row)
        if os.path.isfile(
                os.path.join(path, "embedding_norm_images/embedding_norm_image.png")):
            Image.open(
                os.path.join(path,
                             "embedding_norm_images/embedding_norm_image.png")).save(
                os.path.join(args.output_dir, os.path.basename(path) + ".png"))

    df = pandas.DataFrame(
        rows,
        columns=["Seq", "Precision", "Recall", "Inclusion", "Exclusion"]
                + parameter_keys)
    task_log = pandas.read_csv(args.task_log)
    result = pandas.concat([df, task_log], axis=1)
    result.to_csv(os.path.join(args.output_dir, "out.csv"))
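# A small, self-contained illustration (with made-up parameters, not the real
# search space above) of how the remainder arithmetic in main() decodes a
# linear task index into one value per parameter: the first key varies
# fastest, like the digits of a mixed-radix number.
params = {"a": [1, 2], "b": [10, 20, 30]}
keys = list(params)
remainders = []
for i, _ in enumerate(keys):
    r = 1
    for j in range(i + 1):
        r *= len(params[keys[j]])
    remainders.append(r)  # [2, 6]
for num in range(6):
    decoded = [params[k][num % r // (r // len(params[k]))]
               for r, k in zip(remainders, keys)]
    print(num, decoded)  # 0 [1, 10], 1 [2, 10], 2 [1, 20], ...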
def test_overlap_perfect_flipped():
    a = many([[[0, 0], [0, 1]], [[10, 10], [10, 11]]])
    b = many([[[10, 10], [10, 11]], [[0, 0], [0, 1]]])
    assert shapes(a, b) == (1.0, 1.0)
def test_overlap_too_many():
    a = many([[[0, 0], [0, 1]], [[10, 10], [10, 11]]])
    b = many([[[0, 0], [0, 1]], [[10, 10], [10, 11], [11, 10], [11, 12]]])
    assert shapes(a, b) == (1.0, 0.75)
def test_overlap_too_few():
    a = many([[[0, 0], [0, 1], [1, 0], [1, 1]],
              [[10, 10], [10, 11], [11, 10], [11, 11]]])
    b = many([[[0, 0], [0, 1], [1, 0], [1, 1]], [[10, 10], [11, 11]]])
    assert shapes(a, b) == (0.75, 1.0)
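# Reading of the expected values above (derived from the tests themselves):
# for each matched pair of regions, inclusion is the fraction of the
# ground-truth region's pixels covered by the predicted region, and exclusion
# is the fraction of the predicted region's pixels that fall inside the
# ground-truth region, averaged over all matched pairs. Extra predicted pixels
# therefore lower exclusion (second test: mean(2/2, 2/4) = 0.75), while
# missing pixels lower inclusion (third test: mean(4/4, 2/4) = 0.75).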