Example #1
import json

import neurofinder
import numpy as np


def compute_accuracy(gt, predictions, threshold=np.inf):
    """
    Compare predicted ROIs against ground-truth ROIs.
    """
    a = neurofinder.load(
        json.dumps([{
            'coordinates': r.coordinates.tolist()
        } for r in gt]))
    b = neurofinder.load(
        json.dumps([{
            'coordinates': r.coordinates.tolist()
        } for r in predictions]))

    recall, precision = neurofinder.centers(a, b, threshold=threshold)
    inclusion, exclusion = neurofinder.shapes(a, b, threshold=threshold)

    if recall == 0 and precision == 0:
        combined = 0
    else:
        combined = 2 * (recall * precision) / (recall + precision)

    #result = {'combined': round(combined, 4), 'inclusion': round(inclusion, 4), 'precision': round(precision, 4), 'recall': round(recall, 4), 'exclusion': round(exclusion, 4)}
    #print(json.dumps(result))

    print "Combined: %f" % (combined, )
    print "Precision: %f" % (precision, )
    print "Recall: %f" % (recall, )
    print "Inclusion: %f" % (inclusion, )
    print "Exclusion: %f" % (exclusion, )
Example #2
def test_accuracy():
    app = QApplication([])
    app.setApplicationName("cidan")
    widget = MainWindow(dev=True, preload=False)

    main_widget = widget.table_widget
    load_new_dataset(main_widget,
                     file_path="test_files/small_dataset1.tif",
                     save_dir_path="test_files/save_dir",
                     load_into_mem=True)
    main_widget.open_dataset_thread.wait()
    time.sleep(10)
    data_handler = main_widget.data_handler

    data_handler.change_filter_param("median_filter", True)
    data_handler.change_roi_extraction_param("roi_circ_threshold", 20)
    main_widget.thread_list[1].run()
    assert main_widget.tabs[1].image_view.magic_wand(70, 127)

    data_handler.export()
    a = neurofinder.load("test_files/roi_list.json")
    b = neurofinder.load("test_files/save_dir/roi_list.json")
    test = neurofinder.match(a, b)
    precision, recall = neurofinder.centers(a, b)
    inclusion, exclusion = neurofinder.shapes(a, b)
    assert precision > .8
    assert recall > .8
    assert exclusion > .6
    assert inclusion > .6
    image_good = io.imread("test_files/embedding_norm_image.png")
    image_test = io.imread(
        "test_files/save_dir/embedding_norm_images/embedding_norm_image.png")

    assert structural_similarity(image_good, image_test) > .90
    data_handler.change_box_param("total_num_spatial_boxes", 4)
    main_widget.thread_list[1].run()
    data_handler.export()
    image_test = io.imread(
        "test_files/save_dir/embedding_norm_images/embedding_norm_image.png")

    assert structural_similarity(image_good, image_test) > .60
    a = neurofinder.load("test_files/roi_list.json")
    b = neurofinder.load("test_files/save_dir/roi_list.json")
    precision, recall = neurofinder.centers(a, b)
    inclusion, exclusion = neurofinder.shapes(a, b)
    assert precision > .4
    assert recall > .4
    assert exclusion > .4
    assert inclusion > .3
    app.quit()
    shutil.rmtree("test_files/save_dir")
Example #3
def __init__(self, name, regions):
    """
    Result objects represent the result of running a model against a single example
    :param name: name of the dataset corresponding to these results
    :param regions: list of dictionaries specifying the ROI
                    Each dictionary should contain a "coordinates" key that gives a list of (x, y) coordinates
    """
    self.name = name
    self.raw_regions = regions
    regions_json = json.dumps(regions)
    self.regions = neurofinder.load(regions_json)
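
# Usage sketch (not part of the original snippet): the constructor simply wraps
# neurofinder.load() around the JSON-serialized region list, e.g.:
import json
import neurofinder

regions = [{"coordinates": [[0, 0], [0, 1], [1, 1]]}]
loaded = neurofinder.load(json.dumps(regions))  # same object the method stores in self.regions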
Example #4
from neurofinder import load, centers, shapes


def lt_evaluate(file1, file2, threshold=5):
    a = load(file1)
    b = load(file2)
    if a != 0 and b != 0:
        recall, precision = centers(a, b, threshold=threshold)
        inclusion, exclusion = shapes(a, b, threshold=threshold)

        if recall == 0 and precision == 0:
            combined = 0
        else:
            combined = 2 * (recall * precision) / (recall + precision)

        result = {
            'combined': round(combined, 4),
            'inclusion': round(inclusion, 4),
            'precision': round(precision, 4),
            'recall': round(recall, 4),
            'exclusion': round(exclusion, 4)
        }
        return result
    else:
        return {}
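
# Usage sketch (not part of the original snippet): lt_evaluate expects two ROI
# JSON files in the neurofinder format; the file names below are made up.
import json

for name, coords in [("regions_a.json", [[0, 0], [0, 1]]),
                     ("regions_b.json", [[0, 0], [1, 1]])]:
    with open(name, "w") as f:
        json.dump([{"coordinates": coords}], f)
print(lt_evaluate("regions_a.json", "regions_b.json", threshold=5))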
Example #5
import json
import os

import neurofinder
import numpy as np


def compute_accuracy(gt, predictions, threshold=np.inf):
  """
  Compare predicted ROIs against ground-truth ROIs.
  """
  a = neurofinder.load(json.dumps([{'coordinates' : r.coordinates.tolist()} for r in gt]))
  b = neurofinder.load(json.dumps([{'coordinates' : r.coordinates.tolist()} for r in predictions]))

  recall, precision = neurofinder.centers(a, b, threshold=threshold)
  inclusion, exclusion = neurofinder.shapes(a, b, threshold=threshold)

  if recall == 0 and precision == 0:
    combined = 0
  else:
    combined = 2 * (recall * precision) / (recall + precision)

  #result = {'combined': round(combined, 4), 'inclusion': round(inclusion, 4), 'precision': round(precision, 4), 'recall': round(recall, 4), 'exclusion': round(exclusion, 4)}
  #print(json.dumps(result))
  
  print "Combined: %f" % (combined,)
  print "Precision: %f" % (precision,)
  print "Recall: %f" % (recall,)
  print "Inclusion: %f" % (inclusion,)
  print "Exclusion: %f" % (exclusion,) 
#%%
def tomask(coords, dims):
    # Build a binary mask with ones at the listed pixel coordinates.
    mask = np.zeros(dims)
    mask[tuple(zip(*coords))] = 1  # tuple index; list-based multidimensional indexing is deprecated in NumPy
    return mask
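
# Usage sketch (not part of the original snippet): rasterize one ROI's pixel
# coordinate list into a binary mask; the coordinates and shape are made up.
example_mask = tomask([[10, 12], [10, 13], [11, 12]], dims=(64, 64))
print(example_mask.sum())  # -> 3.0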


#%%
import json
from neurofinder import load, centers, shapes, match

# base_folders and dims are assumed to be defined earlier in the original script.
for folder_in in base_folders[-2:-1]:
    ref_file = os.path.join(folder_in, 'regions', 'regions_CNMF_1.json')
    if os.path.exists(ref_file):
        print(folder_in)
        b = load(ref_file)
        # a = load(os.path.join(folder_in, 'regions', 'regions_wesley.json'))
        a = load(os.path.join(folder_in, 'regions/regions_ben.json'))

        with open(os.path.join(folder_in, 'regions/regions_ben.json')) as f:
            regions = json.load(f)

        masks_nf = np.array([tomask(s['coordinates'], dims) for s in regions])

        with open(os.path.join(folder_in, 'regions/regions_CNMF_1.json')) as f:
            regions = json.load(f)

        masks_1 = np.array([tomask(s['coordinates'], dims) for s in regions])

        with open(os.path.join(folder_in, 'regions/regions_CNMF_2.json')) as f:
            regions = json.load(f)
Example #8
import argparse
import csv
import os
from functools import reduce

import neurofinder
import pandas
from PIL import Image


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--task_list",
                        type=str,
                        default='',
                        required=True,
                        help="Path to all datasets")
    parser.add_argument(
        "-odir",
        "--output_dir",
        type=str,
        default='',
        required=True,
        help="Where to output all eigen vector images and csv data",
    )
    parser.add_argument("--task_log", type=str, default='', required=True)
    args = parser.parse_args()
    task_list = []
    with open(args.task_list, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ', quotechar='|')

        for row in reader:
            task_list.append(row[0])

    parameters_to_search = {
        "median_filter": [True, False],
        "hist_eq": [True, False],
        "pca": [True, False],
        "total_num_spatial_boxes": [1, 4, 9],
        "num_eig": [50],
        "trial_split": [True],
        "z_score": [True, False],
        "trial_length": [250, 500, 1000],
        "localSpatialDenoising": [True],
        "knn": [7],
        "normalize_w_k": [1E-2, 1E-3, 1E-4, 1E-5]
    }
    total_parameters_combinations = reduce(
        lambda x, y: x * y,
        [len(parameters_to_search[x]) for x in parameters_to_search])
    print(total_parameters_combinations)
    parameter_keys = list(parameters_to_search)
    parameter_remainders = []
    for num, key in enumerate(parameter_keys):
        remainder = 1
        for x in range(num + 1):
            remainder *= len(parameters_to_search[parameter_keys[x]])
        parameter_remainders.append(remainder)

    rows = []
    for num, path in enumerate([os.path.dirname(x) for x in task_list]):
        if os.path.isfile(os.path.join(path, "roi_list.json")):
            a = neurofinder.load(os.path.join(path, "roi_true.json"))
            b = neurofinder.load(os.path.join(path, "roi_list.json"))
            precision, recall = neurofinder.centers(a, b)
            inclusion, exclusion = neurofinder.shapes(a, b)
        else:
            precision, recall, inclusion, exclusion = -1, -1, -1, -1
        current_row = [num + 1, precision, recall, inclusion, exclusion]

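        # Decode which value of each parameter this task index used:
        # parameter_remainders[i] is the cumulative product of option counts for
        # keys 0..i, so (num % remainder) // (remainder // len(options)) recovers
        # the option index for this key (the first key varies fastest).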
        for remainder, key in zip(parameter_remainders, parameter_keys):
            val = parameters_to_search[key][num % remainder //
                                            (remainder //
                                             len(parameters_to_search[key]))]
            current_row.append(val)
        rows.append(current_row)
        if os.path.isfile(
                os.path.join(
                    path, "embedding_norm_images/embedding_norm_image.png")):
            Image.open(
                os.path.join(
                    path,
                    "embedding_norm_images/embedding_norm_image.png")).save(
                        os.path.join(args.output_dir,
                                     os.path.basename(path) + ".png"))

    df = pandas.DataFrame(
        rows,
        columns=["Seq", "Percision", "Recall", "Inclusion", "Exclusion"] +
        parameter_keys)
    task_log = pandas.read_csv(args.task_log)
    result = pandas.concat([df, task_log], axis=1)
    result.to_csv(os.path.join(args.output_dir, "out.csv"))
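

if __name__ == "__main__":
    # Hypothetical invocation (the script name and file names are assumptions):
    #   python evaluate_grid_search.py --task_list tasks.csv -odir results/ --task_log task_log.csv
    main()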