Example #1
    if args.watershed is None:
        args.watershed = watershed(probs, show_progress=args.show_progress)
    ws = args.watershed

    thickness = args.thickness
    zcrop1 = [0, thickness]
    overlaps = [2**i + 1 for i in range(1, 8)]
    results_table = zeros([len(args.thresholds), len(overlaps)], dtype=bool)
    for j, overlap in enumerate(overlaps):
        zcrop2 = [thickness - overlap, 2 * thickness - overlap]
        # pdb.set_trace()
        probs1, ws1 = crop_probs_and_ws(args.xy_crop + zcrop1, probs, ws)
        probs2, ws2 = crop_probs_and_ws(args.xy_crop + zcrop2, probs, ws)
        g1 = Rag(ws1,
                 probs1,
                 show_progress=args.show_progress,
                 lowmem=args.low_memory)
        g2 = Rag(ws2,
                 probs2,
                 show_progress=args.show_progress,
                 lowmem=args.low_memory)
        for i, t in enumerate(args.thresholds):
            g1.agglomerate(t)
            g2.agglomerate(t)
            # do the two independently agglomerated blocks agree (as boolean
            # masks) at the shared z-plane?
            results_table[i, j] = (
                juicy_center(g1.segmentation, 2)[..., -overlap // 2].astype(bool)
                == juicy_center(g2.segmentation, 2)[..., overlap // 2].astype(bool)
            ).all()
    savetxt('debug.txt', results_table, delimiter='\t')
    results_table = hstack([array(args.thresholds)[:, newaxis], results_table])
    results_table = vstack([array([0] + overlaps), results_table,
                            results_table.all(axis=0)])
    savetxt(args.fout, results_table, delimiter='\t', fmt='%i')
Example #2
    parser.add_argument('--save-node-training-data', metavar='FILE',
        help='Save node features and labels to FILE.'
    )
    parser.add_argument('--node-classifier', metavar='FILE',
        help='Train and output a node split classifier.'
    )
    args = parser.parse_args()

    feature_map_function = eval(args.feature_map_function)
    if args.load_classifier is not None:
        mpf = classifier_probability(feature_map_function,
                                     args.load_classifier)
    else:
        mpf = eval(args.objective_function)

    wsg = Rag(args.ws, args.probs, mpf)
    features, labels, weights, history, ave_sizes = \
                        wsg.learn_agglomerate(args.gt, feature_map_function)

    print('shapes:', features.shape, labels.shape)

    if args.load_classifier is not None:
        try:
            # append previously saved training data, if any, to the new samples
            with h5py.File(args.save_training_data, 'r') as f:
                old_features = array(f['samples'])
                old_labels = array(f['labels'])
            features = concatenate((features, old_features), 0)
            labels = concatenate((labels, old_labels), 0)
        except (OSError, KeyError):
            # no previous training data saved; continue with the new data only
            pass
    print('fitting classifier of size, pos:', labels.size, (labels == 1).sum())
Example #3
        probs = median_filter(probs, 3)
    elif args.gaussian_filter is not None:
        probs = gaussian_filter(probs, args.gaussian_filter)

    if args.watershed is None:
        args.watershed = watershed(probs, show_progress=args.show_progress)
    ws = args.watershed

    thickness = args.thickness
    zcrop1 = [0, thickness]
    overlaps = [2 ** i + 1 for i in range(1, 8)]
    results_table = zeros([len(args.thresholds), len(overlaps)], dtype=bool)
    for j, overlap in enumerate(overlaps):
        zcrop2 = [thickness - overlap, 2 * thickness - overlap]
        # pdb.set_trace()
        probs1, ws1 = crop_probs_and_ws(args.xy_crop + zcrop1, probs, ws)
        probs2, ws2 = crop_probs_and_ws(args.xy_crop + zcrop2, probs, ws)
        g1 = Rag(ws1, probs1, show_progress=args.show_progress, lowmem=args.low_memory)
        g2 = Rag(ws2, probs2, show_progress=args.show_progress, lowmem=args.low_memory)
        for i, t in enumerate(args.thresholds):
            g1.agglomerate(t)
            g2.agglomerate(t)
            # do the two independently agglomerated blocks agree (as boolean
            # masks) at the shared z-plane?
            results_table[i, j] = (
                juicy_center(g1.segmentation, 2)[..., -overlap // 2].astype(bool)
                == juicy_center(g2.segmentation, 2)[..., overlap // 2].astype(bool)
            ).all()
    savetxt("debug.txt", results_table, delimiter="\t")
    results_table = hstack([array(args.thresholds)[:, newaxis], results_table])
    results_table = vstack([array([0] + overlaps), results_table, results_table.all(axis=0)])
    savetxt(args.fout, results_table, delimiter="\t", fmt="%i")
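
None of these snippets show their imports, since each one starts mid-script. A minimal preamble that would cover the names used above is sketched here, assuming Rag, classifier_probability, watershed and juicy_center come from the gala package; the module paths are assumptions, and crop_probs_and_ws is presumably a helper defined elsewhere in the same script:

# Hypothetical import preamble for the snippets on this page; the gala
# module paths below are assumptions, not confirmed by the snippets.
from numpy import (array, concatenate, hstack, newaxis, savetxt, vstack,
                   zeros)
from scipy.ndimage import gaussian_filter, median_filter
import h5py

from gala.agglo import Rag, classifier_probability   # assumed location
from gala.morpho import juicy_center, watershed      # assumed location

# crop_probs_and_ws is not defined in any snippet; it is presumably a local
# helper that applies the combined xy + z crop to both the probability map
# and the watershed volume.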
Example #4
    parser.add_argument('--save-node-training-data',
                        metavar='FILE',
                        help='Save node features and labels to FILE.')
    parser.add_argument('--node-classifier',
                        metavar='FILE',
                        help='Train and output a node split classifier.')
    args = parser.parse_args()

    feature_map_function = eval(args.feature_map_function)
    if args.load_classifier is not None:
        mpf = classifier_probability(feature_map_function,
                                     args.load_classifier)
    else:
        mpf = eval(args.objective_function)

    wsg = Rag(args.ws, args.probs, mpf)
    features, labels, weights, history, ave_sizes = \
                        wsg.learn_agglomerate(args.gt, feature_map_function)

    print('shapes:', features.shape, labels.shape)

    if args.load_classifier is not None:
        try:
            # append previously saved training data, if any, to the new samples
            with h5py.File(args.save_training_data, 'r') as f:
                old_features = array(f['samples'])
                old_labels = array(f['labels'])
            features = concatenate((features, old_features), 0)
            labels = concatenate((labels, old_labels), 0)
        except (OSError, KeyError):
            # no previous training data saved; continue with the new data only
            pass
    print('fitting classifier of size, pos:', labels.size, (labels == 1).sum())