Example #1
def edges_diblasi(img, gauss=5, details=1, plot=[]):

    # RGB to gray ("Luminance channel" in Di Blasi)
    img_gray = sk.color.rgb2gray(img)

    # equalize histogram
    img_eq = sk.exposure.equalize_hist(img_gray)

    # soften image
    img_gauss = filters.gaussian(img_eq, sigma=16, truncate=gauss / 16)

    # segment bright areas to blobs
    variance = img_gauss.std()**2  # possibly use the std directly
    img_seg = np.ones((img.shape[0], img.shape[1]))
    threshold = variance / 4 * 2 * details
    img_seg[abs(img_gauss - img_gauss.mean()) > threshold] = 0

    ### 5. find edges
    img_edge = filters.laplace(img_seg, ksize=3)
    img_edge[img_edge != 0] = 1

    if 'edges' in plot:
        plotting.plot_image(img_edge, inverted=True, title='Di Blasi')

    return img_edge
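
# A minimal usage sketch for the function above, assuming the module-level
# imports its body implies (numpy as np, skimage as sk, skimage.filters as
# filters); the bundled coffee image stands in for real input:

import numpy as np
import skimage as sk
from skimage import filters            # imports edges_diblasi relies on

img = sk.data.coffee()                 # demo image shipped with scikit-image
edge_map = edges_diblasi(img, gauss=5, details=1)
print(edge_map.shape, int(edge_map.sum()), 'edge pixels')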
Example #2
def edges_hed(img, gauss=None, plot=[]):

    if gauss:
        img = filters.gaussian(img,
                               sigma=16,
                               truncate=gauss / 16,
                               multichannel=True)

    img = img / np.amax(img) * 255
    img = img.astype(np.uint8)

    hed_matrix = hed_edges(img)

    # gray to binary
    hed_seg = np.ones((hed_matrix.shape[0], hed_matrix.shape[1]))
    hed_seg[hed_matrix < 0.5] = 0

    # skeletonize to get inner lines
    img_edges = sk.morphology.skeletonize(hed_seg).astype(int)

    # option to make plot lines thicker:
    #from skimage.morphology import square,dilation
    #img_edges = dilation(img_edges, square(3))

    if 'edges' in plot:
        plotting.plot_image(img_edges, inverted=True, title='HED')

    return img_edges
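
# hed_edges wraps the project's HED network and is not reproduced here; the
# sketch below isolates only the binarize-and-skeletonize step on a random
# stand-in probability map (the 0.5 threshold is the one used above):

import numpy as np
from skimage.morphology import skeletonize

rng = np.random.default_rng(0)
fake_hed = rng.random((64, 64))                 # stand-in for a HED probability map
hed_seg = np.ones(fake_hed.shape)
hed_seg[fake_hed < 0.5] = 0                     # same 0.5 cut-off as above
img_edges = skeletonize(hed_seg).astype(int)    # thin blobs down to 1-px lines
print(int(img_edges.sum()), 'skeleton pixels')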
Example #3
def chains_into_gaps(polygons, h, w, half_tile, CHAIN_SPACING, plot=[]):
    # get areas which are already occupied
    img_chains = np.zeros((h, w), dtype=np.uint8)
    for p in polygons:
        y, x = p.exterior.coords.xy
        rr, cc = draw.polygon(x, y, shape=img_chains.shape)
        img_chains[rr, cc] = 1
    distance_to_tile = morphology.distance_transform_edt(img_chains == 0)
    d = distance_to_tile.astype(int)

    # define new guidelines
    chain_spacing = int(round(half_tile * CHAIN_SPACING))
    if chain_spacing <= 1:  # would select EVERY pixel inside gap
        chain_spacing = 2
    # first condition (d==1) => chains around all (even the smallest) gap borders
    # (set e.g. d==2 for faster calculations)
    # second condition (...) => more chains inside larger gaps
    mask = (d == 1) | ((d % chain_spacing == 0) & (d > 0))

    guidelines2 = np.zeros((h, w), dtype=np.uint8)
    guidelines2[mask] = 1
    chains2 = pixellines_to_ordered_points(guidelines2, half_tile)

    if 'used_up_space' in plot: plotting.plot_image(img_chains, title='gaps')
    if 'distance_to_tile' in plot:
        plotting.plot_image(distance_to_tile, inverted=True)
    if 'filler_guidelines' in plot:
        plotting.plot_image(guidelines2, inverted=True, title='new guidelines')

    return chains2
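
# The guideline mask is the core of this function; a self-contained toy sketch
# (scipy.ndimage imported directly instead of the snippet's morphology alias,
# and the project helper pixellines_to_ordered_points left out):

import numpy as np
from scipy.ndimage import distance_transform_edt

occupied = np.zeros((40, 40), dtype=np.uint8)
occupied[10:30, 10:30] = 1                      # pretend these pixels already hold tiles
d = distance_transform_edt(occupied == 0).astype(int)
chain_spacing = 6                               # e.g. half_tile=4, CHAIN_SPACING=1.5
mask = (d == 1) | ((d % chain_spacing == 0) & (d > 0))
print(int(mask.sum()), 'guideline pixels inside the gaps')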
Example #4
def load_image(fname, width=900, plot=[]):

    if fname:
        img0 = imread(fname)
    else:
        img0 = sk.data.coffee()  # coffee (example image)

    # ensure image is rgb (for consistency)
    if len(img0.shape) < 3:
        img0 = sk.color.gray2rgb(img0)

    # resize to same image width => tile size has always similar effect
    if width is not None:
        factor = width / img0.shape[1]
        img0 = transform.resize(
            img0, (int(img0.shape[0] * factor), int(img0.shape[1] * factor)),
            anti_aliasing=True)
    # rescale the [0, 1] floats from transform.resize back to 0..255
    img0 = (img0 * 255).astype(int)
    if 'original' in plot: plotting.plot_image(img0)
    print(f'Size of input image: {img0.shape[0]}px * {img0.shape[1]}px')

    return img0
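
# Usage sketch, assuming the imports the function expects at call time
# (skimage as sk and skimage.transform as transform; imread and plotting are
# never reached when fname is None and plot stays empty):

import skimage as sk
from skimage import transform

img = load_image(None, width=600)   # no filename -> falls back to sk.data.coffee()
print(img.dtype, img.shape)         # e.g. int64 (400, 600, 3)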
Example #5
def chains_and_angles(img_edges, half_tile, plot=[]):

    # for each pixel get distance to closest edge
    distances = morphology.distance_transform_edt(img_edges == 0)

    # tiles will be placed centered along guidelines (closed lines)
    """     tile
           xxxxxx
           xxxxxx
    ---------------------- guideline
           xxxxxx
           xxxxxx
    """
    w, h = img_edges.shape[0], img_edges.shape[1]
    guidelines = np.zeros((w, h), dtype=np.uint8)
    mask = ((distances.astype(int) + half_tile) % (2 * half_tile) == 0)
    guidelines[mask] = 1
    # break into chains and order the points
    t0 = time.time()
    chains = pixellines_to_ordered_points(guidelines, half_tile)
    print('Pixel guidelines to chains with sorted points:',
          f'{time.time()-t0:.1f}s')

    # use distances to calculate gradients => rotation of tiles when placed later
    t0 = time.time()
    gradient = np.zeros((w, h))
    for x in range(1, w - 1):
        for y in range(1, h - 1):
            numerator = distances[x, y + 1] - distances[x, y - 1]
            denominator = distances[x + 1, y] - distances[x - 1, y]
            gradient[x, y] = np.arctan2(numerator, denominator)
    angles_0to180 = (gradient * 180 / np.pi + 180) % 180
    print('Calculation of angle matrix:', f'{time.time()-t0:.1f}s')
    # Remark: it would be enough to calculate only x,y inside the chain => faster
    # interim_stages = dict(distances=distances, guidelines=guidelines, chains=chains,
    #                       gradient=gradient, angles_0to180=angles_0to180)

    if 'distances' in plot: plotting.plot_image(distances, title='distances')
    if 'guidelines' in plot:
        plotting.plot_image(guidelines, inverted=True, title='guidelines')
    if 'gradient' in plot: plotting.plot_image(gradient, title='gradients')
    if 'angles_0to180' in plot: plotting.plot_image(angles_0to180)

    return chains, angles_0to180  #, interim_stages
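
# The nested loop above can also be written with vectorized central
# differences; a self-contained sketch on a synthetic edge image
# (scipy.ndimage imported directly here instead of the snippet's morphology
# alias) that yields the same angle field:

import numpy as np
from scipy.ndimage import distance_transform_edt

edges = np.zeros((32, 32), dtype=np.uint8)
edges[16, :] = 1                                               # a single horizontal edge line
distances = distance_transform_edt(edges == 0)
num = np.zeros_like(distances)
den = np.zeros_like(distances)
num[1:-1, 1:-1] = distances[1:-1, 2:] - distances[1:-1, :-2]   # d[x, y+1] - d[x, y-1]
den[1:-1, 1:-1] = distances[2:, 1:-1] - distances[:-2, 1:-1]   # d[x+1, y] - d[x-1, y]
angles_0to180 = (np.arctan2(num, den) * 180 / np.pi + 180) % 180
print(angles_0to180[8, 8])                                     # 0.0 for a horizontal edge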
Example #6
#detector = gv.Detector.load(model_file)
data = np.load(results_file)

#p, r = data['precisions'], data['recalls']
detections = data['detections']
detections.sort(order='confidence')
detections = detections[::-1]

# TODO:
try:
    contest = str(data['contest'])
    obj_class = data['obj_class']
except KeyError:
    contest = 'voc'
    obj_class = 'car'

for i, det in enumerate(detections[:count]):
    bb = (det['top'], det['left'], det['bottom'], det['right'])
    bbobj = gv.bb.DetectionBB(bb, score=det['confidence'], confidence=det['confidence'], mixcomp=det['mixcomp'], correct=det['correct'])

    img_id = det['img_id']
    fileobj = gv.datasets.load_file(contest, img_id, obj_class=obj_class)
    
    # Replace bounding boxes with this single one
    fileobj.boxes[:] = [bbobj]
    
    fn = 'det-{0}.png'.format(i)
    path = os.path.join(output_dir, fn)
    plot_image(fileobj, filename=path, show_corrects=True)
    print('Saved {0}'.format(path))
Example #7
detections = data['detections']
detections.sort(order='confidence')
detections = detections[::-1]

# TODO:
try:
    contest = str(data['contest'])
    obj_class = data['obj_class']
except KeyError:
    contest = 'voc'
    obj_class = 'car'

for i, det in enumerate(detections[:count]):
    bb = (det['top'], det['left'], det['bottom'], det['right'])
    bbobj = gv.bb.DetectionBB(bb,
                              score=det['confidence'],
                              confidence=det['confidence'],
                              mixcomp=det['mixcomp'],
                              correct=det['correct'])

    img_id = det['img_id']
    fileobj = gv.datasets.load_file(contest, img_id, obj_class=obj_class)

    # Replace bounding boxes with this single one
    fileobj.boxes[:] = [bbobj]

    fn = 'det-{0}.png'.format(i)
    path = os.path.join(output_dir, fn)
    plot_image(fileobj, filename=path, show_corrects=True)
    print('Saved {0}'.format(path))
Example #8
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='View image and its annotation')
    parser.add_argument('imgname', metavar='<image name>', nargs='?', type=str, help='Name of image in VOC repository')
    parser.add_argument('--class', dest='obj_class', default='bicycle', type=str, help='Object class for showing positives')
    parser.add_argument('--contest', type=str, choices=gv.datasets.datasets(), default='voc-val', help='Contest to try on')
    #parser.add_argument('-c', '--continue', action='store_true', help='List all')

    args = parser.parse_args()
    imagename = args.imgname
    obj_class = args.obj_class
    contest = args.contest
    
    if imagename is None:
        fileobjs, tot = gv.datasets.load_files(contest, obj_class)
        for f in fileobjs:
            if len(f.boxes) > 0:
                #print("{0:20} {1} ({2})".format(os.path.basename(f.path), len(f.boxes), sum([bbobj.difficult for bbobj in f.boxes])))
                print("Showing ", f.img_id)
                plot_image(f, filename='an/data-{0}.png'.format(f.img_id))
                #if raw_input("Continue? (Y/n): ") == 'n':
                #    break
                    
                
    else:
        fileobj = gv.datasets.load_file(contest, imagename, obj_class=obj_class)
        print(fileobj)
        plot_image(fileobj, bare=True)
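
# Hypothetical invocation (the script filename is assumed; the image name,
# --class and --contest options map directly to the parser arguments above):
#   python view_annotation.py <image name> --class bicycle --contest voc-val
# Without an image name, the script instead saves a plot for every file of the
# class that contains positive boxes.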