import json
import math
import os
import time

import cv2
import numpy as np
from scipy import spatial

# NOTE: utils, models, BoundingBox, PMCC_filter_example, cv_wrap_module and the
# various project-local helpers (get_tile_centers_from_json,
# generatehexagonalgrid, etc.) are assumed to be imported or defined elsewhere
# in this project.


def match_layers_pmcc_matching(tiles_fname1, tiles_fname2, pre_matches_fname, out_fname, conf_fname=None):
    params = utils.conf_from_file(conf_fname, 'MatchLayersBlockMatching')
    if params is None:
        params = {}

    # Parameters for the matching
    hex_spacing = params.get("hexspacing", 1500)
    scaling = params.get("scaling", 0.2)
    template_size = params.get("template_size", 200)

    template_size *= scaling
    print("Actual template size (after scaling): {}".format(template_size))

    # Parameters for PMCC filtering
    min_corr = params.get("min_correlation", 0.2)
    max_curvature = params.get("maximal_curvature_ratio", 10)
    max_rod = params.get("maximal_ROD", 0.9)

    print(params)
    debug_save_matches = "debug_save_matches" in params
    if debug_save_matches:
        print("Debug mode - on")
        # Create a timestamped debug directory next to the output file
        import datetime
        debug_dir = os.path.join(os.path.dirname(out_fname), 'debug_matches_{}'.format(datetime.datetime.now().isoformat()))
        os.mkdir(debug_dir)

    print("Block-Matching+PMCC layers: {} and {}".format(tiles_fname1, tiles_fname2))

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)

    out_jsonfile = {}
    out_jsonfile['tilespec1'] = tiles_fname1
    out_jsonfile['tilespec2'] = tiles_fname2

    # Generate a hexagonal grid according to the first section's bounding box
    bb = BoundingBox.read_bbox(tiles_fname1)
    hexgr = generatehexagonalgrid(bb, hex_spacing)

    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching, saving an empty matches output file")
        out_jsonfile['runtime'] = 0
        out_jsonfile['mesh'] = hexgr
        out_jsonfile['pointmatches'] = []
        with open(out_fname, 'w') as out:
            json.dump(out_jsonfile, out, indent=4)
        return


    starttime = time.time()

    # Compute the best transformations for each of the mfovs in section 1 (the transformations to section 2)
    best_transformations = get_best_transformations(mfov_pre_matches)

    img_matches = get_img_matches(ts1, tile_centers1, ts2, tile_centers2, best_transformations, mfov_centers1)
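    # img_matches is expected to map each tile index in section 1 to a list of
    # candidate tile indices in section 2 that may overlap it under the
    # estimated per-mfov transformations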

    point_matches = []

    actual_matches_num = 0
    # Iterate over the hexagonal points and find a match in the second section
    print("Matching {} points between the two sections".format(len(hexgr)))
    for i in range(len(hexgr)):
        if i % 1000 == 0 and i > 0:
            print(i)
        # Find the tile in the first section that contains the current hexagonal grid point
        img1_ind = get_closest_index_to_point(hexgr[i], tile_centers1tree)
        if img1_ind is None:
            continue
        if not is_point_in_img(ts1[img1_ind], hexgr[i]):
            continue

        # Load the image, and get the template from it
        img1_url = ts1[img1_ind]["mipmapLevels"]["0"]["imageUrl"]
        img1_url = img1_url.replace("file://", "")
        img1 = cv2.imread(img1_url, 0)
        img1_resized = cv2.resize(img1, (0, 0), fx=scaling, fy=scaling)
        img1_offset = get_image_top_left(ts1, img1_ind)
        expected_transform = find_best_mfov_transformation(ts1[img1_ind]["mfov"], best_transformations, mfov_centers1)
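        # Extract a square template (side ~template_size) around the scaled
        # grid point; the helper presumably returns None when a full template
        # cannot be cut out of the image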

        img1_template = get_template_from_img_and_point(img1_resized, template_size, (np.array(hexgr[i]) - img1_offset) * scaling)
        if img1_template is None:
            continue

        # Find the template coordinates
        chosen_template, startx, starty, not_on_mesh = img1_template
        # print("img1_template starts at: {}, {} and original point should have been {}".format(startx, starty, np.array(hexgr[i]) - img1_offset))
        w, h = chosen_template.shape
        center_point1 = np.array([startx + w / 2, starty + h / 2]) / scaling + img1_offset
        # print("center_point1", center_point1)
        expected_new_center = np.dot(expected_transform, np.append(center_point1, [1]))[0:2]

        rows, cols = chosen_template.shape
        # Convert to degrees and negate, since cv2.getRotationMatrix2D expects
        # counter-clockwise angles in degrees
        rad2deg = -180 / math.pi
        # TODO - assumes only rigid transformation, should be more general
        angle_of_rot = rad2deg * math.atan2(expected_transform[1][0], expected_transform[0][0])
        rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle_of_rot, 1)
        rotated_temp1 = cv2.warpAffine(chosen_template, rotation_matrix, (cols, rows))
        xaa = int(w / 2.9)
        # Crop the central part of the rotated template (integer division is
        # required here, since slice indices must be integers)
        rotated_and_cropped_temp1 = rotated_temp1[(w // 2 - xaa):(w // 2 + xaa), (h // 2 - xaa):(h // 2 + xaa)]
        neww, newh = rotated_and_cropped_temp1.shape

        # TODO - assumes a single transformation, but there might be more
        img1_model = models.Transforms.from_tilespec(ts1[img1_ind]["transforms"][0])
        img1_center_point = img1_model.apply(np.array([starty + h / 2, startx + w / 2]) / scaling)  # + imgoffset1

        # Get the images from section 2 around the expected matching location
        img2_inds = img_matches[img1_ind]
        img2s = get_images_from_indices_and_point(ts2, img2_inds, expected_new_center)
        actual_matches_num += 1
        for (img2, img2_ind) in img2s:
            img2_resized = cv2.resize(img2, (0, 0), fx=scaling, fy=scaling)
            img2_offset = get_image_top_left(ts2, img2_ind)

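            # Run the PMCC (correlation-based) match of the rotated template
            # against the candidate tile; candidates are filtered by the
            # min_correlation, maximal_curvature_ratio and maximal_ROD
            # thresholds set above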
            result, reason = PMCC_filter_example.PMCC_match(img2_resized, rotated_and_cropped_temp1, min_correlation=min_corr, maximal_curvature_ratio=max_curvature, maximal_ROD=max_rod)
            if result is not None:
                reasonx, reasony = reason
                # TODO - assumes a single transformation, but there might be more
                img2_model = models.Transforms.from_tilespec(ts2[img2_ind]["transforms"][0])
                img2_center_point = img2_model.apply(np.array([reasony + newh / 2, reasonx + neww / 2]) / scaling)  # + imgoffset2
                point_matches.append((img1_center_point, img2_center_point, not_on_mesh))
                if debug_save_matches:
                    debug_out_fname1 = os.path.join(debug_dir, "debug_match_sec1{}-{}_sec2{}-{}_image1.png".format(hexgr[i][0], hexgr[i][1], reasonx, reasony))
                    debug_out_fname2 = os.path.join(debug_dir, "debug_match_sec1{}-{}_sec2{}-{}_image2.png".format(hexgr[i][0], hexgr[i][1], reasonx, reasony))
                    cv2.imwrite(debug_out_fname1, rotated_and_cropped_temp1)
                    temp1_final_sizex = rotated_and_cropped_temp1.shape[0]
                    temp1_final_sizey = rotated_and_cropped_temp1.shape[1]
                    img2_cut_out = img2_resized[reasonx:(reasonx + temp1_final_sizex), reasony:(reasony + temp1_final_sizey)]
                    cv2.imwrite(debug_out_fname2, img2_cut_out)

    print("Found {} matches out of possible {} points (on section points: {})".format(len(point_matches), len(hexgr), actual_matches_num))
    # Save the output
    print("Saving output to: {}".format(out_fname))
    out_jsonfile['runtime'] = time.time() - starttime
    out_jsonfile['mesh'] = hexgr

    final_point_matches = []
    for p1, p2, not_on_mesh in point_matches:
        record = {
            'point1': p1.tolist(),
            'point2': p2.tolist(),
            'isvirtualpoint': not_on_mesh,
        }
        final_point_matches.append(record)

    out_jsonfile['pointmatches'] = final_point_matches
    with open(out_fname, 'w') as out:
        json.dump(out_jsonfile, out, indent=4)
    print("Done.")
def match_layers_pmcc_matching(tiles_fname1,
                               tiles_fname2,
                               pre_matches_fname,
                               out_fname,
                               conf_fname=None):
    params = utils.conf_from_file(conf_fname, 'MatchLayersBlockMatching')
    if params is None:
        params = {}
    cv_wrap_module.setNumThreads(1)

    # Parameters for the matching
    hex_spacing = params.get("hexspacing", 500)
    scaling = params.get("scaling", 0.2)
    template_size = params.get("template_size", 200)
    template_size *= scaling

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)

    # Generate a hexagonal grid according to the first section's bounding box
    bb = BoundingBox.read_bbox(tiles_fname1)
    hexgr = generatehexagonalgrid(bb, hex_spacing)

    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching")
        return

    best_transformations = get_best_transformations(mfov_pre_matches)
    img_matches = get_img_matches(ts1, tile_centers1, ts2, tile_centers2,
                                  best_transformations, mfov_centers1)

    # Use an arbitrary tile to determine the resized tile dimensions
    # (assumes all tiles in the section have the same size)
    img1_url = ts1[50]["mipmapLevels"]["0"]["imageUrl"]
    img1_url = img1_url.replace("file://", "")
    img1 = cv2.imread(img1_url, 0)
    img1_resized = cv2.resize(img1, (0, 0), fx=scaling, fy=scaling)
    img1width = img1_resized.shape[0]
    img1height = img1_resized.shape[1]

    # Iterate over the hexagonal points and find a match in the second section
    thedictionary = {}
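    # thedictionary maps each tile index in section 1 to an inner dictionary,
    # which maps candidate tile indices in section 2 to the list of grid
    # points that should later be block-matched between that pair of tiles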
    for i in range(len(hexgr)):
        if i % 1000 == 0 and i > 0:
            print(i)

        # Find the tile in the first section that contains the current hexagonal grid point
        img1_ind = get_closest_index_to_point(hexgr[i], tile_centers1tree)
        if img1_ind is None:
            continue
        if not is_point_in_img(ts1[img1_ind], hexgr[i]):
            continue

        # Get expected point of hexgr[i] in the second section
        img1_offset = get_image_top_left(ts1, img1_ind)
        expected_transform = find_best_mfov_transformation(
            ts1[img1_ind]["mfov"], best_transformations, mfov_centers1)
        img1_template = get_blank_template_from_img_and_point(
            img1width, img1height, template_size,
            (np.array(hexgr[i]) - img1_offset) * scaling)
        if img1_template is None:
            continue
        startx, starty, w, h, not_on_mesh = img1_template
        center_point1 = np.array([startx + w / 2, starty + h / 2]) / scaling + img1_offset
        expected_new_center = np.dot(expected_transform,
                                     np.append(center_point1, [1]))[0:2]

        img2_inds = img_matches[img1_ind]
        img2s = get_images_indices_from_indices_and_point(
            ts2, img2_inds, expected_new_center)

        for img2_ind in img2s:
            # Group the grid points by their (section 1 tile, section 2 tile) pair
            thedictionary.setdefault(img1_ind, {}).setdefault(img2_ind, []).append(hexgr[i])

    with open(out_fname, 'w') as out:
        json.dump(thedictionary, out, indent=4)

    print("Done.")
Example #3
    if args.to_layer != -1:
        if layer > args.to_layer:
            continue

    if layer in skipped_layers:
        continue

    all_layers.append(layer)

    # Read the bounding box of the section
    bbox = BoundingBox.read_bbox(tiles_fname)
    if imageWidth is None or imageWidth < (bbox[1] - bbox[0]):
        imageWidth = bbox[1] - bbox[0]
    if imageHeight is None or imageHeight < (bbox[3] - bbox[2]):
        imageHeight = bbox[3] - bbox[2]

    if args.render_meshes_first:
        # precompute the transformed meshes of the tiles
        print "Creating meshes of {0}".format(tiles_fname_prefix)
        layer_meshes_dir[layer] = os.path.join(meshes_dir, tiles_fname_prefix)
        if not os.path.exists(layer_meshes_dir[layer]):
            create_dir(layer_meshes_dir[layer])
            create_meshes(tiles_fname, layer_meshes_dir[layer], args.jar_file)

    # create the sift features of these tiles
    print "Computing sift features of {0}".format(tiles_fname_prefix)