Example #1
def view_post_pmcc_mfov(pmcc_matches_fname, matches_num, seed, scale,
                        output_dir):

    # Load the preliminary matches
    with open(pmcc_matches_fname, 'r') as data_matches:
        pmcc_matches_data = json.load(data_matches)
    if len(pmcc_matches_data["pointmatches"]) == 0:
        print("No matches were found in pmcc-matching, aborting")
        return

    tiles_fname1 = pmcc_matches_data["tilespec1"]
    tiles_fname2 = pmcc_matches_data["tilespec2"]

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)
    indexed_ts2 = utils.index_tilespec(ts2)

    # Create the (lazy) renderers for the two sections
    img1_renderer = TilespecAffineRenderer(ts1)
    img2_renderer = TilespecAffineRenderer(ts2)

    scale_transformation = np.array([[scale, 0., 0.], [0., scale, 0.]])
    img1_renderer.add_transformation(scale_transformation)
    img2_renderer.add_transformation(scale_transformation)

    # Find a random number of points
    np.random.seed(seed)
    matches_idxs = np.random.choice(len(pmcc_matches_data["pointmatches"]),
                                    matches_num,
                                    replace=False)
    template_size = int(TEMPLATE_SIZE * scale)
    print("Actual template size: {}".format(template_size))

    utils.create_dir(output_dir)
    # save the matches thumbnails to the output dir
    for idx in matches_idxs:
        # rescale the matches
        p1 = np.array(pmcc_matches_data["pointmatches"][idx]["point1"])
        p2 = np.array(pmcc_matches_data["pointmatches"][idx]["point2"])
        print("Saving match {}: {} and {}".format(idx, p1, p2))
        p1_scaled = p1 * scale
        p2_scaled = p2 * scale
        out_fname_prefix = os.path.join(
            output_dir,
            'pmcc_match_{}-{}_{}-{}'.format(int(p1[0]), int(p1[1]), int(p2[0]),
                                            int(p2[1])))

        # Crop and save
        cropped_img1, _ = img1_renderer.crop(p1_scaled[0] - template_size,
                                             p1_scaled[1] - template_size,
                                             p1_scaled[0] + template_size,
                                             p1_scaled[1] + template_size)
        cv2.imwrite('{}_image1.jpg'.format(out_fname_prefix), cropped_img1)
        cropped_img2, _ = img2_renderer.crop(p2_scaled[0] - template_size,
                                             p2_scaled[1] - template_size,
                                             p2_scaled[0] + template_size,
                                             p2_scaled[1] + template_size)
        cv2.imwrite('{}_image2.jpg'.format(out_fname_prefix), cropped_img2)
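
A minimal usage sketch for the viewer above. The file names and parameter values are hypothetical placeholders, and TEMPLATE_SIZE is assumed to be defined at module level, as the code implies.

# Hypothetical invocation: render 10 random match thumbnails at half scale.
# All paths here are placeholders, not names from the original pipeline.
view_post_pmcc_mfov('pmcc_matches_sec1_sec2.json',
                    matches_num=10,
                    seed=42,
                    scale=0.5,
                    output_dir='match_thumbnails')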
Example #2
def match_layers_sift_features(tiles_fname1,
                               features_dir1,
                               tiles_fname2,
                               features_dir2,
                               out_fname,
                               conf_fname=None):
    params = utils.conf_from_file(conf_fname,
                                  'MatchLayersSiftFeaturesAndFilter')
    if params is None:
        params = {}
    actual_params = {}
    # Parameters for the matching
    actual_params["max_attempts"] = params.get("max_attempts", 10)
    actual_params["num_filtered_cutoff"] = params.get("num_filtered_cutoff",
                                                      50)
    actual_params["filter_rate_cutoff"] = params.get("filter_rate_cutoff",
                                                     0.25)
    actual_params["ROD_cutoff"] = params.get("ROD_cutoff", 0.92)

    # Parameters for the RANSAC
    actual_params["model_index"] = params.get("model_index", 1)
    actual_params["iterations"] = params.get("iterations", 500)
    actual_params["max_epsilon"] = params.get("max_epsilon", 500.0)
    actual_params["min_inlier_ratio"] = params.get("min_inlier_ratio", 0.01)
    actual_params["min_num_inlier"] = params.get("min_num_inliers", 7)
    actual_params["max_trust"] = params.get("max_trust", 3)

    print("Matching layers: {} and {}".format(tiles_fname1, tiles_fname2))

    starttime = time.time()

    # Read the tilespecs
    indexed_ts1 = utils.index_tilespec(utils.load_tilespecs(tiles_fname1))
    indexed_ts2 = utils.index_tilespec(utils.load_tilespecs(tiles_fname2))

    num_mfovs1 = len(indexed_ts1)
    num_mfovs2 = len(indexed_ts2)

    # Match the two sections
    retval = analyze2slices(indexed_ts1, indexed_ts2, num_mfovs1, num_mfovs2,
                            features_dir1, features_dir2, actual_params)

    # Save the output
    jsonfile = {}
    jsonfile['tilespec1'] = tiles_fname1
    jsonfile['tilespec2'] = tiles_fname2
    jsonfile['matches'] = retval
    jsonfile['runtime'] = time.time() - starttime
    with open(out_fname, 'w') as out:
        json.dump(jsonfile, out, indent=4)
    print("Done.")
Example #3
def plot_tilespecs(ts_file):
    # Read the tilespecs file
    ts = utils.load_tilespecs(ts_file)
    # Index the tilespecs according to mfov and tile_index (1-based)
    indexed_ts = utils.index_tilespec(ts)

    # Get the centers
    centers = [get_center(indexed_ts[m]) for m in sorted(indexed_ts.keys())]
    max_x_or_y = np.max(centers)

    # Create the figure
    fig = plt.figure()
    fig.suptitle('{} - mfovs'.format(os.path.basename(ts_file)), fontsize=14, fontweight='bold')

    ax = fig.add_subplot(111)

    # Plot the mfov number at each center's location
    for i, center in enumerate(centers):
        print(i + 1, center)
        center_x = int(center[0] / max_x_or_y * 1000)
        center_y = int(center[1] / max_x_or_y * 1000)
        ax.text(center_x, center_y, str(i + 1), fontsize=15)

    ax.axis([0, 1000, 0, 1000])
        
    # TODO - plot the boundaries of the mfovs

    # plot the entire graph
    plt.show()

    return fig
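
A short usage sketch; the tilespec path is a placeholder. Because the function returns the figure, the layout can also be saved to disk after it is shown.

# Hypothetical invocation: display the mfov layout and save it as a PNG.
fig = plot_tilespecs('sec1_tilespec.json')
fig.savefig('sec1_mfov_layout.png', dpi=150)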
Example #4
def match_multiple_sift_features_and_filter(tiles_file,
                                            features_h5_filename_list1,
                                            features_h5_filename_list2,
                                            output_json_filenames,
                                            index_pairs,
                                            processes_num=10):
    parameters = {}
    rod = parameters.get("rod", 0.92)
    iterations = parameters.get("iterations", 1000)
    max_epsilon = parameters.get("maxEpsilon", 100.0)
    min_inlier_ratio = parameters.get("minInlierRatio", 0.01)
    min_num_inlier = parameters.get("minNumInlier", 7)
    model_index = parameters.get("modelIndex", 1)
    max_trust = parameters.get("maxTrust", 3)
    det_delta = parameters.get("delDelta", 0.3)

    assert (len(index_pairs) == len(features_h5_filename_list1))
    assert (len(index_pairs) == len(features_h5_filename_list2))
    assert (len(index_pairs) == len(output_json_filenames))
    logger.info("Create a pool of {} processed".format(processes_num))
    pool = mp.Pool(processes=processes_num)

    indexed_tilespecs = utils.index_tilespec(
        utils.load_tile_specifications(tiles_file))
    pool_results = []
    for i, index_pair in enumerate(index_pairs):
        features_h5_filename1 = features_h5_filename_list1[i]
        features_h5_filename2 = features_h5_filename_list2[i]
        output_json_filename = output_json_filenames[i]

        logger.info(
            "Matching sift features of tilespecs file: {}, indices: {}".format(
                tiles_file, index_pair))
        if index_pair[0] not in indexed_tilespecs:
            logger.info(
                "The given tile_index {0} was not found in the tilespec: {1}".
                format(index_pair[0], tiles_file))
            continue
        if index_pair[1] not in indexed_tilespecs:
            logger.info(
                "The given tile_index {0} was not found in the tilespec: {1}".
                format(index_pair[1], tiles_file))
            continue

        tilespec1 = indexed_tilespecs[index_pair[0]]
        tilespec2 = indexed_tilespecs[index_pair[1]]

        res = pool.apply_async(
            match_single_pair,
            (tilespec1, tilespec2, features_h5_filename1,
             features_h5_filename2, output_json_filename, rod, iterations,
             max_epsilon, min_inlier_ratio, min_num_inlier, model_index,
             max_trust, det_delta))
        pool_results.append(res)

    for res in pool_results:
        res.get()

    pool.close()
    pool.join()
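
Note that all jobs are submitted before any result is collected, and the pool is closed and joined exactly once, outside the loop. A minimal self-contained sketch of the same multiprocessing pattern, independent of the SIFT-specific code:

import multiprocessing as mp

def _square(x):
    return x * x

if __name__ == '__main__':
    pool = mp.Pool(processes=4)
    # Submit every job first...
    results = [pool.apply_async(_square, (i,)) for i in range(10)]
    # ...then collect; get() re-raises any exception a worker hit
    values = [r.get() for r in results]
    pool.close()
    pool.join()
    print(values)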
Example #5
def match_single_sift_features_and_filter(tiles_file, feature_h5_filename1,
                                          feature_h5_filename2,
                                          output_json_filename, index_pair):
    parameters = {}
    rod = parameters.get("rod", 0.92)
    iterations = parameters.get("iterations", 1000)
    max_epsilon = parameters.get("maxEpsilon", 100.0)
    min_inlier_ratio = parameters.get("minInlierRatio", 0.01)
    min_num_inlier = parameters.get("minNumInlier", 7)
    model_index = parameters.get("modelIndex", 1)
    max_trust = parameters.get("maxTrust", 3)
    det_delta = parameters.get("delDelta", 0.3)

    logger.info(
        "Matching sift features of tilespecs file: {}, indices: {}".format(
            tiles_file, index_pair))
    indexed_tilespecs = utils.index_tilespec(
        utils.load_tile_specifications(tiles_file))
    if index_pair[0] not in indexed_tilespecs:
        logger.info(
            "The given tile_index {0} was not found in the tilespec: {1}".
            format(index_pair[0], tiles_file))
        return
    if index_pair[1] not in indexed_tilespecs:
        logger.info(
            "The given tile_index {0} was not found in the tilespec: {1}".
            format(index_pair[1], tiles_file))
        return

    tilespec1 = indexed_tilespecs[index_pair[0]]
    tilespec2 = indexed_tilespecs[index_pair[1]]

    match_single_pair(tilespec1, tilespec2, feature_h5_filename1,
                      feature_h5_filename2, output_json_filename, rod,
                      iterations, max_epsilon, min_inlier_ratio,
                      min_num_inlier, model_index, max_trust, det_delta)
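
A hedged usage sketch; the paths are placeholders, and the index pair is assumed to name two mfov indices that exist in the tilespec.

# Hypothetical invocation for a single mfov pair.
match_single_sift_features_and_filter('sec_tilespec.json',
                                      'features/mfov_001_sifts.h5',
                                      'features/mfov_002_sifts.h5',
                                      'mfov_001_002_matches.json',
                                      (1, 2))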
Example #6
def match_layers_pmcc_matching(tiles_fname1,
                               tiles_fname2,
                               pre_matches_fname,
                               out_fname,
                               conf_fname=None):
    params = utils.conf_from_file(conf_fname, 'MatchLayersBlockMatching')
    if params is None:
        params = {}
    cv_wrap_module.setNumThreads(1)

    # Parameters for the matching
    hex_spacing = params.get("hexspacing", 500)
    scaling = params.get("scaling", 0.2)
    template_size = params.get("template_size", 200)
    template_size *= scaling

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)

    # Generate a hexagonal grid over the first section's bounding box
    bb = BoundingBox.read_bbox(tiles_fname1)
    hexgr = generatehexagonalgrid(bb, hex_spacing)

    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching")
        return

    best_transformations = get_best_transformations(mfov_pre_matches)
    img_matches = get_img_matches(ts1, tile_centers1, ts2, tile_centers2,
                                  best_transformations, mfov_centers1)

    # Load one tile image (index 50) just to determine the resized tile shape
    img1_url = ts1[50]["mipmapLevels"]["0"]["imageUrl"]
    img1_url = img1_url.replace("file://", "")
    img1 = cv2.imread(img1_url, 0)
    img1_resized = cv2.resize(img1, (0, 0), fx=scaling, fy=scaling)
    img1width = img1_resized.shape[0]
    img1height = img1_resized.shape[1]

    # Iterate over the hexagonal points and find a match in the second section
    thedictionary = {}
    for i in range(len(hexgr)):
        if i % 1000 == 0 and i > 0:
            print(i)

        # Find the tile in the first section that contains this hexagonal grid point
        img1_ind = get_closest_index_to_point(hexgr[i], tile_centers1tree)
        if img1_ind is None:
            continue
        if not is_point_in_img(ts1[img1_ind], hexgr[i]):
            continue

        # Get expected point of hexgr[i] in the second section
        img1_offset = get_image_top_left(ts1, img1_ind)
        expected_transform = find_best_mfov_transformation(
            ts1[img1_ind]["mfov"], best_transformations, mfov_centers1)
        img1_template = get_blank_template_from_img_and_point(
            img1width, img1height, template_size,
            (np.array(hexgr[i]) - img1_offset) * scaling)
        if img1_template is None:
            continue
        startx, starty, w, h, not_on_mesh = img1_template
        center_point1 = (np.array([startx + w / 2, starty + h / 2]) / scaling
                         + img1_offset)
        expected_new_center = np.dot(expected_transform,
                                     np.append(center_point1, [1]))[0:2]

        img2_inds = img_matches[img1_ind]
        img2s = get_images_indices_from_indices_and_point(
            ts2, img2_inds, expected_new_center)

        for img2_ind in img2s:
            # Record this grid point under the (img1_ind, img2_ind) tile pair
            thedictionary.setdefault(img1_ind, {}).setdefault(
                img2_ind, []).append(hexgr[i])

    with open(out_fname, 'w') as out:
        json.dump(thedictionary, out, indent=4)

    print("Done.")
Example #7
def view_pre_pmcc_mfov(pre_matches_fname, targeted_mfov, output_fname, scale):

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)
    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching, aborting")
        return

    tiles_fname1 = mfov_pre_matches["tilespec1"]
    tiles_fname2 = mfov_pre_matches["tilespec2"]

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)
    indexed_ts2 = utils.index_tilespec(ts2)

    sorted_mfovs1 = sorted(indexed_ts1.keys())
    sorted_mfovs2 = sorted(indexed_ts2.keys())

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    tile_centers2tree = spatial.KDTree(tile_centers2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)
    mfov_centers2 = get_mfov_centers_from_json(indexed_ts2)

    best_transformations = get_best_transformations(
        mfov_pre_matches, tiles_fname1, tiles_fname2, mfov_centers1,
        mfov_centers2, sorted_mfovs1, sorted_mfovs2)

    mfov_tiles = indexed_ts1[targeted_mfov]
    tiles_boundaries1 = []
    for tile in mfov_tiles.values():
        p1 = np.array([tile["bbox"][0], tile["bbox"][2]])
        p2 = np.array([tile["bbox"][0], tile["bbox"][3]])
        p3 = np.array([tile["bbox"][1], tile["bbox"][2]])
        p4 = np.array([tile["bbox"][1], tile["bbox"][3]])
        tiles_boundaries1.extend([p1, p2, p3, p4])

    # Create the (lazy) renderers for the two sections
    img1_renderer = TilespecAffineRenderer(indexed_ts1[targeted_mfov].values())

    # Use the mfov's expected transform (from section 1 to section 2) to transform img1
    img1_to_img2_transform = np.array(
        find_best_mfov_transformation(targeted_mfov, best_transformations,
                                      mfov_centers1)[:2])
    img1_renderer.add_transformation(img1_to_img2_transform)

    # Find the relevant tiles from section 2
    section2_tiles_indices = set()

    for p in tiles_boundaries1:
        # Find the tile in the first section that contains this boundary point
        img1_ind = get_closest_index_to_point(p, tile_centers1tree)
        if img1_ind is None:
            continue
        if ts1[img1_ind]["mfov"] != targeted_mfov:
            continue
        if not is_point_in_img(ts1[img1_ind], p):
            continue

        img1_point = p

        # Find the point on img2
        img1_point_on_img2 = np.dot(img1_to_img2_transform[:2, :2],
                                    img1_point) + img1_to_img2_transform[:2, 2]

        # Find the tile that is closest to that point
        img2_ind = get_closest_index_to_point(img1_point_on_img2,
                                              tile_centers2tree)
        #print("img1_ind {}, img2_ind {}".format(img1_ind, img2_ind))
        section2_tiles_indices.add(img2_ind)

    print("section2 tiles (#tiles={}): {}".format(len(section2_tiles_indices),
                                                  section2_tiles_indices))

    # Scale down the rendered images
    scale_transformation = np.array([[scale, 0., 0.], [0., scale, 0.]])
    img1_renderer.add_transformation(scale_transformation)

    img2_renderer = TilespecAffineRenderer(
        [ts2[tile_index] for tile_index in section2_tiles_indices])
    img2_renderer.add_transformation(scale_transformation)

    # render the images
    start_time = time.time()
    img1, start_point1 = img1_renderer.render()
    print("image 1 rendered in {} seconds".format(time.time() - start_time))
    start_time = time.time()
    img2, start_point2 = img2_renderer.render()
    print("image 2 rendered in {} seconds".format(time.time() - start_time))

    # Save an animated overlay of the two rendered sections
    save_animated_gif(img1, start_point1, img2, start_point2, output_fname)
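
A minimal usage sketch; the pre-match file, mfov index, and output name are placeholders, and save_animated_gif is assumed to alternate between the two rendered sections.

# Hypothetical invocation: overlay mfov 1 of section 1 onto section 2 at 10% scale.
view_pre_pmcc_mfov('sec1_sec2_pre_matches.json',
                   targeted_mfov=1,
                   output_fname='mfov1_overlay.gif',
                   scale=0.1)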
Example #8
def main(jsonfile):
    with open(jsonfile, 'r') as data_file1:
        data1 = json.load(data_file1)

    ts1_fname = data1["tilespec1"].replace("file://", "")

    with open(ts1_fname, 'r') as ts_file1:
        ts1 = json.load(ts_file1)
        ts1_indexed = utils.index_tilespec(ts1)

    # The mfovs list in the match file must be a subset of the mfovs list in the tilespec
    mfov1_to_center2 = {}
    for mfov_match in data1["matches"]:
        mfov1_to_center2[mfov_match["mfov1"]] = np.array(
            mfov_match["section2_center"])
    centers2 = [
        mfov1_to_center2[mfov1] for mfov1 in sorted(mfov1_to_center2.keys())
    ]

    mfovs1_bb = [
        BoundingBox.read_bbox_from_ts(ts1_indexed[mfov].values())
        for mfov in sorted(mfov1_to_center2.keys())
    ]
    centers1 = [
        np.array([(bb.from_x + bb.to_x) / 2.0, (bb.from_y + bb.to_y) / 2.0])
        for bb in mfovs1_bb
    ]

    pointmatches = list(zip(centers1, centers2))
    if len(pointmatches) == 0:
        return

    points1 = np.array([p1 for p1, _ in pointmatches], dtype=float)
    points2 = np.array([p2 for _, p2 in pointmatches], dtype=float)
    centroid1 = points1.mean(axis=0)
    centroid2 = points2.mean(axis=0)
    # Cross-covariance of the centered point sets (2x2)
    h = np.matrix((points1 - centroid1).T.dot(points2 - centroid2))
    # SVD of the cross-covariance gives the best-fit rotation (Kabsch)
    U, S, Vt = np.linalg.svd(h)
    R = Vt.T.dot(U.T)
    plt.figure()
    for i in range(0, len(pointmatches)):
        point1, point2 = pointmatches[i]
        point1 = np.matrix(point1 - centroid1).dot(R.T).tolist()[0]
        point2 = point2 - centroid2
        plt.plot([point1[0], point2[0]], [point1[1], point2[1]])
        plt.scatter(point1[0], point1[1])
    plt.axis('equal')
    plt.show()
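
The rotation estimate above is the SVD-based (Kabsch) procedure: center both point sets, accumulate the cross-covariance H, and take R = V.dot(U.T) from the SVD H = U S V^T (the reflection correction of the full Kabsch algorithm is omitted here, as it is in main). A self-contained sketch of just that step, with a tiny synthetic check:

import numpy as np

def estimate_rotation(points1, points2):
    """Best-fit rotation R such that R.dot(p1) ~ p2 for centered point sets."""
    p1 = np.asarray(points1, dtype=float)
    p2 = np.asarray(points2, dtype=float)
    # Center both point sets
    p1 -= p1.mean(axis=0)
    p2 -= p2.mean(axis=0)
    # Cross-covariance and its SVD
    h = p1.T.dot(p2)
    u, s, vt = np.linalg.svd(h)
    return vt.T.dot(u.T)

# Synthetic check: the second set is the first rotated by 90 degrees
pts1 = [[1, 0], [0, 1], [-1, 0], [0, -1]]
pts2 = [[0, 1], [-1, 0], [0, -1], [1, 0]]
print(np.round(estimate_rotation(pts1, pts2), 3))  # ~ [[0, -1], [1, 0]]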
Example #9
def match_layers_pmcc_matching(tiles_fname1, tiles_fname2, pre_matches_fname,
                               out_fname, conf_fname=None):
    params = utils.conf_from_file(conf_fname, 'MatchLayersBlockMatching')
    if params is None:
        params = {}

    # Parameters for the matching
    hex_spacing = params.get("hexspacing", 1500)
    scaling = params.get("scaling", 0.2)
    template_size = params.get("template_size", 200)

    template_size *= scaling
    print("Actual template size (after scaling): {}".format(template_size))

    # Parameters for PMCC filtering
    min_corr = params.get("min_correlation", 0.2)
    max_curvature = params.get("maximal_curvature_ratio", 10)
    max_rod = params.get("maximal_ROD", 0.9)

    print(params)
    debug_save_matches = "debug_save_matches" in params
    if debug_save_matches:
        print("Debug mode - on")
        # Create a debug directory for the match thumbnails
        import datetime
        debug_dir = os.path.join(
            os.path.dirname(out_fname),
            'debug_matches_{}'.format(datetime.datetime.now().isoformat()))
        os.mkdir(debug_dir)

    print("Block-Matching+PMCC layers: {} and {}".format(tiles_fname1, tiles_fname2))

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)
    indexed_ts2 = utils.index_tilespec(ts2)

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)

    out_jsonfile = {}
    out_jsonfile['tilespec1'] = tiles_fname1
    out_jsonfile['tilespec2'] = tiles_fname2

    # Generate a hexagonal grid over the first section's bounding box
    bb = BoundingBox.read_bbox(tiles_fname1)
    hexgr = generatehexagonalgrid(bb, hex_spacing)

    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching, saving an empty matches output file")
        out_jsonfile['runtime'] = 0
        out_jsonfile['mesh'] = hexgr
        finalpointmatches = []
        out_jsonfile['pointmatches'] = finalpointmatches
        with open(out_fname, 'w') as out:
            json.dump(out_jsonfile, out, indent=4)
        return

    starttime = time.time()

    # Compute the best transformations for each of the mfovs in section 1 (the transformations to section 2)
    best_transformations = get_best_transformations(mfov_pre_matches)

    img_matches = get_img_matches(ts1, tile_centers1, ts2, tile_centers2, best_transformations, mfov_centers1)

    point_matches = []

    actual_matches_num = 0
    # Iterate over the hexagonal points and find a match in the second section
    print("Matching {} points between the two sections".format(len(hexgr)))
    for i in range(len(hexgr)):
        if i % 1000 == 0 and i > 0:
            print(i)
        # Find the tile in the first section that contains this hexagonal grid point
        img1_ind = get_closest_index_to_point(hexgr[i], tile_centers1tree)
        if img1_ind is None:
            continue
        if not is_point_in_img(ts1[img1_ind], hexgr[i]):
            continue

        # Load the image, and get the template from it
        img1_url = ts1[img1_ind]["mipmapLevels"]["0"]["imageUrl"]
        img1_url = img1_url.replace("file://", "")
        img1 = cv2.imread(img1_url, 0)
        img1_resized = cv2.resize(img1, (0, 0), fx=scaling, fy=scaling)
        img1_offset = get_image_top_left(ts1, img1_ind)
        expected_transform = find_best_mfov_transformation(ts1[img1_ind]["mfov"], best_transformations, mfov_centers1)

        img1_template = get_template_from_img_and_point(img1_resized, template_size, (np.array(hexgr[i]) - img1_offset) * scaling)
        if img1_template is None:
            continue

        # Find the template coordinates
        chosen_template, startx, starty, not_on_mesh = img1_template
        # print("img1_template starts at: {}, {} and original point should have been {}".format(startx, starty, np.array(hexgr[i]) - img1_offset))
        w, h = chosen_template.shape
        center_point1 = np.array([startx + w / 2, starty + h / 2]) / scaling + img1_offset
        # print("center_point1", center_point1)
        expected_new_center = np.dot(expected_transform, np.append(center_point1, [1]))[0:2]

        ro, col = chosen_template.shape
        rad2deg = -180 / math.pi
        # TODO - assumes only rigid transformation, should be more general
        angle_of_rot = rad2deg * math.atan2(expected_transform[1][0], expected_transform[0][0])
        rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle_of_rot, 1)
        rotated_temp1 = cv2.warpAffine(chosen_template, rotation_matrix, (col, ro))
        xaa = int(w / 2.9)
        rotated_and_cropped_temp1 = rotated_temp1[(w // 2 - xaa):(w // 2 + xaa),
                                                  (h // 2 - xaa):(h // 2 + xaa)]
        neww, newh = rotated_and_cropped_temp1.shape

        # TODO - assumes a single transformation, but there might be more
        img1_model = models.Transforms.from_tilespec(ts1[img1_ind]["transforms"][0])
        img1_center_point = img1_model.apply(np.array([starty + h / 2, startx + w / 2]) / scaling)  # + imgoffset1

        # Get the images from section 2 around the expected matching location
        img2_inds = img_matches[img1_ind]
        img2s = get_images_from_indices_and_point(ts2, img2_inds, expected_new_center)
        actual_matches_num += 1
        for (img2, img2_ind) in img2s:
            img2_resized = cv2.resize(img2, (0, 0), fx=scaling, fy=scaling)
            img2_offset = get_image_top_left(ts2, img2_ind)

            result, reason = PMCC_filter_example.PMCC_match(img2_resized, rotated_and_cropped_temp1, min_correlation=min_corr, maximal_curvature_ratio=max_curvature, maximal_ROD=max_rod)
            if result is not None:
                reasonx, reasony = reason
                # TODO - assumes a single transformation, but there might be more
                img2_model = models.Transforms.from_tilespec(ts2[img2_ind]["transforms"][0])
                img2_center_point = img2_model.apply(np.array([reasony + newh / 2, reasonx + neww / 2]) / scaling)  # + imgoffset2
                point_matches.append((img1_center_point, img2_center_point, not_on_mesh))
                if debug_save_matches:
                    debug_out_fname1 = os.path.join(debug_dir, "debug_match_sec1{}-{}_sec2{}-{}_image1.png".format(hexgr[i][0], hexgr[i][1], reasonx, reasony))
                    debug_out_fname2 = os.path.join(debug_dir, "debug_match_sec1{}-{}_sec2{}-{}_image2.png".format(hexgr[i][0], hexgr[i][1], reasonx, reasony))
                    cv2.imwrite(debug_out_fname1, rotated_and_cropped_temp1)
                    temp1_final_sizex = rotated_and_cropped_temp1.shape[0]
                    temp1_final_sizey = rotated_and_cropped_temp1.shape[1]
                    img2_cut_out = img2_resized[reasonx:(reasonx + temp1_final_sizex), reasony:(reasony + temp1_final_sizey)]
                    cv2.imwrite(debug_out_fname2, img2_cut_out)

    print("Found {} matches out of possible {} points (on section points: {})".format(len(point_matches), len(hexgr), actual_matches_num))
    # Save the output
    print("Saving output to: {}".format(out_fname))
    out_jsonfile['runtime'] = time.time() - starttime
    out_jsonfile['mesh'] = hexgr

    final_point_matches = []
    for pm in point_matches:
        p1, p2, nmesh = pm
        record = {}
        record['point1'] = p1.tolist()
        record['point2'] = p2.tolist()
        record['isvirtualpoint'] = nmesh
        final_point_matches.append(record)

    out_jsonfile['pointmatches'] = final_point_matches
    with open(out_fname, 'w') as out:
        json.dump(out_jsonfile, out, indent=4)
    print("Done.")