Example #1
def view_post_pmcc_mfov(pmcc_matches_fname, matches_num, seed, scale,
                        output_dir):

    # Load the preliminary matches
    with open(pmcc_matches_fname, 'r') as data_matches:
        pmcc_matches_data = json.load(data_matches)
    if len(pmcc_matches_data["pointmatches"]) == 0:
        print("No matches were found in pmcc-matching, aborting")
        return

    tiles_fname1 = pmcc_matches_data["tilespec1"]
    tiles_fname2 = pmcc_matches_data["tilespec2"]

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)
    indexed_ts2 = utils.index_tilespec(ts2)

    # Create the (lazy) renderers for the two sections
    img1_renderer = TilespecAffineRenderer(ts1)
    img2_renderer = TilespecAffineRenderer(ts2)

    scale_transformation = np.array([[scale, 0., 0.], [0., scale, 0.]])
    img1_renderer.add_transformation(scale_transformation)
    img2_renderer.add_transformation(scale_transformation)

    # Pick a random subset of the matches
    np.random.seed(seed)
    matches_idxs = np.random.choice(len(pmcc_matches_data["pointmatches"]),
                                    matches_num,
                                    replace=False)
    template_size = int(TEMPLATE_SIZE * scale)
    print("Actual template size: {}".format(template_size))

    utils.create_dir(output_dir)
    # Save the match thumbnails to the output dir
    for idx in matches_idxs:
        # rescale the matches
        p1 = np.array(pmcc_matches_data["pointmatches"][idx]["point1"])
        p2 = np.array(pmcc_matches_data["pointmatches"][idx]["point2"])
        print("Saving match {}: {} and {}".format(idx, p1, p2))
        p1_scaled = p1 * scale
        p2_scaled = p2 * scale
        out_fname_prefix = os.path.join(
            output_dir,
            'pmcc_match_{}-{}_{}-{}'.format(int(p1[0]), int(p1[1]), int(p2[0]),
                                            int(p2[1])))

        # Crop and save
        cropped_img1, _ = img1_renderer.crop(p1_scaled[0] - template_size,
                                             p1_scaled[1] - template_size,
                                             p1_scaled[0] + template_size,
                                             p1_scaled[1] + template_size)
        cv2.imwrite('{}_image1.jpg'.format(out_fname_prefix), cropped_img1)
        cropped_img2, _ = img2_renderer.crop(p2_scaled[0] - template_size,
                                             p2_scaled[1] - template_size,
                                             p2_scaled[0] + template_size,
                                             p2_scaled[1] + template_size)
        cv2.imwrite('{}_image2.jpg'.format(out_fname_prefix), cropped_img2)
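
A minimal usage sketch for the viewer above; the file name and parameter values below are placeholders, not taken from the source.

# Hypothetical invocation of view_post_pmcc_mfov; all values are placeholders
view_post_pmcc_mfov('pmcc_matches_sec001_sec002.json',  # PMCC matcher output
                    matches_num=10,   # number of random matches to thumbnail
                    seed=17,          # fixed seed for reproducible sampling
                    scale=0.1,        # render at 10% resolution
                    output_dir='pmcc_thumbnails')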
Example #2
def match_layers_sift_features(tiles_fname1,
                               features_dir1,
                               tiles_fname2,
                               features_dir2,
                               out_fname,
                               conf_fname=None):
    params = utils.conf_from_file(conf_fname,
                                  'MatchLayersSiftFeaturesAndFilter')
    if params is None:
        params = {}
    actual_params = {}
    # Parameters for the matching
    actual_params["max_attempts"] = params.get("max_attempts", 10)
    actual_params["num_filtered_cutoff"] = params.get("num_filtered_cutoff",
                                                      50)
    actual_params["filter_rate_cutoff"] = params.get("filter_rate_cutoff",
                                                     0.25)
    actual_params["ROD_cutoff"] = params.get("ROD_cutoff", 0.92)

    # Parameters for the RANSAC
    actual_params["model_index"] = params.get("model_index", 1)
    actual_params["iterations"] = params.get("iterations", 500)
    actual_params["max_epsilon"] = params.get("max_epsilon", 500.0)
    actual_params["min_inlier_ratio"] = params.get("min_inlier_ratio", 0.01)
    actual_params["min_num_inlier"] = params.get("min_num_inliers", 7)
    actual_params["max_trust"] = params.get("max_trust", 3)

    print("Matching layers: {} and {}".format(tiles_fname1, tiles_fname2))

    starttime = time.clock()

    # Read the tilespecs
    indexed_ts1 = utils.index_tilespec(utils.load_tilespecs(tiles_fname1))
    indexed_ts2 = utils.index_tilespec(utils.load_tilespecs(tiles_fname2))

    num_mfovs1 = len(indexed_ts1)
    num_mfovs2 = len(indexed_ts2)

    # Match the two sections
    retval = analyze2slices(indexed_ts1, indexed_ts2, num_mfovs1, num_mfovs2,
                            features_dir1, features_dir2, actual_params)

    # Save the output
    jsonfile = {}
    jsonfile['tilespec1'] = tiles_fname1
    jsonfile['tilespec2'] = tiles_fname2
    jsonfile['matches'] = retval
    jsonfile['runtime'] = time.clock() - starttime
    with open(out_fname, 'w') as out:
        json.dump(jsonfile, out, indent=4)
    print("Done.")
Example #3
def plot_tilespecs(ts_file):
    # Read the tilespecs file
    ts = utils.load_tilespecs(ts_file)
    # Index the tilespecs according to mfov and tile_index (1-based)
    indexed_ts = utils.index_tilespec(ts)

    # Get the centers
    centers = [get_center(indexed_ts[m]) for m in sorted(indexed_ts.keys())]
    max_x_or_y = np.max(centers)

    # Create the figure
    fig = plt.figure()
    fig.suptitle('{} - mfovs'.format(os.path.basename(ts_file)),
                 fontsize=14, fontweight='bold')

    ax = fig.add_subplot(111)


    # Plot the mfov index at each center location
    for i, center in enumerate(centers):
        print("{} {}".format(i + 1, center))
        center_x = int(center[0] / max_x_or_y * 1000)
        center_y = int(center[1] / max_x_or_y * 1000)
        ax.text(center_x, center_y, str(i + 1), fontsize=15)

    ax.axis([0, 1000, 0, 1000])
        
    # TODO - plot the boundaries of the mfovs

    # plot the entire graph
    plt.show()

    return fig
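
plot_tilespecs relies on a get_center helper that is not shown. A plausible sketch, assuming each indexed mfov maps tile indices to tilespecs whose bbox is laid out as [from_x, to_x, from_y, to_y] (the layout used by the bbox accesses elsewhere in these examples):

import numpy as np

def get_center(mfov_tiles):
    # Average the bbox centers of all tiles in the mfov; the bbox layout
    # [from_x, to_x, from_y, to_y] is an assumption
    centers = [[(t["bbox"][0] + t["bbox"][1]) / 2.0,
                (t["bbox"][2] + t["bbox"][3]) / 2.0]
               for t in mfov_tiles.values()]
    return np.mean(centers, axis=0)
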
def create_surf_features(tiles_fname, out_fname, index, conf_fname=None):

    # load tilespecs files
    tilespecs = utils.load_tilespecs(tiles_fname)
    tilespec = tilespecs[index]

    # load the image
    image_path = tilespec["mipmapLevels"]["0"]["imageUrl"]
    image_path = image_path.replace("file://", "")
    if image_path.endswith(".jp2"):
        img_gray = glymur.Jp2k(image_path)[:]  # load in full resolution
    else:
        img_gray = cv2.imread(image_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    print "Computing surf features for image: {}".format(image_path)

    # compute features for the given index
    surf = cv2.SURF()
    pts, descs = surf.detectAndCompute(img_gray, None)
    # SURF descriptors are floating-point, so keep them as float32 rather
    # than truncating to uint8
    descs = np.array(descs, dtype=np.float32)

    print "Found {} features".format(len(descs))
    # Save the features

    print "Saving surf features at: {}".format(out_fname)
    with h5py.File(out_fname, 'w') as hf:
        hf.create_dataset("imageUrl",
                          data=np.array(image_path.encode("utf-8"), dtype='S'))
        hf.create_dataset("pts/responses",
                          data=np.array([p.response for p in pts],
                                        dtype=np.float32))
        hf.create_dataset("pts/locations",
                          data=np.array([p.pt for p in pts], dtype=np.float32))
        hf.create_dataset("pts/sizes",
                          data=np.array([p.size for p in pts],
                                        dtype=np.float32))
        hf.create_dataset("pts/octaves",
                          data=np.array([p.octave for p in pts],
                                        dtype=np.float32))
        hf.create_dataset("descs", data=descs)
def create_post_filter_jobs(slayer, filtered_ts_fname, layers_data, jobs,
                            matched_sifts_dir, workspace_dir, output_dir,
                            conf_file_name):

    layer_matched_sifts_intra_dir = os.path.join(
        matched_sifts_dir, layers_data[slayer]['prefix'], 'intra')
    layer_matched_sifts_inter_dir = os.path.join(
        matched_sifts_dir, layers_data[slayer]['prefix'], 'inter')
    create_dir(layer_matched_sifts_intra_dir)
    create_dir(layer_matched_sifts_inter_dir)

    # Read the filtered tilespec
    tiles_fname_prefix = os.path.splitext(
        os.path.basename(filtered_ts_fname))[0]
    cur_tilespec = load_tilespecs(filtered_ts_fname)

    mfovs = set()

    for ts in cur_tilespec:
        mfovs.add(ts["mfov"])

    # create the intra matched sifts directories
    for mfov in mfovs:
        mfov_intra_dir = os.path.join(layer_matched_sifts_intra_dir, str(mfov))
        create_dir(mfov_intra_dir)

    # read every pair of overlapping tiles, and match their sift features
    # TODO - use some other method to detect overlapping tiles
    for pair in itertools.combinations(xrange(len(cur_tilespec)), 2):
        idx1 = pair[0]
        idx2 = pair[1]
        ts1 = cur_tilespec[idx1]
        ts2 = cur_tilespec[idx2]
        # if the two tiles intersect, match them
        bbox1 = BoundingBox.fromList(ts1["bbox"])
        bbox2 = BoundingBox.fromList(ts2["bbox"])
        if bbox1.overlap(bbox2):
            imageUrl1 = ts1["mipmapLevels"]["0"]["imageUrl"]
            imageUrl2 = ts2["mipmapLevels"]["0"]["imageUrl"]
            tile_fname1 = os.path.basename(imageUrl1).split('.')[0]
            tile_fname2 = os.path.basename(imageUrl2).split('.')[0]
            index_pair = [
                "{}_{}".format(ts1["mfov"], ts1["tile_index"]),
                "{}_{}".format(ts2["mfov"], ts2["tile_index"])
            ]
            if ts1["mfov"] == ts2["mfov"]:
                # Intra mfov job
                cur_match_dir = os.path.join(layer_matched_sifts_intra_dir,
                                             str(ts1["mfov"]))
            else:
                # Inter mfov job
                cur_match_dir = layer_matched_sifts_inter_dir
            match_json = os.path.join(
                cur_match_dir,
                "{0}_sift_matches_{1}_{2}.json".format(tiles_fname_prefix,
                                                       tile_fname1,
                                                       tile_fname2))
            # match the features of overlapping tiles
            if not os.path.exists(match_json):
                print "Matching sift of tiles: {0} and {1}".format(
                    imageUrl1, imageUrl2)
                # The filtering step is done, so assume no dependencies
                dependencies = []

                # Check if the job already exists
                if ts1["mfov"] == ts2["mfov"]:
                    # Intra mfov job
                    if ts1["mfov"] in jobs[slayer]['matched_sifts'][
                            'intra'].keys():
                        job_match = jobs[slayer]['matched_sifts']['intra'][
                            ts1["mfov"]]
                    else:
                        job_match = MatchMultipleSiftFeaturesAndFilter(
                            cur_match_dir,
                            filtered_ts_fname,
                            "intra_l{}_{}".format(slayer, ts1["mfov"]),
                            threads_num=4,
                            wait_time=None,
                            conf_fname=conf_file_name)
                        jobs[slayer]['matched_sifts']['intra'][
                            ts1["mfov"]] = job_match
                else:
                    # Inter mfov job
                    if jobs[slayer]['matched_sifts']['inter'] is None:
                        job_match = MatchMultipleSiftFeaturesAndFilter(
                            cur_match_dir,
                            filtered_ts_fname,
                            "inter_{}".format(slayer),
                            threads_num=4,
                            wait_time=None,
                            conf_fname=conf_file_name)
                        jobs[slayer]['matched_sifts']['inter'] = job_match
                    else:
                        job_match = jobs[slayer]['matched_sifts']['inter']
                job_match.add_job(dependencies,
                                  layers_data[slayer]['sifts'][imageUrl1],
                                  layers_data[slayer]['sifts'][imageUrl2],
                                  match_json, index_pair)

            layers_data[slayer]['matched_sifts'].append(match_json)

    # Create a single file that lists all matched-sifts files (the OS cannot
    # handle arbitrarily long argument lists)
    matches_list_file = os.path.join(
        workspace_dir, "{}_matched_sifts_files.txt".format(tiles_fname_prefix))
    write_list_to_file(matches_list_file, layers_data[slayer]['matched_sifts'])

    # Optimize the 2d layer matches (affine)
    opt_montage_json = os.path.join(
        output_dir, "{0}_montaged.json".format(tiles_fname_prefix))
    if not os.path.exists(opt_montage_json):
        print "Optimizing (affine) layer matches: {0}".format(slayer)
        dependencies = []
        if jobs[slayer]['matched_sifts']['inter'] is not None:
            dependencies.append(jobs[slayer]['matched_sifts']['inter'])
        if jobs[slayer]['matched_sifts']['intra'] is not None and len(
                jobs[slayer]['matched_sifts']['intra']) > 0:
            dependencies.extend(
                jobs[slayer]['matched_sifts']['intra'].values())
        job_opt_montage = OptimizeMontageTransform(dependencies,
                                                   filtered_ts_fname,
                                                   matches_list_file,
                                                   opt_montage_json,
                                                   conf_fname=conf_file_name)
    layers_data[slayer]['optimized_montage'] = opt_montage_json
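
The pairing loop above hinges on BoundingBox.fromList and BoundingBox.overlap, which are not shown. A minimal sketch of an axis-aligned overlap test, again assuming the [from_x, to_x, from_y, to_y] bbox layout:

class BoundingBox(object):
    def __init__(self, from_x, to_x, from_y, to_y):
        self.from_x, self.to_x = from_x, to_x
        self.from_y, self.to_y = from_y, to_y

    @classmethod
    def fromList(cls, bbox):
        # bbox is assumed to be [from_x, to_x, from_y, to_y]
        return cls(*bbox)

    def overlap(self, other):
        # Two axis-aligned rectangles intersect iff they overlap on both axes
        return (self.from_x <= other.to_x and other.from_x <= self.to_x and
                self.from_y <= other.to_y and other.from_y <= self.to_y)
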
    # Get all input json files (one per section) into a dictionary
    # {json_fname -> [filtered json fname, sift features file, etc.]}
    json_files = dict(
        (jf, {}) for jf in (glob.glob(os.path.join(args.tiles_dir, '*.json'))))

    skipped_layers = parse_range(args.skip_layers)

    all_layers = []
    jobs = {}
    layers_data = {}

    fixed_tile = 0

    for f in sorted(json_files.keys()):
        tiles_fname_prefix = os.path.splitext(os.path.basename(f))[0]

        cur_tilespec = load_tilespecs(f)

        # read the layer from the file
        layer = None
        for tile in cur_tilespec:
            if tile['layer'] is None:
                print "Error reading layer in one of the tiles in: {0}".format(
                    f)
                sys.exit(1)
            if layer is None:
                layer = int(tile['layer'])
                continue
            if layer != tile['layer']:
                print "Error when reading tiles from {0} found inconsistent layers numbers: {1} and {2}".format(
                    f, layer, tile['layer'])
                sys.exit(1)
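
parse_range, used above to interpret a skip-layers argument, is also not shown. A sketch assuming the conventional "2,5-7,10" range syntax:

def parse_range(range_str):
    # Turn a string like "2,5-7,10" into {2, 5, 6, 7, 10}; an empty or
    # missing string yields an empty set
    layers = set()
    if not range_str:
        return layers
    for part in range_str.split(','):
        if '-' in part:
            low, high = part.split('-')
            layers.update(range(int(low), int(high) + 1))
        else:
            layers.add(int(part))
    return layers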
def match_layers_pmcc_matching(tiles_fname1,
                               tiles_fname2,
                               pre_matches_fname,
                               out_fname,
                               conf_fname=None):
    params = utils.conf_from_file(conf_fname, 'MatchLayersBlockMatching')
    if params is None:
        params = {}
    cv_wrap_module.setNumThreads(1)

    # Parameters for the matching
    hex_spacing = params.get("hexspacing", 500)
    scaling = params.get("scaling", 0.2)
    template_size = params.get("template_size", 200)
    template_size *= scaling

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)

    # Generate a hexagonal grid according to the first section's bounding box
    bb = BoundingBox.read_bbox(tiles_fname1)
    hexgr = generatehexagonalgrid(bb, hex_spacing)

    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching")
        return

    best_transformations = get_best_transformations(mfov_pre_matches)
    img_matches = get_img_matches(ts1, tile_centers1, ts2, tile_centers2,
                                  best_transformations, mfov_centers1)

    # Use an arbitrary tile of section 1 to determine the scaled tile shape
    # (all tiles in a section are assumed to share the same dimensions)
    img1_url = ts1[0]["mipmapLevels"]["0"]["imageUrl"]
    img1_url = img1_url.replace("file://", "")
    img1 = cv2.imread(img1_url, 0)
    img1_resized = cv2.resize(img1, (0, 0), fx=scaling, fy=scaling)
    # numpy shape is (rows, cols)
    img1_height = img1_resized.shape[0]
    img1_width = img1_resized.shape[1]

    # Iterate over the hexagonal points and find a match in the second section
    thedictionary = {}
    for i in range(len(hexgr)):
        if i % 1000 == 0 and i > 0:
            print(i)

        # Find the tile image in the first section that contains the hexagonal grid point
        img1_ind = get_closest_index_to_point(hexgr[i], tile_centers1tree)
        if img1_ind is None:
            continue
        if not is_point_in_img(ts1[img1_ind], hexgr[i]):
            continue

        # Get expected point of hexgr[i] in the second section
        img1_offset = get_image_top_left(ts1, img1_ind)
        expected_transform = find_best_mfov_transformation(
            ts1[img1_ind]["mfov"], best_transformations, mfov_centers1)
        img1_template = get_blank_template_from_img_and_point(
            img1_height, img1_width, template_size,
            (np.array(hexgr[i]) - img1_offset) * scaling)
        if img1_template is None:
            continue
        startx, starty, w, h, not_on_mesh = img1_template
        center_point1 = (np.array([startx + w / 2, starty + h / 2]) / scaling
                         + img1_offset)
        expected_new_center = np.dot(expected_transform,
                                     np.append(center_point1, [1]))[0:2]

        img2_inds = img_matches[img1_ind]
        img2s = get_images_indices_from_indices_and_point(
            ts2, img2_inds, expected_new_center)

        for img2_ind in img2s:
            # Record the hexagonal point under its (img1, img2) tile pair
            img1_matches = thedictionary.setdefault(img1_ind, {})
            img1_matches.setdefault(img2_ind, []).append(hexgr[i])

    with open(out_fname, 'w') as out:
        json.dump(thedictionary, out, indent=4)

    print("Done.")
Example #8
def view_pre_pmcc_mfov(pre_matches_fname, targeted_mfov, output_fname, scale):

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)
    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching, aborting")
        return

    tiles_fname1 = mfov_pre_matches["tilespec1"]
    tiles_fname2 = mfov_pre_matches["tilespec2"]

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)
    indexed_ts2 = utils.index_tilespec(ts2)

    sorted_mfovs1 = sorted(indexed_ts1.keys())
    sorted_mfovs2 = sorted(indexed_ts2.keys())

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    tile_centers2tree = spatial.KDTree(tile_centers2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)
    mfov_centers2 = get_mfov_centers_from_json(indexed_ts2)

    best_transformations = get_best_transformations(
        mfov_pre_matches, tiles_fname1, tiles_fname2, mfov_centers1,
        mfov_centers2, sorted_mfovs1, sorted_mfovs2)

    mfov_tiles = indexed_ts1[targeted_mfov]
    tiles_boundaries1 = []
    for tile in mfov_tiles.values():
        p1 = np.array([tile["bbox"][0], tile["bbox"][2]])
        p2 = np.array([tile["bbox"][0], tile["bbox"][3]])
        p3 = np.array([tile["bbox"][1], tile["bbox"][2]])
        p4 = np.array([tile["bbox"][1], tile["bbox"][3]])
        tiles_boundaries1.extend([p1, p2, p3, p4])

    # Create the (lazy) renderers for the two sections
    img1_renderer = TilespecAffineRenderer(indexed_ts1[targeted_mfov].values())

    # Use the mfov expected transform (from section 1 to section 2) to transform img1
    img1_to_img2_transform = np.array(
        find_best_mfov_transformation(targeted_mfov, best_transformations,
                                      mfov_centers1)[:2])
    img1_renderer.add_transformation(img1_to_img2_transform)

    # Find the relevant tiles from section 2
    section2_tiles_indices = set()

    for p in tiles_boundaries1:
        # Find the tile image in the first section that contains this boundary point
        img1_ind = get_closest_index_to_point(p, tile_centers1tree)
        #print(img1_ind)
        if img1_ind is None:
            continue
        if ts1[img1_ind]["mfov"] != targeted_mfov:
            continue
        if not is_point_in_img(ts1[img1_ind], p):
            continue

        img1_point = p

        # Find the point on img2
        img1_point_on_img2 = np.dot(img1_to_img2_transform[:2, :2],
                                    img1_point) + img1_to_img2_transform[:2, 2]

        # Find the tile that is closest to that point
        img2_ind = get_closest_index_to_point(img1_point_on_img2,
                                              tile_centers2tree)
        #print("img1_ind {}, img2_ind {}".format(img1_ind, img2_ind))
        section2_tiles_indices.add(img2_ind)

    print("section2 tiles (#tiles={}): {}".format(len(section2_tiles_indices),
                                                  section2_tiles_indices))

    # Scale down the rendered images
    scale_transformation = np.array([[scale, 0., 0.], [0., scale, 0.]])
    img1_renderer.add_transformation(scale_transformation)

    img2_renderer = TilespecAffineRenderer(
        [ts2[tile_index] for tile_index in section2_tiles_indices])
    img2_renderer.add_transformation(scale_transformation)

    # render the images
    start_time = time.time()
    img1, start_point1 = img1_renderer.render()
    print("image 1 rendered in {} seconds".format(time.time() - start_time))
    start_time = time.time()
    img2, start_point2 = img2_renderer.render()
    print("image 2 rendered in {} seconds".format(time.time() - start_time))

    end_point1 = start_point1 + np.array([img1.shape[1], img1.shape[0]])
    end_point2 = start_point2 + np.array([img2.shape[1], img2.shape[0]])

    # Compute the combined bounding box of the two rendered images
    start_point = np.minimum(start_point1, start_point2)
    end_point = np.maximum(end_point1, end_point2)

    save_animated_gif(img1, start_point1, img2, start_point2, output_fname)
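
save_animated_gif is not among the shown helpers. A sketch using Pillow: paste each render onto a shared canvas aligned by its global start point, then alternate the two frames so misalignment shows up as wobble. The frame duration is an arbitrary choice.

import numpy as np
from PIL import Image

def save_animated_gif(img1, start_point1, img2, start_point2, output_fname):
    # Combined bounding box of both renders in global coordinates
    start = np.minimum(start_point1, start_point2).astype(int)
    end1 = np.asarray(start_point1) + np.array([img1.shape[1], img1.shape[0]])
    end2 = np.asarray(start_point2) + np.array([img2.shape[1], img2.shape[0]])
    end = np.maximum(end1, end2).astype(int)
    frames = []
    for img, start_point in ((img1, start_point1), (img2, start_point2)):
        canvas = np.zeros((end[1] - start[1], end[0] - start[0]),
                          dtype=np.uint8)
        offset = (np.asarray(start_point) - start).astype(int)
        canvas[offset[1]:offset[1] + img.shape[0],
               offset[0]:offset[0] + img.shape[1]] = img
        frames.append(Image.fromarray(canvas))
    # duration is the per-frame display time in ms; loop=0 loops forever
    frames[0].save(output_fname, save_all=True, append_images=frames[1:],
                   duration=700, loop=0)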
def match_layers_pmcc_matching(tiles_fname1, tiles_fname2, pre_matches_fname,
                               out_fname, conf_fname=None):
    params = utils.conf_from_file(conf_fname, 'MatchLayersBlockMatching')
    if params is None:
        params = {}

    # Parameters for the matching
    hex_spacing = params.get("hexspacing", 1500)
    scaling = params.get("scaling", 0.2)
    template_size = params.get("template_size", 200)

    template_size *= scaling
    print("Actual template size (after scaling): {}".format(template_size))

    # Parameters for PMCC filtering
    min_corr = params.get("min_correlation", 0.2)
    max_curvature = params.get("maximal_curvature_ratio", 10)
    max_rod = params.get("maximal_ROD", 0.9)

    print(params)
    debug_save_matches = False
    if "debug_save_matches" in params.keys():
        print("Debug mode - on")
        debug_save_matches = True
    if debug_save_matches:
        # Create a debug directory
        import datetime
        debug_dir = os.path.join(
            os.path.dirname(out_fname),
            'debug_matches_{}'.format(datetime.datetime.now().isoformat()))
        os.mkdir(debug_dir)

    print("Block-Matching+PMCC layers: {} and {}".format(tiles_fname1, tiles_fname2))

    # Read the tilespecs
    ts1 = utils.load_tilespecs(tiles_fname1)
    ts2 = utils.load_tilespecs(tiles_fname2)
    indexed_ts1 = utils.index_tilespec(ts1)
    indexed_ts2 = utils.index_tilespec(ts2)

    # num_mfovs1 = len(indexed_ts1)
    # num_mfovs2 = len(indexed_ts2)

    # Get the tiles centers for each section
    tile_centers1 = get_tile_centers_from_json(ts1)
    tile_centers1tree = spatial.KDTree(tile_centers1)
    tile_centers2 = get_tile_centers_from_json(ts2)
    mfov_centers1 = get_mfov_centers_from_json(indexed_ts1)

    # Load the preliminary matches
    with open(pre_matches_fname, 'r') as data_matches:
        mfov_pre_matches = json.load(data_matches)

    out_jsonfile = {}
    out_jsonfile['tilespec1'] = tiles_fname1
    out_jsonfile['tilespec2'] = tiles_fname2

    # Generate a hexagonal grid according to the first section's bounding box
    bb = BoundingBox.read_bbox(tiles_fname1)
    hexgr = generatehexagonalgrid(bb, hex_spacing)

    if len(mfov_pre_matches["matches"]) == 0:
        print("No matches were found in pre-matching, saving an empty matches output file")
        out_jsonfile['runtime'] = 0
        out_jsonfile['mesh'] = hexgr
        finalpointmatches = []
        out_jsonfile['pointmatches'] = finalpointmatches
        with open(out_fname, 'w') as out:
            json.dump(out_jsonfile, out, indent=4)
        return

    starttime = time.clock()

    # Compute the best transformations for each of the mfovs in section 1 (the transformations to section 2)
    best_transformations = get_best_transformations(mfov_pre_matches)

    img_matches = get_img_matches(ts1, tile_centers1, ts2, tile_centers2, best_transformations, mfov_centers1)

    point_matches = []

    actual_matches_num = 0
    # Iterate over the hexagonal points and find a match in the second section
    print("Matching {} points between the two sections".format(len(hexgr)))
    for i in range(len(hexgr)):
        if i % 1000 == 0 and i > 0:
            print(i)
        # Find the tile image in the first section that contains the hexagonal grid point
        img1_ind = get_closest_index_to_point(hexgr[i], tile_centers1tree)
        if img1_ind is None:
            continue
        if not is_point_in_img(ts1[img1_ind], hexgr[i]):
            continue

        # Load the image, and get the template from it
        img1_url = ts1[img1_ind]["mipmapLevels"]["0"]["imageUrl"]
        img1_url = img1_url.replace("file://", "")
        img1 = cv2.imread(img1_url, 0)
        img1_resized = cv2.resize(img1, (0, 0), fx=scaling, fy=scaling)
        img1_offset = get_image_top_left(ts1, img1_ind)
        expected_transform = find_best_mfov_transformation(ts1[img1_ind]["mfov"], best_transformations, mfov_centers1)

        img1_template = get_template_from_img_and_point(
            img1_resized, template_size,
            (np.array(hexgr[i]) - img1_offset) * scaling)
        if img1_template is None:
            continue

        # Find the template coordinates
        chosen_template, startx, starty, not_on_mesh = img1_template
        # print("img1_template starts at: {}, {} and original point should have been {}".format(startx, starty, np.array(hexgr[i]) - img1_offset))
        w, h = chosen_template.shape  # NOTE: numpy shape is (rows, cols)
        center_point1 = np.array([startx + w / 2, starty + h / 2]) / scaling + img1_offset
        # print("center_point1", center_point1)
        expected_new_center = np.dot(expected_transform, np.append(center_point1, [1]))[0:2]

        ro, col = chosen_template.shape
        rad2deg = -180 / math.pi
        # TODO - assumes only rigid transformation, should be more general
        angle_of_rot = rad2deg * math.atan2(expected_transform[1][0], expected_transform[0][0])
        rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle_of_rot, 1)
        rotated_temp1 = cv2.warpAffine(chosen_template, rotation_matrix, (col, ro))
        xaa = int(w / 2.9)
        rotated_and_cropped_temp1 = rotated_temp1[(w / 2 - xaa):(w / 2 + xaa), (h / 2 - xaa):(h / 2 + xaa)]
        neww, newh = rotated_and_cropped_temp1.shape

        # TODO - assumes a single transformation, but there might be more
        img1_model = models.Transforms.from_tilespec(ts1[img1_ind]["transforms"][0])
        img1_center_point = img1_model.apply(np.array([starty + h / 2, startx + w / 2]) / scaling)  # + imgoffset1

        # Get the images from section 2 around the expected matching location
        # (img1ind, img2inds) = imgmatches[findindwithinmatches(imgmatches, img1ind)]
        img2_inds = img_matches[img1_ind]
        img2s = get_images_from_indices_and_point(ts2, img2_inds, expected_new_center)
        actual_matches_num += 1
        for (img2, img2_ind) in img2s:
            img2_resized = cv2.resize(img2, (0, 0), fx=scaling, fy=scaling)
            # imgoffset2 = get_image_top_left(slice2, img2mfov, img2num, data2)
            img2_offset = get_image_top_left(ts2, img2_ind)

            # template1topleft = np.array([startx, starty]) / scaling + imgoffset1
            result, reason = PMCC_filter_example.PMCC_match(
                img2_resized, rotated_and_cropped_temp1,
                min_correlation=min_corr,
                maximal_curvature_ratio=max_curvature, maximal_ROD=max_rod)
            if result is not None:
                reasonx, reasony = reason
                # img1topleft = np.array([startx, starty]) / scaling + imgoffset1
                # img2topleft = np.array(reason) / scaling + imgoffset2
                # TODO - assumes a single transformation, but there might be more
                img2_model = models.Transforms.from_tilespec(ts2[img2_ind]["transforms"][0])
                img2_center_point = img2_model.apply(np.array([reasony + newh / 2, reasonx + neww / 2]) / scaling)  # + imgoffset2
                point_matches.append((img1_center_point, img2_center_point, not_on_mesh))
                if debug_save_matches:
                    debug_out_fname1 = os.path.join(debug_dir, "debug_match_sec1{}-{}_sec2{}-{}_image1.png".format(hexgr[i][0], hexgr[i][1], reasonx, reasony))
                    debug_out_fname2 = os.path.join(debug_dir, "debug_match_sec1{}-{}_sec2{}-{}_image2.png".format(hexgr[i][0], hexgr[i][1], reasonx, reasony))
                    cv2.imwrite(debug_out_fname1, rotated_and_cropped_temp1)
                    temp1_final_sizex = rotated_and_cropped_temp1.shape[0]
                    temp1_final_sizey = rotated_and_cropped_temp1.shape[1]
                    img2_cut_out = img2_resized[reasonx:(reasonx + temp1_final_sizex), reasony:(reasony + temp1_final_sizey)]
                    cv2.imwrite(debug_out_fname2, img2_cut_out)

    print("Found {} matches out of possible {} points (on section points: {})".format(len(point_matches), len(hexgr), actual_matches_num))
    # Save the output
    print("Saving output to: {}".format(out_fname))
    out_jsonfile['runtime'] = time.clock() - starttime
    out_jsonfile['mesh'] = hexgr

    final_point_matches = []
    for pm in point_matches:
        p1, p2, nmesh = pm
        record = {}
        record['point1'] = p1.tolist()
        record['point2'] = p2.tolist()
        record['isvirtualpoint'] = nmesh
        final_point_matches.append(record)

    out_jsonfile['pointmatches'] = final_point_matches
    with open(out_fname, 'w') as out:
        json.dump(out_jsonfile, out, indent=4)
    print("Done.")