Code example #1
File: graph_search.py  Project: rhasan/algorithms
def main():
    (g, geo_locations) = input_graph_undirected(INPUT_GRAPH_LOCATION)
    #g.pretty_print()
    p = ProblemShortestPath(g, g.node(0), g.node(1))

    #uniform cost search
    sw1 = StopWatch()
    (u_cost, result_path) = uniform_cost_search(p)
    el1 = sw1.elapsed_milliseconds()

    print("Uniform cost search")
    print("Solution:", u_cost)
    print("Path:", result_path)
    print("Time:", el1)

    #A* search
    p.init_huristic(geo_locations)
    sw1.reset()
    (a_cost, result_path) = a_star(p)
    el1 = sw1.elapsed_milliseconds()
    print("====================")
    print("A* search")
    print("Solution:", a_cost)
    print("Path:", result_path)
    print("Time:", el1)

    #A* beam search
    sw1.reset()
    beam_size = 3
    (a_cost, result_path) = a_star_beam_search(p, beam_size)
    el1 = sw1.elapsed_milliseconds()
    print("====================")
    print("A* beam search")
    print("Beam size:", beam_size)
    print("Solution:", a_cost)
    print("Path:", result_path)
    print("Time:", el1)
Code example #2
    def ensemble_models_id(self,
                           single_id,
                           set_type='train',
                           model='stage1_unet',
                           show=True,
                           verbose=True):
        self._load_ensembles(model)
        d = self._get_cell_data(single_id, set_type)
        logger.debug('image size=%dx%d' % (d.img_h, d.img_w))

        total_model_size = len(self.ensembles['rcnn']) + len(
            self.ensembles['unet'])
        logger.debug('total_model_size=%d rcnn=%d unet=%d' %
                     (total_model_size, len(
                         self.ensembles['rcnn']), len(self.ensembles['unet'])))

        rcnn_instances = []
        rcnn_scores = []

        # TODO : RCNN Ensemble
        rcnn_ensemble = False
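        # collect mask instances and scores from every RCNN ensemble member;
        # the scores are rescaled by rcnn_score_rescale below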
        for idx, data in enumerate(self.ensembles['rcnn']):
            if set_type == 'train':
                instances, scores = data['valid_instances'].get(
                    single_id, (None, None))
                rcnn_ensemble = True
            else:
                # TODO
                ls = data['test_instances'].get(single_id, None)
                if ls is None:
                    instances = scores = None
                else:
                    instances = [x[0] for x in ls]
                    scores = [x[1] for x in ls]
                    rcnn_ensemble = True
                    logger.debug('rcnn # instances = %d' % len(instances))

            if instances is None:
                logger.warning('Not found id=%s in RCNN %d Model' %
                               (single_id, idx + 1))
                continue

            rcnn_instances.extend(
                [instance[:d.img_h, :d.img_w] for instance in instances])
            rcnn_scores.extend([
                s * HyperParams.get().rcnn_score_rescale for s in scores
            ])  # rescale scores

        total_instances = []
        total_scores = []

        # TODO : UNet Ensemble
        for idx, data in enumerate(self.ensembles['unet']):
            if set_type == 'train':
                instances, scores = data['valid_instances'].get(
                    single_id, (None, None))
            else:
                instances, scores = data['test_instances'].get(
                    single_id, (None, None))

            if instances is None:
                logger.warning('Not found id=%s in UNet %d Model' %
                               (single_id, idx + 1))
                continue

            total_instances.extend(instances)
            total_scores.extend(scores)

            # if single_id in ['646f5e00a2db3add97fb80a83ef3c07edd1b17b1b0d47c2bd650cdcab9f322c0']:  # takes too long
            #     logger.warning('no ensemble id=%s' % single_id)
            #     break

        watch = StopWatch()
        watch.start()
        logger.debug('voting+ size=%d' % len(total_instances))

        # TODO : Voting?
        voting_th = HyperParams.get().ensemble_voting_th
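        # filter_by_voting (helper semantics assumed): keep an instance only if
        # at least voting_th of the pooled instances overlap it (overlap
        # threshold 0.3); the bounding rects act as a cheap overlap pre-check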

        rects = [get_rect_of_mask(a) for a in total_instances]
        voted = []
        for i, x in enumerate(total_instances):
            voted.append(
                filter_by_voting(
                    (x, total_instances, voting_th, 0.3, rects[i], rects)))

        total_instances = list(compress(total_instances, voted))
        total_scores = list(compress(total_scores, voted))

        watch.stop()
        logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # nms
        watch.start()
        logger.debug('nms+ size=%d' % len(total_instances))
        instances, scores = Network.nms(
            total_instances,
            total_scores,
            None,
            thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # high threshold if not exists in RCNN
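        # keep an instance if at least one RCNN instance votes for it;
        # otherwise it survives only with a score above ensemble_th_no_rcnn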
        if rcnn_ensemble:
            voted = []
            for i, x in enumerate(instances):
                voted.append(
                    filter_by_voting((x, rcnn_instances, 1, 0.3, None, None)))

            new_instances = []
            new_scores = []
            for instance, score, v in zip(instances, scores, voted):
                if v:
                    new_instances.append(instance)
                    new_scores.append(score)
                elif score > HyperParams.get().ensemble_th_no_rcnn:
                    new_instances.append(instance)
                    new_scores.append(score)
            instances, scores = new_instances, new_scores

        # nms with rcnn
        instances = instances + rcnn_instances
        scores = scores + rcnn_scores
        watch.start()
        logger.debug('nms_rcnn+ size=%d' % len(instances))
        instances, scores = Network.nms(
            instances, scores, None, thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms_rcnn- size=%d elapsed=%.5f' %
                     (len(instances), watch.get_elapsed()))
        watch.reset()

        # remove overlaps
        logger.debug('remove overlaps+')
        sorted_idx = [
            i[0] for i in sorted(enumerate(instances),
                                 key=lambda x: get_size_of_mask(x[1]),
                                 reverse=False)
        ]
        instances = [instances[x] for x in sorted_idx]
        scores = [scores[x] for x in sorted_idx]

        instances2 = [
            ndimage.morphology.binary_fill_holes(i) for i in instances
        ]
        instances2, scores2 = Network.remove_overlaps(instances2, scores)

        # remove deleted instances
        logger.debug('remove deleted+ size=%d' % len(instances2))
        voted = []
        for x in instances2:
            voted.append(filter_by_voting((x, instances, 1, 0.75, None, None)))
        instances = list(compress(instances2, voted))
        scores = list(compress(scores2, voted))

        # TODO : Filter by score?
        logger.debug('filter by score+ size=%d' % len(instances))
        score_filter_th = HyperParams.get().ensemble_score_th
        if score_filter_th > 0.0:
            logger.debug('filter_by_score=%.3f' % score_filter_th)
            # pair first, then split: the original reassigned instances before
            # filtering scores, so the second zip ran over mismatched lists
            kept = [(i, s) for i, s in zip(instances, scores)
                    if s > score_filter_th]
            instances = [i for i, _ in kept]
            scores = [s for _, s in kept]

        logger.debug('finishing+ size=%d' % len(instances))
        image = d.image(is_gray=False)
        score_desc = []
        labels = []
        if len(d.masks) > 0:  # has label masks
            labels = list(d.multi_masks(transpose=False))
            tp, fp, fn = get_multiple_metric(thr_list, instances, labels)

            logger.debug('instances=%d, labels=%d' %
                         (len(instances), len(labels)))
            for i, thr in enumerate(thr_list):
                desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                    (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
                logger.debug(desc)
                score_desc.append(desc)
            score = np.mean(tp / (tp + fp + fn))
            logger.debug('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' %
                         (score, np.mean(tp), np.mean(fp), np.mean(fn)))
        else:
            score = 0.0

        if show:
            img_vis = Network.visualize(image, labels, instances, None)
            cv2.imshow('valid', img_vis)
            cv2.waitKey(0)
        else:
            return {
                'instance_scores': scores,
                'score': score,
                'image': image,
                'instances': instances,
                'labels': labels,
                'score_desc': score_desc
            }
Code example #3
    def single_id(self,
                  model,
                  checkpoint,
                  single_id,
                  set_type='train',
                  show=True,
                  verbose=True):
        if model:
            self.set_network(model)
            self.network.build()

        self.init_session()
        if checkpoint:
            saver = tf.train.Saver()
            saver.restore(self.sess, checkpoint)
            if verbose:
                logger.info('restored from checkpoint, %s' % checkpoint)

        d = self._get_cell_data(single_id, set_type)
        h, w = d.img.shape[:2]
        shortedge = min(h, w)
        logger.debug('%s image size=(%d x %d)' % (single_id, w, h))

        watch = StopWatch()
        logger.debug('preprocess+')
        d = self.network.preprocess(d)

        image = d.image(is_gray=False)

        total_instances = []
        total_scores = []
        total_from_set = []
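        # total_from_set tags each instance with the augmentation that produced
        # it: 1 = original scale, 2-3 = flips, 4 = rescale, 5-6 = flip+rescale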
        cutoff_instance_max = HyperParams.get().post_cutoff_max_th
        cutoff_instance_avg = HyperParams.get().post_cutoff_avg_th

        watch.start()
        logger.debug('inference at default scale+ %dx%d' % (w, h))
        inference_result = self.network.inference(
            self.sess,
            image,
            cutoff_instance_max=cutoff_instance_max,
            cutoff_instance_avg=cutoff_instance_avg)
        instances_pre, scores_pre = inference_result[
            'instances'], inference_result['scores']
        instances_pre = Network.resize_instances(instances_pre,
                                                 target_size=(h, w))
        total_instances = total_instances + instances_pre
        total_scores = total_scores + scores_pre
        total_from_set = [1] * len(instances_pre)
        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        watch.start()  # (fix) the timer was stopped above; restart it for the flip passes
        logger.debug('inference with flips+')
        # re-inference using flip
        for flip_orientation in range(2):
            flipped = cv2.flip(image.copy(), flip_orientation)
            inference_result = self.network.inference(
                self.sess,
                flipped,
                cutoff_instance_max=cutoff_instance_max,
                cutoff_instance_avg=cutoff_instance_avg)
            instances_flip, scores_flip = inference_result[
                'instances'], inference_result['scores']
            instances_flip = [
                cv2.flip(instance.astype(np.uint8), flip_orientation)
                for instance in instances_flip
            ]
            instances_flip = Network.resize_instances(instances_flip,
                                                      target_size=(h, w))

            total_instances = total_instances + instances_flip
            total_scores = total_scores + scores_flip
            total_from_set = total_from_set + [2 + flip_orientation
                                               ] * len(instances_flip)

        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()
        watch.start()  # (fix) restart the timer for the rescale and rescale+flip passes
        logger.debug('inference with scaling+flips+')

        # re-inference after rescale image
        def inference_with_scale(image, resize_target):
            image = cv2.resize(image.copy(),
                               None,
                               None,
                               resize_target,
                               resize_target,
                               interpolation=cv2.INTER_AREA)
            inference_result = self.network.inference(
                self.sess,
                image,
                cutoff_instance_max=cutoff_instance_max,
                cutoff_instance_avg=cutoff_instance_avg)
            instances_rescale, scores_rescale = inference_result[
                'instances'], inference_result['scores']

            instances_rescale = Network.resize_instances(instances_rescale,
                                                         target_size=(h, w))
            return instances_rescale, scores_rescale

        max_mask = get_max_size_of_masks(instances_pre)
        logger.debug('max_mask=%d' % max_mask)
        resize_target = HyperParams.get().test_aug_scale_t / max_mask
        resize_target = min(HyperParams.get().test_aug_scale_max,
                            resize_target)
        resize_target = max(HyperParams.get().test_aug_scale_min,
                            resize_target)
        # import math  # needed only for the commented-out sigmoid rescale below
        # resize_target = 2.0 / (1.0 + math.exp(-1.5*(resize_target - 1.0)))
        # resize_target = max(0.5, resize_target)
        resize_target = max(228.0 / shortedge, resize_target)
        # if resize_target > 1.0 and min(w, h) > 1000:
        #     logger.debug('too large image, no resize')
        #     resize_target = 0.8
        logger.debug('resize_target=%.4f' % resize_target)

        instances_rescale, scores_rescale = inference_with_scale(
            image, resize_target)
        total_instances = total_instances + instances_rescale
        total_scores = total_scores + scores_rescale
        total_from_set = total_from_set + [4] * len(instances_rescale)

        # re-inference using flip + rescale
        for flip_orientation in range(2):
            flipped = cv2.flip(image.copy(), flip_orientation)
            instances_flip, scores_flip = inference_with_scale(
                flipped, resize_target)
            instances_flip = [
                cv2.flip(instance.astype(np.uint8), flip_orientation)
                for instance in instances_flip
            ]
            instances_flip = Network.resize_instances(instances_flip,
                                                      target_size=(h, w))

            total_instances = total_instances + instances_flip
            total_scores = total_scores + scores_flip
            total_from_set = total_from_set + [5 + flip_orientation
                                               ] * len(instances_flip)

        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        watch.start()
        logger.debug('voting+ size=%d' % len(total_instances))

        # TODO : Voting?
        voting_th = HyperParams.get().post_voting_th
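        # same voting filter as the ensemble path: drop instances that too few
        # of the augmented predictions agree with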
        rects = [get_rect_of_mask(a) for a in total_instances]
        voted = []
        for i, x in enumerate(total_instances):
            voted.append(
                filter_by_voting(
                    (x, total_instances, voting_th, 0.3, rects[i], rects)))

        total_instances = list(compress(total_instances, voted))
        total_scores = list(compress(total_scores, voted))
        total_from_set = list(compress(total_from_set, voted))

        watch.stop()
        logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # nms
        watch.start()
        logger.debug('nms+ size=%d' % len(total_instances))
        instances, scores = Network.nms(
            total_instances,
            total_scores,
            total_from_set,
            thresh=HyperParams.get().test_aug_nms_iou)
        watch.stop()
        logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # remove overlaps
        logger.debug('remove overlaps+')
        sorted_idx = [
            i[0] for i in sorted(enumerate(instances),
                                 key=lambda x: get_size_of_mask(x[1]),
                                 reverse=True)
        ]
        instances = [instances[x] for x in sorted_idx]
        scores = [scores[x] for x in sorted_idx]

        instances = [
            ndimage.morphology.binary_fill_holes(i) for i in instances
        ]
        instances, scores = Network.remove_overlaps(instances, scores)

        # TODO : Filter by score?
        # logger.debug('filter by score+')
        # score_filter_th = HyperParams.get().post_filter_th
        # if score_filter_th > 0.0:
        #     logger.debug('filter_by_score=%.3f' % score_filter_th)
        #     kept = [(i, s) for i, s in zip(instances, scores) if s > score_filter_th]
        #     instances = [i for i, _ in kept]
        #     scores = [s for _, s in kept]

        logger.debug('finishing+')
        image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
        score_desc = []
        labels = []
        if len(d.masks) > 0:  # has label masks
            labels = list(d.multi_masks(transpose=False))
            labels = Network.resize_instances(labels, target_size=(h, w))
            tp, fp, fn = get_multiple_metric(thr_list, instances, labels)

            if verbose:
                logger.info('instances=%d, reinf(%.3f) labels=%d' %
                            (len(instances), resize_target, len(labels)))
            for i, thr in enumerate(thr_list):
                desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                    (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
                if verbose:
                    logger.info(desc)
                score_desc.append(desc)
            score = np.mean(tp / (tp + fp + fn))
            if verbose:
                logger.info('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' %
                            (score, np.mean(tp), np.mean(fp), np.mean(fn)))
        else:
            score = 0.0

        if show:
            img_vis = Network.visualize(image, labels, instances, None)
            cv2.imshow('valid', img_vis)
            cv2.waitKey(0)
        if not model:
            return {
                'instance_scores': scores,
                'score': score,
                'image': image,
                'instances': instances,
                'labels': labels,
                'score_desc': score_desc
            }
Code example #4
# This file is used solely for quick tests; it can be removed from the project at any time.

import numpy as np

from stopwatch import StopWatch
from kdtree import BucketedKDTree, KDTree

a = np.random.rand(5000, 2)

sw = StopWatch()
obt = BucketedKDTree(a, optimized=True)
sw.reset('Build time for Optimized BKD')
bt = BucketedKDTree(a)
sw.reset('Build time for BKD')
t = KDTree(a)
sw.reset('Build time for regular KD')
for value in a:
    if not obt.has(value):
        print('Missing Value!!')
sw.reset('Traversal time for Optimized BKD')
for value in a:
    if not bt.has(value):
        print('Missing Value!!')
sw.reset('Traversal time for BKD')
for value in a:
    if not t.has(value):
        print('Missing Value!!')
sw.reset('Traversal time for regular KD')
Code example #5
    def load_queries(self):
        if not os.path.exists(DIRECTORY):
            os.makedirs(DIRECTORY)

        data_split = int(TOTAL_QUERY * 0.6)
        validation_split = int(TOTAL_QUERY * 0.2)
        test_split = int(TOTAL_QUERY * 0.2)
        print("data_split", data_split)
        print("validation_split", validation_split)
        print("test_split", test_split)

        f = open(DBPEDIA_QUERY_LOG, 'r')  # text mode: lines are parsed as strings below
        fq = open(DIRECTORY+"x_query.txt",'w')
        ft = open(DIRECTORY+"y_time.txt",'w')
        ff = open(DIRECTORY+"x_features.txt",'w')
        x_f_csv = csv.writer(ff)
        sparql = SPARQLWrapper(DBPEDIA_ENDPOINT)
        f_extractor = FeatureExtractor()

        sw1 = StopWatch()
        sw2 = StopWatch()
        print_log_split = int(TOTAL_QUERY / 10)

        count = 0
        for line in f:
            if count % print_log_split == 0:
                print(count, "queries processed in", sw2.elapsed_seconds(), "seconds")

            if count >= TOTAL_QUERY:
                break

            if count == data_split:
                fq.close()
                ft.close()
                ff.close()
                fq = open(DIRECTORY+"xval_query.txt",'w')
                ft = open(DIRECTORY+"yval_time.txt",'w')
                ff = open(DIRECTORY+"xval_features.txt",'w')
                x_f_csv = csv.writer(ff)
            elif count == (data_split+validation_split):
                fq.close()
                ft.close()
                ff.close()
                fq = open(DIRECTORY+"xtest_query.txt",'w')
                ft = open(DIRECTORY+"ytest_time.txt",'w')
                ff = open(DIRECTORY+"xtest_features.txt",'w')
                x_f_csv = csv.writer(ff)


            try:
                row = line.split()
                query_log = row[6][1:-1]
                #print(query_log)
                # assumes: import urllib.parse as urlparse (Python 3)
                par = urlparse.parse_qs(urlparse.urlparse(query_log).query)
                #util.url_decode(row[6])
                sparql_query = par['query'][0]

                if sparql._parseQueryType(sparql_query) != SELECT:
                    continue

                #print(sparql_query)
                #print(row)

                sparql_query = f_extractor.get_dbp_sparql(sparql_query)
                #print(sparql_query)

                feature_vector = f_extractor.get_features(sparql_query)

                if feature_vector is None:
                    print("feature vector not found")
                    continue

                sparql.setQuery(sparql_query)
                sparql.setReturnFormat(JSON)

                sw1.reset()
                results = sparql.query().convert()
                elapsed = sw1.elapsed_milliseconds()

                result_rows = len(results["results"]["bindings"])

                # if result_rows == 0:
                #     continue

                # print("QUERY =", sparql_query)
                # print("feature vector:", feature_vector)
                # print(elapsed, "milliseconds")
                # print(results)
                # print("rows", result_rows)
                # print("-----------------------")

                fq.write(query_log + '\n')
                ft.write(str(elapsed) + '\n')
                x_f_csv.writerow(feature_vector)
                count += 1
            except Exception as inst:
                print("Exception", inst)

        f.close()
        fq.close()
        ft.close()
        ff.close()
        print(count, "queries processed")
Code example #6
def run():
    sw = StopWatch()

    a = np.random.rand(n, 2) * s_range + s_min
    b = np.random.rand(m, 2) * s_range + s_min

    xa = a[:, 0]
    ya = a[:, 1]

    xb = b[:, 0]
    yb = b[:, 1]

    xlim = np.asarray(
        [np.min([xa.min(), xb.min()]),
         np.max([xa.max(), xb.max()])])
    ylim = np.asarray(
        [np.min([ya.min(), yb.min()]),
         np.max([ya.max(), yb.max()])])

    exp = (xlim[1] - xlim[0]) * x_exp
    xlim += [-exp, exp]
    exp = (ylim[1] - ylim[0]) * y_exp
    ylim += [-exp, exp]

    plt.xlim(xlim)
    plt.ylim(ylim)

    plt.plot(xa,
             ya,
             a_style,
             alpha=set_opacity,
             zorder=a_order,
             markersize=a_size)
    plt.plot(xb,
             yb,
             b_style,
             alpha=set_opacity,
             zorder=b_order,
             markersize=b_size)

    total_nv = 0
    total_kd = 0
    total_bkd = 0
    total_obkd = 0

    print "Initializing naive module..."
    sw.start()
    nf = NaiveFinder(a)
    bt_nv = sw.elapsed()
    total_nv += bt_nv
    sw.reset()

    print "Initializing K-D Tree module..."
    sw.start()
    kdf = KDFinder(a)
    bt_kd = sw.elapsed()
    total_kd += bt_kd
    sw.lap()

    print "Initializing Bucketed K-D Tree module..."
    sw.start()
    bkdf = BKDFinder(a)
    bt_bkd = sw.elapsed()
    total_bkd += bt_bkd
    sw.lap()

    print "Initializing Optimized Bucketed K-D Tree module..."
    sw.start()
    obkdf = BKDFinder(a)
    bt_obkd = sw.elapsed()
    total_obkd += bt_obkd
    sw.lap()

    for i in range(m):
        print(i)
        p1 = b[i, :]

        sw.start()
        found = nf.find_closest_m(p1, K)
        total_nv += sw.elapsed()

        def check_mismatch(h_f, finder):
            # If there's a mismatch with ground-truth values, save K-D search steps for debugging
            if not (np.asarray(found)[:, 1] == np.asarray(h_f)[:, 1]).all():
                print("Mismatch", np.asarray(found)[:, 1], np.asarray(h_f)[:, 1])
                for element in found:
                    p2 = element[0]
                    plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                             color=ground_truth_col,
                             zorder=2,
                             linewidth=2)
                finder.setup_plot(xlim, ylim, True)
                finder.find_closest_m(p1, 5)
                sw.start()
                finder.find_closest_m(p1, 5)
                for element in h_f:
                    p2 = element[0]
                    plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                             color=test_col,
                             zorder=3,
                             linewidth=1.5)
                print "Done"
                plt.show()

        sw.start()
        kdfound = kdf.find_closest_m(p1, K)
        total_kd += sw.elapsed()

        check_mismatch(kdfound, kdf)

        sw.start()
        bkdfound = bkdf.find_closest_m(p1, K)
        total_bkd += sw.elapsed()

        check_mismatch(bkdfound, bkdf)

        sw.start()
        obkdfound = obkdf.find_closest_m(p1, K)
        total_obkd += sw.elapsed()

        check_mismatch(obkdfound, obkdf)

    found = nf.find_closest_m(p1, 5)
    for element in found:
        p2 = element[0]
        h1 = plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                      color=ground_truth_col,
                      zorder=2,
                      linewidth=2)

    kdf.setup_plot(xlim, ylim, save_steps)
    kdfound = kdf.find_closest_m(p1, 5)
    for element in kdfound:
        p2 = element[0]
        h2 = plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                      color=test_col,
                      zorder=3,
                      linewidth=1.5)

    if zoom_in:
        points = np.asarray(kdfound)[:, 0]
        xs = np.asarray([p[0] for p in points])
        ys = np.asarray([p[1] for p in points])

        xlim = np.asarray([xs.min(), xs.max()])
        ylim = np.asarray([ys.min(), ys.max()])

        exp = (xlim[1] - xlim[0]) * x_exp
        xlim += [-exp, exp]
        exp = (ylim[1] - ylim[0]) * y_exp
        ylim += [-exp, exp]

        for ax in plt.gcf().axes:
            ax.set_xlim(xlim)
            ax.set_ylim(ylim)

    if full_screen:
        mng = plt.get_current_fig_manager()
        print(mng.full_screen_toggle())

    print('')
    print('Doing', m, 'queries in', n, 'records for', K, 'closest')
    print('')
    print('Method\t\t\tTotal Time\t\t\tBuild Time\t\t\tMean per-query')
    print('Naive\t\t\t', total_nv, '\t\t', bt_nv, '\t\t', (total_nv - bt_nv) / m)
    print('KD Tree\t\t\t', total_kd, '\t\t', bt_kd, '\t\t', (total_kd - bt_kd) / m)
    print('BKD Tree\t\t', total_bkd, '\t\t', bt_bkd, '\t\t', (total_bkd - bt_bkd) / m)
    print('OBKD Tree\t\t', total_obkd, '\t\t', bt_obkd, '\t\t', (total_obkd - bt_obkd) / m)

    plt.show()