Example #1
    def test_stopwatch_full_cancel(self):
        """Test that an entire span - from root to children, can be cancelled."""

        sw = StopWatch()

        sw.start('root')
        sw.start('child')
        sw.start('grand')
        sw.cancel('grand')
        sw.cancel('child')
        sw.cancel('root')

        assert not sw.get_last_aggregated_report()
        assert not sw._cancelled_spans

        sw = StopWatch()
        with sw.timer('root'):
            with sw.timer('child'):
                with sw.timer('grandchild'):
                    sw.cancel('grandchild')
                    sw.cancel('child')
                    sw.cancel('root')

        assert not sw.get_last_aggregated_report()
        assert not sw._cancelled_spans
Example #2
import time

from stopwatch import StopWatch  # assumed import path, matching the other examples


def howLongToCountNumbers(startNum, endNum):
    """Time how long it takes to sum the integers in [startNum, endNum)."""
    stopwatch = StopWatch(time.time())
    stopwatch.start()
    total = 0
    for i in range(startNum, endNum):
        total += i
    stopwatch.stop()
    print(stopwatch.getElapsedTime())
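This example (and Example #24 below) assumes a textbook-style StopWatch with start(), stop(), and getElapsedTime() methods. A minimal sketch of such a class, assuming getElapsedTime() reports milliseconds as Example #24 suggests:

import time

class StopWatch:
    """Minimal stand-in for the textbook StopWatch API used above (a sketch)."""

    def __init__(self, startTime=None):
        self._start = startTime if startTime is not None else time.time()
        self._end = self._start

    def start(self):
        self._start = time.time()

    def stop(self):
        self._end = time.time()

    def getElapsedTime(self):
        # assumed to report elapsed milliseconds, per Example #24's print
        return (self._end - self._start) * 1000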
Example #3
 def global_sw(self):
     """Returns the thread local stopwatch (creating if it doesn't exists)"""
     if not hasattr(self.threadlocal_sws, 'sw'):
         self.threadlocal_sws.sw = StopWatch(
             export_aggregated_timers_func=self.export_agg_timers_func,
             time_func=self.time_func,
         )
     return self.threadlocal_sws.sw
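For this lazy-creation pattern to work, threadlocal_sws must be a threading.local() instance so each thread sees its own sw attribute. A minimal sketch of the surrounding container; the class name and constructor are assumptions, not from the original code:

import threading

class StopWatchManager(object):  # hypothetical name
    def __init__(self, export_agg_timers_func=None, time_func=None):
        self.export_agg_timers_func = export_agg_timers_func
        self.time_func = time_func
        # threading.local() gives each thread its own attribute namespace,
        # so global_sw() creates at most one StopWatch per thread
        self.threadlocal_sws = threading.local()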
Example #4
 def test_time_func_default(self):
     """Make sure that the default time_func=None"""
     sw = StopWatch(time_func=None)
     with sw.timer('root'):
         pass
     agg_report = sw.get_last_aggregated_report()
     tr_data = agg_report.root_timer_data
     assert tr_data.name == 'root'
     assert tr_data.end_time >= tr_data.start_time
Example #5
    def test_override_exports(self):
        export_tracing = Mock()
        export_timers = Mock()
        sw = StopWatch(
            export_tracing_func=export_tracing,
            export_aggregated_timers_func=export_timers,
        )
        add_timers(sw)
        agg_report = sw.get_last_aggregated_report()
        traces = sw.get_last_trace_report()

        export_timers.assert_called_once_with(aggregated_report=agg_report)
        export_tracing.assert_called_once_with(reported_traces=traces)

        assert agg_report.aggregated_values == {
            'root': [900000.0, 1, None],
            'root#child1': [240000.0, 2, MyBuckets.BUCKET_A],
            'root#child1#grand_children1': [20000.0, 1, None],
            'root#child1#grand_children2': [80000.0, 2, None],
            'root#child1#grand_children3': [10000.0, 1, None],
            'root#child2': [560000.0, 1, MyBuckets.BUCKET_B],
            'root#child2#grand_children1': [260000.0, 1, None],
            'root#child2#grand_children3': [10000.0, 1, None],
        }
        assert agg_report.root_timer_data.start_time == 20.0
        assert agg_report.root_timer_data.end_time == 920.0
        assert agg_report.root_timer_data.name == 'root'
        assert agg_report.root_timer_data.trace_annotations == [
            TraceAnnotation('Cooltag', '1', 50),
            TraceAnnotation('Slowtag', '1', 920),
        ]

        # Traces are listed in the same order that scopes close
        assert [(trace.name, trace.log_name, trace.start_time, trace.end_time,
                 trace.parent_span_id) for trace in traces] == [
                     ('grand_children1', 'root#child1#grand_children1', 60, 80,
                      traces[2].span_id),
                     ('grand_children2', 'root#child1#grand_children2', 100,
                      120, traces[2].span_id),
                     ('child1', 'root#child1', 40, 140, traces[9].span_id),
                     ('grand_children3', 'root#child1#grand_children3', 180,
                      190, traces[5].span_id),
                     ('grand_children2', 'root#child1#grand_children2', 220,
                      280, traces[5].span_id),
                     ('child1', 'root#child1', 160, 300, traces[9].span_id),
                     ('grand_children3', 'root#child2#grand_children3', 380,
                      390, traces[8].span_id),
                     ('grand_children1', 'root#child2#grand_children1', 520,
                      780, traces[8].span_id),
                     ('child2', 'root#child2', 320, 880, traces[9].span_id),
                     ('root', 'root', 20, 920, None),
                 ]
        assert all(trace.trace_annotations == [] for trace in traces[:9])
        assert traces[9].trace_annotations == [
            TraceAnnotation('Cooltag', '1', 50),
            TraceAnnotation('Slowtag', '1', 920),
        ]
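From the assertions above, each aggregated_values entry appears to be a three-element list: total time in milliseconds, the number of times the span ran, and its bucket (or None). A sketch of unpacking one entry under that reading:

# format inferred from the assertions above, not from documentation
total_ms, count, bucket = agg_report.aggregated_values['root#child1']
assert total_ms == 240000.0          # two runs of child1, 240s in total
assert count == 2
assert bucket == MyBuckets.BUCKET_A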
Example #6
 def test_stopwatch_cancel(self):
     """Test that spans can be correctly cancelled and not reported."""
     sw = StopWatch()
     sw.start('root')
     sw.start('child')
     sw.cancel('child')
     sw.end('root')
     agg_values = sw.get_last_aggregated_report().aggregated_values
     assert len(agg_values) == 1
     assert 'root' in agg_values
Example #7
 def test_sampling_timer(self):
     for i in range(100):
         sw = StopWatch()
         with sw.timer('root', start_time=20, end_time=120):
             with sw.sampling_timer('child', p=0.5, start_time=40, end_time=100):
                 pass
         agg_report = sw.get_last_aggregated_report()
         assert len(agg_report.aggregated_values) in (1, 2)
         if len(agg_report.aggregated_values) == 2:
             assert agg_report.aggregated_values['root#child'] == [60000.0, 1, None]
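A quick empirical check of the sampling rate, built only from the API exercised above (and assuming start_time/end_time are optional for sampling_timer, as they are for timer): with p=0.5 the child span should be recorded on roughly half of the runs.

hits = 0
runs = 1000
for _ in range(runs):
    sw = StopWatch()
    with sw.timer('root'):
        with sw.sampling_timer('child', p=0.5):
            pass
    if 'root#child' in sw.get_last_aggregated_report().aggregated_values:
        hits += 1
print('child sampled in %.1f%% of runs' % (100.0 * hits / runs))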
Example #8
 def test_stopwatch_cancel_context_manager(self):
     """Test that spans can be cancelled while inside a span context."""
     sw = StopWatch()
     with sw.timer('root'):
         with sw.timer('child'):
             sw.cancel('child')
             with sw.timer('grand'):
                 pass
     agg_values = sw.get_last_aggregated_report().aggregated_values
     assert len(agg_values) == 2
     assert all([span in agg_values for span in ('root', 'root#grand')])
Example #9
    def test_exception_annotation(self):
        class SpecialError(Exception):
            pass

        sw = StopWatch()
        with pytest.raises(SpecialError):
            with sw.timer('root', start_time=10, end_time=1000):
                raise SpecialError("Ahhh")
        trace_report = sw.get_last_trace_report()
        assert trace_report[0].trace_annotations == [
            TraceAnnotation('Exception', 'SpecialError', 1000),
        ]
Example #10
def follow_for_ms(tank, ms):
    """
    ``tank``: the MoveTank object that is following a line
    ``ms`` : the number of milliseconds to follow the line
    """
    if not hasattr(tank, 'stopwatch') or tank.stopwatch is None:
        tank.stopwatch = StopWatch()
        tank.stopwatch.start()

    if tank.stopwatch.value_ms >= ms:
        tank.stopwatch = None
        return False
    else:
        return True
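The function is written to be polled: it returns True while the stopwatch reads under ms, and False (clearing its state) once time is up. A hedged usage sketch; drive_step() is a placeholder for whatever line-following action the caller performs, not part of the original code:

while follow_for_ms(tank, 3000):
    tank.drive_step()  # placeholder action while the 3000 ms window is open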
Example #11
    def test_scope_in_loop(self):
        sw = StopWatch()
        with sw.timer('root', start_time=20, end_time=120):
            for t in range(30, 100, 10):
                with sw.timer('child', start_time=t, end_time=t + 5):
                    pass

        agg_report = sw.get_last_aggregated_report()
        assert agg_report.aggregated_values == {
            'root': [100000.0, 1, None],
            'root#child': [35000.0, 7, None],
        }
        assert agg_report.root_timer_data.start_time == 20.0
        assert agg_report.root_timer_data.end_time == 120.0
        assert agg_report.root_timer_data.name == 'root'
Example #12
    def test_time_func(self):
        """Test override of the time_func"""
        time_mock = Mock(side_effect=[50, 70])
        sw = StopWatch(time_func=time_mock)

        # Should call our timer func once on entry and once on exit
        with sw.timer('root'):
            pass

        agg_report = sw.get_last_aggregated_report()
        assert agg_report.aggregated_values == {
            'root': [20000.0, 1, None],
        }
        assert agg_report.root_timer_data.start_time == 50.0
        assert agg_report.root_timer_data.end_time == 70.0
        assert agg_report.root_timer_data.name == 'root'
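Beyond mocks, time_func can be any zero-argument callable returning a timestamp. Passing time.monotonic, for instance, keeps spans immune to wall-clock adjustments (an assumption consistent with the constructor shown here, not a documented guarantee):

import time

sw = StopWatch(time_func=time.monotonic)  # assumed to be accepted like any callable
with sw.timer('root'):
    pass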
Example #13
    def __init__(self, clock=None, clockface=None, buttons=None, **kwargs):
        tk.Frame.__init__(self, **kwargs)
        if not isinstance(clock, StopWatch):
            clock = StopWatch()

        if not isinstance(clockface, ClockFace):
            clockface = ClockFace(clock, master=self)

        if not isinstance(buttons, ToggleSwitch):
            buttons = ToggleSwitch(clock, master=self)

        self.set_clock(clock)
        self.set_buttons(buttons)
        self.set_clockface(clockface)

        self.clockface.pack(side=tk.TOP, pady=5, padx=10)
        self.buttons.pack(side=tk.TOP, pady=5, padx=10)
Example #14
    def test_format_report(self):
        sw = StopWatch()
        add_timers(sw)

        agg_report = sw.get_last_aggregated_report()
        formatted_report = format_report(agg_report)
        assert formatted_report == \
            "root                    900000.000ms (100%)\n" \
            "    BUCKET_A        child1                  2  240000.000ms (27%)\n" \
            "                        grand_children1         1  20000.000ms (2%)\n" \
            "                        grand_children2         2  80000.000ms (9%)\n" \
            "                        grand_children3         1  10000.000ms (1%)\n" \
            "    BUCKET_B        child2                  1  560000.000ms (62%)\n" \
            "                        grand_children1         1  260000.000ms (29%)\n" \
            "                        grand_children3         1  10000.000ms (1%)\n" \
            "Annotations: Cooltag, Slowtag"

        formatted_report2 = sw.format_last_report()
        assert formatted_report == formatted_report2
Example #15
    def test_multiple_root_spans(self):
        """Test multiple root spans timed in one instance of the StopWatch object."""
        sw = StopWatch()

        with sw.timer('root'):
            with sw.timer('child'):
                pass

        agg_values = sw.get_last_aggregated_report().aggregated_values
        assert len(agg_values) == 2
        assert all([span in agg_values for span in ('root', 'root#child')])

        with sw.timer('root'):
            with sw.timer('different_child'):
                pass

        agg_values = sw.get_last_aggregated_report().aggregated_values
        assert len(agg_values) == 2
        assert all([span in agg_values for span in ('root', 'root#different_child')])
Example #16
 def test_trace_annotations(self):
     sw = StopWatch()
     sw.add_annotation('key0', 'value0', event_time=0)
     with sw.timer('root', start_time=10, end_time=1000):
         with sw.timer('child', start_time=20, end_time=900):
             sw.add_span_annotation('key1', 'value1', event_time=101)
             sw.add_span_annotation('key2', 'value2', event_time=104)
             sw.add_annotation('key3', 'value3', event_time=107)
     trace_report = sw.get_last_trace_report()
     assert len(trace_report) == 2
     assert trace_report[0].name == 'child'
     assert trace_report[0].trace_annotations == [
         TraceAnnotation('key1', 'value1', 101),
         TraceAnnotation('key2', 'value2', 104),
     ]
     assert trace_report[1].name == 'root'
     assert trace_report[1].trace_annotations == [
         TraceAnnotation('key0', 'value0', 0),
         TraceAnnotation('key3', 'value3', 107),
     ]
Example #17
    def test_stopwatch_cancel_multiple_root_spans(self):
        """Test that spans can be cancelled inside a span context, with multiple
           of the same root span created. Ensure that they behave in an expected way.
        """

        sw = StopWatch()
        with sw.timer('root'):
            with sw.timer('child'):
                sw.cancel('child')

        with sw.timer('root'):
            with sw.timer('child'):
                pass

        agg_values = sw.get_last_aggregated_report().aggregated_values
        assert len(agg_values) == 2
        assert all([span in agg_values for span in ('root', 'root#child')])

        # Ensure that we are not leaking cancelled span data
        assert not sw._cancelled_spans
Example #18
def main():

    (g, geo_locations) = input_graph_undirected(INPUT_GRAPH_LOCATION)
    #g.pretty_print()
    p = ProblemShortestPath(g, g.node(0), g.node(1))

    #uniform cost search
    sw1 = StopWatch()
    (u_cost, result_path) = uniform_cost_search(p)
    el1 = sw1.elapsed_milliseconds()

    print "Uniform cost search"
    print "Solution:", u_cost
    print "Path:", result_path
    print "Time:", el1

    #A* search
    p.init_huristic(geo_locations)
    sw1.reset()
    (a_cost, result_path) = a_star(p)
    el1 = sw1.elapsed_milliseconds()
    print "===================="
    print "A * search"
    print "Solution:", a_cost
    print "Path:", result_path
    print "Time:", el1

    #A* beam search
    sw1.reset()
    beam_size = 3
    (a_cost, result_path) = a_star_beam_search(p, beam_size)
    el1 = sw1.elapsed_milliseconds()
    print "===================="
    print "A * beam search"
    print "Beam size:", beam_size
    print "Solution:", a_cost
    print "Path:", result_path
    print "Time:", el1
Example #19
    def ensemble_models_id(self,
                           single_id,
                           set_type='train',
                           model='stage1_unet',
                           show=True,
                           verbose=True):
        self._load_ensembles(model)
        d = self._get_cell_data(single_id, set_type)
        logger.debug('image size=%dx%d' % (d.img_h, d.img_w))

        total_model_size = len(self.ensembles['rcnn']) + len(
            self.ensembles['unet'])
        logger.debug('total_model_size=%d rcnn=%d unet=%d' %
                     (total_model_size, len(
                         self.ensembles['rcnn']), len(self.ensembles['unet'])))

        rcnn_instances = []
        rcnn_scores = []

        # TODO : RCNN Ensemble
        rcnn_ensemble = False
        for idx, data in enumerate(self.ensembles['rcnn']):
            if set_type == 'train':
                instances, scores = data['valid_instances'].get(
                    single_id, (None, None))
                rcnn_ensemble = True
            else:
                # TODO
                ls = data['test_instances'].get(single_id, None)
                if ls is None:
                    instances = scores = None
                else:
                    instances = [x[0] for x in ls]
                    scores = [x[1] for x in ls]
                    rcnn_ensemble = True
                    logger.debug('rcnn # instances = %d' % len(instances))

            if instances is None:
                logger.warning('Not found id=%s in RCNN %d Model' %
                               (single_id, idx + 1))
                continue

            rcnn_instances.extend(
                [instance[:d.img_h, :d.img_w] for instance in instances])
            rcnn_scores.extend([
                s * HyperParams.get().rcnn_score_rescale for s in scores
            ])  # rescale scores

        total_instances = []
        total_scores = []

        # TODO : UNet Ensemble
        for idx, data in enumerate(self.ensembles['unet']):
            if set_type == 'train':
                instances, scores = data['valid_instances'].get(
                    single_id, (None, None))
            else:
                instances, scores = data['test_instances'].get(
                    single_id, (None, None))

            if instances is None:
                logger.warning('Not found id=%s in UNet %d Model' %
                               (single_id, idx + 1))
                continue

            total_instances.extend(instances)
            total_scores.extend(scores)

            # skipped: these ids take too long to ensemble
            # if single_id in ['646f5e00a2db3add97fb80a83ef3c07edd1b17b1b0d47c2bd650cdcab9f322c0']:
            #     logger.warning('no ensemble id=%s' % single_id)
            #     break

        watch = StopWatch()
        watch.start()
        logger.debug('voting+ size=%d' % len(total_instances))

        # TODO : Voting?
        voting_th = HyperParams.get().ensemble_voting_th

        rects = [get_rect_of_mask(a) for a in total_instances]
        voted = []
        for i, x in enumerate(total_instances):
            voted.append(
                filter_by_voting(
                    (x, total_instances, voting_th, 0.3, rects[i], rects)))

        total_instances = list(compress(total_instances, voted))
        total_scores = list(compress(total_scores, voted))

        watch.stop()
        logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # nms
        watch.start()
        logger.debug('nms+ size=%d' % len(total_instances))
        instances, scores = Network.nms(
            total_instances,
            total_scores,
            None,
            thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # high threshold if not exists in RCNN
        if rcnn_ensemble:
            voted = []
            for i, x in enumerate(instances):
                voted.append(
                    filter_by_voting((x, rcnn_instances, 1, 0.3, None, None)))

            new_instances = []
            new_scores = []
            for instance, score, v in zip(instances, scores, voted):
                if v:
                    new_instances.append(instance)
                    new_scores.append(score)
                elif score > HyperParams.get().ensemble_th_no_rcnn:
                    new_instances.append(instance)
                    new_scores.append(score)
            instances, scores = new_instances, new_scores

        # nms with rcnn
        instances = instances + rcnn_instances
        scores = scores + rcnn_scores
        watch.start()
        logger.debug('nms_rcnn+ size=%d' % len(instances))
        instances, scores = Network.nms(
            instances, scores, None, thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms_rcnn- size=%d elapsed=%.5f' %
                     (len(instances), watch.get_elapsed()))
        watch.reset()

        # remove overlaps
        logger.debug('remove overlaps+')
        sorted_idx = [
            i[0] for i in sorted(enumerate(instances),
                                 key=lambda x: get_size_of_mask(x[1]),
                                 reverse=False)
        ]
        instances = [instances[x] for x in sorted_idx]
        scores = [scores[x] for x in sorted_idx]

        instances2 = [
            ndimage.morphology.binary_fill_holes(i) for i in instances
        ]
        instances2, scores2 = Network.remove_overlaps(instances2, scores)

        # remove deleted instances
        logger.debug('remove deleted+ size=%d' % len(instances2))
        voted = []
        for x in instances2:
            voted.append(filter_by_voting((x, instances, 1, 0.75, None, None)))
        instances = list(compress(instances2, voted))
        scores = list(compress(scores2, voted))

        # TODO : Filter by score?
        logger.debug('filter by score+ size=%d' % len(instances))
        score_filter_th = HyperParams.get().ensemble_score_th
        if score_filter_th > 0.0:
            logger.debug('filter_by_score=%.3f' % score_filter_th)
            # filter instances and scores together so the two lists stay aligned
            kept = [(i, s) for i, s in zip(instances, scores)
                    if s > score_filter_th]
            instances = [i for i, _ in kept]
            scores = [s for _, s in kept]

        logger.debug('finishing+ size=%d' % len(instances))
        image = d.image(is_gray=False)
        score_desc = []
        labels = []
        if len(d.masks) > 0:  # has label masks
            labels = list(d.multi_masks(transpose=False))
            tp, fp, fn = get_multiple_metric(thr_list, instances, labels)

            logger.debug('instances=%d, labels=%d' %
                         (len(instances), len(labels)))
            for i, thr in enumerate(thr_list):
                desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                    (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
                logger.debug(desc)
                score_desc.append(desc)
            score = np.mean(tp / (tp + fp + fn))
            logger.debug('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' %
                         (score, np.mean(tp), np.mean(fp), np.mean(fn)))
        else:
            score = 0.0

        if show:
            img_vis = Network.visualize(image, labels, instances, None)
            cv2.imshow('valid', img_vis)
            cv2.waitKey(0)
        else:
            return {
                'instance_scores': scores,
                'score': score,
                'image': image,
                'instances': instances,
                'labels': labels,
                'score_desc': score_desc
            }
Example #20
    def single_id(self,
                  model,
                  checkpoint,
                  single_id,
                  set_type='train',
                  show=True,
                  verbose=True):
        if model:
            self.set_network(model)
            self.network.build()

        self.init_session()
        if checkpoint:
            saver = tf.train.Saver()
            saver.restore(self.sess, checkpoint)
            if verbose:
                logger.info('restored from checkpoint, %s' % checkpoint)

        d = self._get_cell_data(single_id, set_type)
        h, w = d.img.shape[:2]
        shortedge = min(h, w)
        logger.debug('%s image size=(%d x %d)' % (single_id, w, h))

        watch = StopWatch()
        logger.debug('preprocess+')
        d = self.network.preprocess(d)

        image = d.image(is_gray=False)

        total_instances = []
        total_scores = []
        total_from_set = []
        cutoff_instance_max = HyperParams.get().post_cutoff_max_th
        cutoff_instance_avg = HyperParams.get().post_cutoff_avg_th

        watch.start()
        logger.debug('inference at default scale+ %dx%d' % (w, h))
        inference_result = self.network.inference(
            self.sess,
            image,
            cutoff_instance_max=cutoff_instance_max,
            cutoff_instance_avg=cutoff_instance_avg)
        instances_pre, scores_pre = inference_result[
            'instances'], inference_result['scores']
        instances_pre = Network.resize_instances(instances_pre,
                                                 target_size=(h, w))
        total_instances = total_instances + instances_pre
        total_scores = total_scores + scores_pre
        total_from_set = [1] * len(instances_pre)
        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        logger.debug('inference with flips+')
        # re-inference using flip
        for flip_orientation in range(2):
            flipped = cv2.flip(image.copy(), flip_orientation)
            inference_result = self.network.inference(
                self.sess,
                flipped,
                cutoff_instance_max=cutoff_instance_max,
                cutoff_instance_avg=cutoff_instance_avg)
            instances_flip, scores_flip = inference_result[
                'instances'], inference_result['scores']
            instances_flip = [
                cv2.flip(instance.astype(np.uint8), flip_orientation)
                for instance in instances_flip
            ]
            instances_flip = Network.resize_instances(instances_flip,
                                                      target_size=(h, w))

            total_instances = total_instances + instances_flip
            total_scores = total_scores + scores_flip
            total_from_set = total_from_set + [2 + flip_orientation
                                               ] * len(instances_flip)

        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()
        logger.debug('inference with scaling+flips+')

        # re-inference after rescale image
        def inference_with_scale(image, resize_target):
            image = cv2.resize(image.copy(),
                               None,
                               None,
                               resize_target,
                               resize_target,
                               interpolation=cv2.INTER_AREA)
            inference_result = self.network.inference(
                self.sess,
                image,
                cutoff_instance_max=cutoff_instance_max,
                cutoff_instance_avg=cutoff_instance_avg)
            instances_rescale, scores_rescale = inference_result[
                'instances'], inference_result['scores']

            instances_rescale = Network.resize_instances(instances_rescale,
                                                         target_size=(h, w))
            return instances_rescale, scores_rescale

        max_mask = get_max_size_of_masks(instances_pre)
        logger.debug('max_mask=%d' % max_mask)
        resize_target = HyperParams.get().test_aug_scale_t / max_mask
        resize_target = min(HyperParams.get().test_aug_scale_max,
                            resize_target)
        resize_target = max(HyperParams.get().test_aug_scale_min,
                            resize_target)
        import math
        # resize_target = 2.0 / (1.0 + math.exp(-1.5*(resize_target - 1.0)))
        # resize_target = max(0.5, resize_target)
        resize_target = max(228.0 / shortedge, resize_target)
        # if resize_target > 1.0 and min(w, h) > 1000:
        #     logger.debug('too large image, no resize')
        #     resize_target = 0.8
        logger.debug('resize_target=%.4f' % resize_target)

        instances_rescale, scores_rescale = inference_with_scale(
            image, resize_target)
        total_instances = total_instances + instances_rescale
        total_scores = total_scores + scores_rescale
        total_from_set = total_from_set + [4] * len(instances_rescale)

        # re-inference using flip + rescale
        for flip_orientation in range(2):
            flipped = cv2.flip(image.copy(), flip_orientation)
            instances_flip, scores_flip = inference_with_scale(
                flipped, resize_target)
            instances_flip = [
                cv2.flip(instance.astype(np.uint8), flip_orientation)
                for instance in instances_flip
            ]
            instances_flip = Network.resize_instances(instances_flip,
                                                      target_size=(h, w))

            total_instances = total_instances + instances_flip
            total_scores = total_scores + scores_flip
            total_from_set = total_from_set + [5 + flip_orientation
                                               ] * len(instances_flip)

        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        watch.start()
        logger.debug('voting+ size=%d' % len(total_instances))

        # TODO : Voting?
        voting_th = HyperParams.get().post_voting_th
        rects = [get_rect_of_mask(a) for a in total_instances]
        voted = []
        for i, x in enumerate(total_instances):
            voted.append(
                filter_by_voting(
                    (x, total_instances, voting_th, 0.3, rects[i], rects)))

        total_instances = list(compress(total_instances, voted))
        total_scores = list(compress(total_scores, voted))
        total_from_set = list(compress(total_from_set, voted))

        watch.stop()
        logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # nms
        watch.start()
        logger.debug('nms+ size=%d' % len(total_instances))
        instances, scores = Network.nms(
            total_instances,
            total_scores,
            total_from_set,
            thresh=HyperParams.get().test_aug_nms_iou)
        watch.stop()
        logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # remove overlaps
        logger.debug('remove overlaps+')
        sorted_idx = [
            i[0] for i in sorted(enumerate(instances),
                                 key=lambda x: get_size_of_mask(x[1]),
                                 reverse=True)
        ]
        instances = [instances[x] for x in sorted_idx]
        scores = [scores[x] for x in sorted_idx]

        instances = [
            ndimage.morphology.binary_fill_holes(i) for i in instances
        ]
        instances, scores = Network.remove_overlaps(instances, scores)

        # TODO : Filter by score?
        # logger.debug('filter by score+')
        # score_filter_th = HyperParams.get().post_filter_th
        # if score_filter_th > 0.0:
        #     logger.debug('filter_by_score=%.3f' % score_filter_th)
        #     instances = [i for i, s in zip(instances, scores) if s > score_filter_th]
        #     scores = [s for i, s in zip(instances, scores) if s > score_filter_th]

        logger.debug('finishing+')
        image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
        score_desc = []
        labels = []
        if len(d.masks) > 0:  # has label masks
            labels = list(d.multi_masks(transpose=False))
            labels = Network.resize_instances(labels, target_size=(h, w))
            tp, fp, fn = get_multiple_metric(thr_list, instances, labels)

            if verbose:
                logger.info('instances=%d, reinf(%.3f) labels=%d' %
                            (len(instances), resize_target, len(labels)))
            for i, thr in enumerate(thr_list):
                desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                    (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
                if verbose:
                    logger.info(desc)
                score_desc.append(desc)
            score = np.mean(tp / (tp + fp + fn))
            if verbose:
                logger.info('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' %
                            (score, np.mean(tp), np.mean(fp), np.mean(fn)))
        else:
            score = 0.0

        if show:
            img_vis = Network.visualize(image, labels, instances, None)
            cv2.imshow('valid', img_vis)
            cv2.waitKey(0)
        if not model:
            return {
                'instance_scores': scores,
                'score': score,
                'image': image,
                'instances': instances,
                'labels': labels,
                'score_desc': score_desc
            }
Example #21
def run():
    sw = StopWatch()

    a = np.random.rand(n, 2) * s_range + s_min
    b = np.random.rand(m, 2) * s_range + s_min

    xa = a[:, 0]
    ya = a[:, 1]

    xb = b[:, 0]
    yb = b[:, 1]

    xlim = np.asarray(
        [np.min([xa.min(), xb.min()]),
         np.max([xa.max(), xb.max()])])
    ylim = np.asarray(
        [np.min([ya.min(), yb.min()]),
         np.max([ya.max(), yb.max()])])

    exp = (xlim[1] - xlim[0]) * x_exp
    xlim += [-exp, exp]
    exp = (ylim[1] - ylim[0]) * y_exp
    ylim += [-exp, exp]

    plt.xlim(xlim)
    plt.ylim(ylim)

    plt.plot(xa,
             ya,
             a_style,
             alpha=set_opacity,
             zorder=a_order,
             markersize=a_size)
    plt.plot(xb,
             yb,
             b_style,
             alpha=set_opacity,
             zorder=b_order,
             markersize=b_size)

    total_nv = 0
    total_kd = 0
    total_bkd = 0
    total_obkd = 0

    print "Initializing naive module..."
    sw.start()
    nf = NaiveFinder(a)
    bt_nv = sw.elapsed()
    total_nv += bt_nv
    sw.reset()

    print "Initializing K-D Tree module..."
    sw.start()
    kdf = KDFinder(a)
    bt_kd = sw.elapsed()
    total_kd += bt_kd
    sw.lap()

    print "Initializing Bucketed K-D Tree module..."
    sw.start()
    bkdf = BKDFinder(a)
    bt_bkd = sw.elapsed()
    total_bkd += bt_bkd
    sw.lap()

    print "Initializing Optimized Bucketed K-D Tree module..."
    sw.start()
    obkdf = BKDFinder(a)  # note: constructed the same way as the plain BKD finder above
    bt_obkd = sw.elapsed()
    total_obkd += bt_obkd
    sw.lap()

    for i in range(m):
        print i
        p1 = b[i, :]

        sw.start()
        found = nf.find_closest_m(p1, K)
        total_nv += sw.elapsed()

        def check_mismatch(h_f, finder):
            # If there's a mismatch with ground-truth values, save K-D search steps for debugging
            if not (np.asarray(found)[:, 1] == np.asarray(h_f)[:, 1]).all():
                print "Mismatch", np.asarray(found)[:, 1], np.asarray(h_f)[:,
                                                                           1]
                for element in found:
                    p2 = element[0]
                    plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                             color=ground_truth_col,
                             zorder=2,
                             linewidth=2)
                finder.setup_plot(xlim, ylim, True)
                finder.find_closest_m(p1, 5)
                sw.start()
                finder.find_closest_m(p1, 5)
                for element in h_f:
                    p2 = element[0]
                    plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                             color=test_col,
                             zorder=3,
                             linewidth=1.5)
                print "Done"
                plt.show()

        sw.start()
        kdfound = kdf.find_closest_m(p1, K)
        total_kd += sw.elapsed()

        check_mismatch(kdfound, kdf)

        sw.start()
        bkdfound = bkdf.find_closest_m(p1, K)
        total_bkd += sw.elapsed()

        check_mismatch(bkdfound, bkdf)

        sw.start()
        obkdfound = obkdf.find_closest_m(p1, K)
        total_obkd += sw.elapsed()

        check_mismatch(obkdfound, obkdf)

    found = nf.find_closest_m(p1, 5)
    for element in found:
        p2 = element[0]
        h1 = plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                      color=ground_truth_col,
                      zorder=2,
                      linewidth=2)

    kdf.setup_plot(xlim, ylim, save_steps)
    kdfound = kdf.find_closest_m(p1, 5)
    for element in kdfound:
        p2 = element[0]
        h2 = plt.plot([p1[0], p2[0]], [p1[1], p2[1]],
                      color=test_col,
                      zorder=3,
                      linewidth=1.5)

    if zoom_in:
        points = np.asarray(kdfound)[:, 0]
        xs = np.asarray([p[0] for p in points])
        ys = np.asarray([p[1] for p in points])

        xlim = np.asarray([xs.min(), xs.max()])
        ylim = np.asarray([ys.min(), ys.max()])

        exp = (xlim[1] - xlim[0]) * x_exp
        xlim += [-exp, exp]
        exp = (ylim[1] - ylim[0]) * y_exp
        ylim += [-exp, exp]

        for ax in plt.gcf().axes:
            ax.set_xlim(xlim)
            ax.set_ylim(ylim)

    if full_screen:
        mng = plt.get_current_fig_manager()
        print mng.full_screen_toggle()

    print ''
    print 'Doing', m, 'queries in', n, 'records for', K, 'closest'
    print ''
    print 'Method\t\t\tTotal Time\t\t\tBuild Time\t\t\tMean per-query'
    print 'Naive\t\t\t', total_nv, '\t\t', bt_nv, '\t\t', (total_nv - bt_nv) / m
    print 'KD Tree\t\t\t', total_kd, '\t\t', bt_kd, '\t\t', (total_kd - bt_kd) / m
    print 'BKD Tree\t\t', total_bkd, '\t\t', bt_bkd, '\t\t', (total_bkd - bt_bkd) / m
    print 'OBKD Tree\t\t', total_obkd, '\t\t', bt_obkd, '\t\t', (total_obkd - bt_obkd) / m

    plt.show()
Example #22
 def test_default_exports(self):
     sw = StopWatch()
     add_timers(sw)
Example #23
 def test_export_default(self):
     """Make sure that passing None in explicitly works"""
     sw = StopWatch(export_aggregated_timers_func=None,
                    export_tracing_func=None)
     with sw.timer('root'):
         pass
Example #24
from stopwatch import StopWatch

# creation of stopwatch class object
clock = StopWatch()

# use of clock method start
clock.start()
total = 0
# simple loop to test how long the computer takes to complete the calculation
for i in range(1, 1000001):
    total += i
# use of clock method stop
clock.stop()
# use of clock method getElapsedTime to display how long between the two events
print("The amount of time for the calculation to run is",clock.getElapsedTime(), "milliseconds")
Example #25
# This file is used solely for quick tests. It can be, at any time, completely omitted from the project.

from kdfinder import KDFinder
import numpy as np
import matplotlib.pyplot as plt
import helper as h
from stopwatch import StopWatch
from kdtree import BucketedKDTree, KDTree

a = np.random.rand(5000, 2)

sw = StopWatch()
obt = BucketedKDTree(a, optimized=True)
sw.reset('Build time for Optimized BKD')
bt = BucketedKDTree(a)
sw.reset('Build time for BKD')
t = KDTree(a)
sw.reset('Build time for regular KD')
for value in a:
    if not obt.has(value):
        print 'Missing Value!!'
sw.reset('Traversal time for Optimized BKD')
for value in a:
    if not bt.has(value):
        print 'Missing Value!!'
sw.reset('Traversal time for BKD')
for value in a:
    if not t.has(value):
        print 'Missing Value!!'
sw.reset('Traversal time for regular KD')
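The StopWatch here treats reset(message) as a combined "print elapsed time and restart" call. A minimal sketch of a stopwatch with that behaviour, an assumption based only on how it is used above:

import time

class StopWatch(object):
    """Sketch: reset(msg) prints the elapsed time with a label, then restarts."""

    def __init__(self):
        self._start = time.time()

    def reset(self, msg=None):
        elapsed = time.time() - self._start
        if msg:
            print('%s: %.4fs' % (msg, elapsed))
        self._start = time.time()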
Example #26
 def __init__(self):
     self._stopwatch = StopWatch()
     self._totBytes = 0
     self._numFrames = 0
     self._bitrate = 0.0
Example #27
    def __init__(self, window, window_title, video_source=0, master=None):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        self.ok = False
        self.master = master

        #timer
        self.timer = StopWatch(self.window)

        # open video source (by default this will try to open the computer webcam)
        self.vid = VideoCapture(self.video_source)

        # Create a canvas that can fit the above video source size
        self.canvas = tk.Canvas(window,
                                width=self.vid.width,
                                height=self.vid.height)
        self.canvas.pack()

        # --------------------------------------------------------------------------------
        # fm = tk.Frame(master)

        #video control buttons

        self.img1 = tk.PhotoImage(file="stop.png")
        self.btn_stop = tk.Button(self.window,
                                  image=self.img1,
                                  padx=3,
                                  pady=2,
                                  activebackground='#979797',
                                  command=self.close_camera)
        self.btn_stop["border"] = "0"
        self.btn_stop.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        self.img = tk.PhotoImage(file="start.png")
        self.btn_start = tk.Button(self.window,
                                   image=self.img,
                                   padx=3,
                                   pady=2,
                                   activebackground='#979797',
                                   command=self.open_camera)
        self.btn_start["border"] = "0"
        self.btn_start.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)

        # Button that lets the user take a snapshot
        self.img2 = tk.PhotoImage(file="snap.png")
        self.btn_snapshot = tk.Button(self.window,
                                      image=self.img2,
                                      padx=3,
                                      pady=2,
                                      activebackground='#979797',
                                      command=self.snapshot)
        self.btn_snapshot["border"] = "0"
        self.btn_snapshot.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)

        # quit button
        self.img3 = tk.PhotoImage(file="exit.png")
        self.btn_quit = tk.Button(self.window,
                                  text='QUIT',
                                  image=self.img3,
                                  padx=3,
                                  pady=2,
                                  activebackground='#979797',
                                  command=self.quit)
        self.btn_quit["border"] = "0"
        self.btn_quit.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)

        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 10
        self.update()
        self.window.resizable(0, 0)
        self.window.mainloop()
Example #28
time_unit_string = StringVar()
time_unit_string.set("frames")

labelframe_experiment = LabelFrame(tab_record, text="Experiment Control")
labelframe_experiment.pack(fill="both", expand="yes")

experiment_frame_1 = Frame(labelframe_experiment)
experiment_frame_1.pack(fill=X)
experiment_frame_2 = Frame(labelframe_experiment)
experiment_frame_2.pack(fill=X)
experiment_frame_3 = Frame(labelframe_experiment)
experiment_frame_3.pack(fill=X)

timing_label = ttk.Label(experiment_frame_2, text="Time:")
timing_label.pack(side=LEFT, padx=5, pady=5)
timing_sw = StopWatch(experiment_frame_2, padx=5, pady=5)
timing_sw.pack(side=LEFT)

start_button = ttk.Button(experiment_frame_3,
                          text="Start",
                          command=startExperiment)
start_button.pack(side=LEFT, padx=10, pady=5)
stop_button = ttk.Button(experiment_frame_3,
                         text="Stop",
                         command=stopExperiment)
stop_button.pack(side=LEFT, padx=10, pady=5)

time_label = Label(experiment_frame_1, text="Record for")
time_label.grid(column=0, row=0, padx=5, pady=5)
time_text = ttk.Entry(experiment_frame_1, width=10, textvariable=time_string)
time_text.grid(column=1, row=0, pady=5)
Example #29

from random import randint

from stopwatch import StopWatch  # assumed import; see the sketch after this example


def unique1(s):
    """Return True if there are no duplicate elements in sequence s."""
    for j in range(len(s)):
        for k in range(j+1, len(s)):
            if s[j] == s[k]:
                return False
    return True


def unique2(s):
    """Return True is there are no duplicate elements in sequence s."""
    temp = sorted(s)
    for j in range(1, len(s)):
        if s[j-1] == s[j]:
            return False
    return True


if __name__ == '__main__':
    n = 20
    elapsed_time = []
    for j in range(4):
        s = [randint(0, 10000) for i in range(n)]
        watch = StopWatch()
        unique2(s)
        elapsed_time += [watch.elapsed()]
        print(n, elapsed_time[j],
              elapsed_time[j]/elapsed_time[j-1] if j > 0 else None)
        n *= 2
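Each iteration doubles n and prints the ratio of successive elapsed times, so the last column estimates the growth rate of unique2 (sorting-based, so roughly O(n log n); the ratio should hover just above 2). The StopWatch assumed here starts timing on construction and exposes elapsed(); a minimal sketch under that assumption:

import time

class StopWatch:
    """Sketch: starts timing on construction; elapsed() returns seconds since start."""

    def __init__(self):
        self._start = time.time()

    def elapsed(self):
        return time.time() - self._start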
Example #30
    def GetChannels(self, includeDisabled=False, **kwargs):
        # type: (object) -> list
        """ Retrieves all enabled channels within Retrospect.

        If updated channels are found, those channels are indexed and the
        channel index is rebuilt.

        @type kwargs: here for backward compatibility

        @return: a list of ChannelInfo objects of enabled channels.

        """

        sw = StopWatch("ChannelIndex.GetChannels Importer", Logger.Instance())
        Logger.Info("Fetching all enabled channels.")

        self.__enabledChannels = []
        self.__allChannels = []
        self.__validChannels = []

        # What platform are we
        platform = envcontroller.EnvController.GetPlatform()

        channelsUpdated = False
        for channelSet in self.__channelIndex[self.__CHANNEL_INDEX_CHANNEL_KEY]:
            channelSet = self.__channelIndex[self.__CHANNEL_INDEX_CHANNEL_KEY][channelSet]
            channelSetInfoPath = channelSet[self.__CHANNEL_INDEX_CHANNEL_INFO_KEY]
            channelSetVersion = channelSet[self.__CHANNEL_INDEX_CHANNEL_VERSION_KEY]

            # Check if file exists. If not, rebuild index
            if not os.path.isfile(channelSetInfoPath) and not self.__reindexed:
                Logger.Warning("Missing channelSet file: %s.", channelSetInfoPath)
                self.__RebuildIndex()
                return self.GetChannels()

            channelInfos = ChannelInfo.FromJson(channelSetInfoPath, channelSetVersion)

            # Check if the channel was updated
            if self.__IsChannelSetUpdated(channelInfos[0]):
                # let's see if the index has already been updated this session; if not,
                # do it and restart the ChannelRetrieval.
                if not self.__reindexed:
                    # rebuild and restart
                    Logger.Warning("Re-index channel index due to channelSet update: %s.", channelSetInfoPath)
                    self.__RebuildIndex()
                    return self.GetChannels()
                else:
                    Logger.Warning("Found updated channelSet: %s.", channelSetInfoPath)

                if not channelsUpdated:
                    # this was the first update found (otherwise channelsUpdated would
                    # already be True), so show a message:
                    title = LanguageHelper.GetLocalizedString(LanguageHelper.InitChannelTitle)
                    text = LanguageHelper.GetLocalizedString(LanguageHelper.InitChannelText)
                    XbmcWrapper.ShowNotification(title, text, displayTime=15000, logger=Logger.Instance())
                channelsUpdated = True

                # Initialise the channelset.
                self.__InitialiseChannelSet(channelInfos[0])

                # And perform all first actions for the included channels in the set
                for channelInfo in channelInfos:
                    self.__InitialiseChannel(channelInfo)

            # Check the channel validity
            for channelInfo in channelInfos:
                if not self.__ChannelIsCorrect(channelInfo):
                    continue
                self.__allChannels.append(channelInfo)

                # valid channel for this platform ?
                if (channelInfo.compatiblePlatforms & platform) != platform:
                    Logger.Warning("Not loading: %s -> platform '%s' is not compatible.",
                                   channelInfo, Environments.Name(platform))
                    continue
                self.__validChannels.append(channelInfo)

                # was the channel disabled?
                if not (AddonSettings.ShowChannel(
                        channelInfo) and AddonSettings.ShowChannelWithLanguage(
                        channelInfo.language)):
                    Logger.Warning("Not loading: %s -> Channel was disabled from settings.",
                                   channelInfo)
                    continue
                self.__enabledChannels.append(channelInfo)

                Logger.Debug("Loading: %s", channelInfo)

        if channelsUpdated:
            Logger.Info("New or updated channels found. Updating add-on configuration for all channels and user agent.")
            AddonSettings.UpdateAddOnSettingsWithChannels(self.__validChannels, Config)
            AddonSettings.UpdateUserAgent()
        else:
            Logger.Debug("No channel changes found. Skipping add-on configuration for channels.")
            # TODO: perhaps we should check that the settings.xml is correct and not broken?

        self.__enabledChannels.sort()
        Logger.Info("Fetch a total of %d channels of which %d are enabled.",
                    len(self.__allChannels),
                    len(self.__enabledChannels))

        sw.Stop()
        if includeDisabled:
            return self.__validChannels
        return self.__enabledChannels