def sum_primes_up_to_N(N): from stopwatch import StopWatch StopWatch.start() _generator = Prime() _primes = _generator.get_primes(N) print sum(_primes) StopWatch.print_time()
def get_Nth_prime(N): from stopwatch import StopWatch StopWatch.start() _generator = Prime() _primes = _generator.get_first_N_primes(N) print _primes[N-1] StopWatch.print_time()
def test_time_func_default(self):
    """Make sure that the default time_func=None"""
    stopwatch = StopWatch(time_func=None)
    with stopwatch.timer('root'):
        pass
    timer_data = stopwatch.get_last_aggregated_report().root_timer_data
    assert timer_data.name == 'root'
    assert timer_data.end_time >= timer_data.start_time
def test_override_exports(self):
    """Custom export funcs receive the aggregated report and the trace list."""
    export_tracing = Mock()
    export_timers = Mock()
    sw = StopWatch(
        export_tracing_func=export_tracing,
        export_aggregated_timers_func=export_timers,
    )
    add_timers(sw)
    agg_report = sw.get_last_aggregated_report()
    traces = sw.get_last_trace_report()
    # Each exporter must have been invoked exactly once, with the same
    # objects the getters return.
    export_timers.assert_called_once_with(aggregated_report=agg_report)
    export_tracing.assert_called_once_with(reported_traces=traces)
    # Aggregated value layout appears to be [total_ms, count, bucket].
    assert agg_report.aggregated_values == {
        'root': [900000.0, 1, None],
        'root#child1': [240000.0, 2, MyBuckets.BUCKET_A],
        'root#child1#grand_children1': [20000.0, 1, None],
        'root#child1#grand_children2': [80000.0, 2, None],
        'root#child1#grand_children3': [10000.0, 1, None],
        'root#child2': [560000.0, 1, MyBuckets.BUCKET_B],
        'root#child2#grand_children1': [260000.0, 1, None],
        'root#child2#grand_children3': [10000.0, 1, None],
    }
    assert agg_report.root_timer_data.start_time == 20.0
    assert agg_report.root_timer_data.end_time == 920.0
    assert agg_report.root_timer_data.name == 'root'
    assert agg_report.root_timer_data.trace_annotations == [
        TraceAnnotation('Cooltag', '1', 50),
        TraceAnnotation('Slowtag', '1', 920),
    ]
    # Traces are listed in the same order that scopes close
    assert [(trace.name, trace.log_name, trace.start_time, trace.end_time,
             trace.parent_span_id) for trace in traces] == [
        ('grand_children1', 'root#child1#grand_children1', 60, 80, traces[2].span_id),
        ('grand_children2', 'root#child1#grand_children2', 100, 120, traces[2].span_id),
        ('child1', 'root#child1', 40, 140, traces[9].span_id),
        ('grand_children3', 'root#child1#grand_children3', 180, 190, traces[5].span_id),
        ('grand_children2', 'root#child1#grand_children2', 220, 280, traces[5].span_id),
        ('child1', 'root#child1', 160, 300, traces[9].span_id),
        ('grand_children3', 'root#child2#grand_children3', 380, 390, traces[8].span_id),
        ('grand_children1', 'root#child2#grand_children1', 520, 780, traces[8].span_id),
        ('child2', 'root#child2', 320, 880, traces[9].span_id),
        ('root', 'root', 20, 920, None),
    ]
    # Only the root trace carries annotations.
    assert all(trace.trace_annotations == [] for trace in traces[:9])
    assert traces[9].trace_annotations == [
        TraceAnnotation('Cooltag', '1', 50),
        TraceAnnotation('Slowtag', '1', 920),
    ]
def test_sampling_timer(self):
    """A sampling_timer with p=0.5 records its span only on sampled runs."""
    for _ in range(100):
        sw = StopWatch()
        with sw.timer('root', start_time=20, end_time=120):
            with sw.sampling_timer('child', p=0.5, start_time=40, end_time=100):
                pass
        values = sw.get_last_aggregated_report().aggregated_values
        # Either only root, or root plus the sampled child.
        assert len(values) in (1, 2)
        if len(values) == 2:
            assert values['root#child'] == [60000.0, 1, None]
def test_exception_annotation(self):
    """An exception raised inside a timer becomes a trace annotation."""
    class SpecialError(Exception):
        pass

    sw = StopWatch()
    with pytest.raises(SpecialError):
        with sw.timer('root', start_time=10, end_time=1000):
            raise SpecialError("Ahhh")
    annotations = sw.get_last_trace_report()[0].trace_annotations
    assert annotations == [TraceAnnotation('Exception', 'SpecialError', 1000)]
def check_up_to_N(N): from stopwatch import StopWatch StopWatch.start() _checker = Prime() #_checker.get_primes(N) for i in range(N): if i % 10000 == 0: print i if _checker.is_prime(i): if N < 10000: print i, print "" StopWatch.print_time()
def test_override_exports(self):
    """Exporters get the aggregated values/tags (legacy keyword-based API)."""
    export_tracing = Mock()
    export_timers = Mock()
    sw = StopWatch(
        export_tracing_func=export_tracing,
        export_aggregated_timers_func=export_timers,
    )
    add_timers(sw)
    agg_report = sw.get_last_aggregated_report()
    traces = sw.get_last_trace_report()
    # The report tuple's first two items mirror the exporter kwargs.
    assert export_timers.call_args[1]['reported_values'] == agg_report[0]
    assert export_timers.call_args[1]['tags'] == agg_report[1]
    export_tracing.assert_called_once_with(reported_traces=traces)
    # Value layout appears to be [total_ms, count, bucket].
    export_timers.assert_called_once_with(
        reported_values={
            'root': [900000.0, 1, None],
            'root#child1': [240000.0, 2, MyBuckets.BUCKET_A],
            'root#child1#grand_children1': [20000.0, 1, None],
            'root#child1#grand_children2': [80000.0, 2, None],
            'root#child1#grand_children3': [10000.0, 1, None],
            'root#child2': [560000.0, 1, MyBuckets.BUCKET_B],
            'root#child2#grand_children1': [260000.0, 1, None],
            'root#child2#grand_children3': [10000.0, 1, None],
        },
        tags=set(["Cooltag", "Slowtag"]),
        total_time_ms=900000.0,
        root_span_name="root",
    )
    # Traces are listed in the same order that scopes close
    assert [(trace.name, trace.log_name, trace.start_time, trace.end_time,
             trace.parent_span_id) for trace in traces] == [
        ('grand_children1', 'root#child1#grand_children1', 60, 80, traces[2].span_id),
        ('grand_children2', 'root#child1#grand_children2', 100, 120, traces[2].span_id),
        ('child1', 'root#child1', 40, 140, traces[9].span_id),
        ('grand_children3', 'root#child1#grand_children3', 180, 190, traces[5].span_id),
        ('grand_children2', 'root#child1#grand_children2', 220, 280, traces[5].span_id),
        ('child1', 'root#child1', 160, 300, traces[9].span_id),
        ('grand_children3', 'root#child2#grand_children3', 380, 390, traces[8].span_id),
        ('grand_children1', 'root#child2#grand_children1', 520, 780, traces[8].span_id),
        ('child2', 'root#child2', 320, 880, traces[9].span_id),
        ('root', 'root', 20, 920, None),
    ]
    # Only the root trace carries annotations.
    assert all(trace.trace_annotations == [] for trace in traces[:9])
    assert traces[9].trace_annotations == [
        KeyValueAnnotation('Cooltag', '1'),
        KeyValueAnnotation('Slowtag', '1'),
    ]
def test_scope_in_loop(self):
    """Repeated child timers under one root aggregate count and total time."""
    sw = StopWatch()
    with sw.timer('root', start_time=20, end_time=120):
        for begin in range(30, 100, 10):
            with sw.timer('child', start_time=begin, end_time=begin + 5):
                pass
    report = sw.get_last_aggregated_report()
    expected = {
        'root': [100000.0, 1, None],
        'root#child': [35000.0, 7, None],
    }
    assert report.aggregated_values == expected
    root = report.root_timer_data
    assert (root.name, root.start_time, root.end_time) == ('root', 20.0, 120.0)
def test_time_func(self):
    """Test override of the time_func"""
    fake_clock = Mock(side_effect=[50, 70])
    sw = StopWatch(time_func=fake_clock)
    # The timer func is called once on scope entry and once on exit.
    with sw.timer('root'):
        pass
    report = sw.get_last_aggregated_report()
    assert report.aggregated_values == {'root': [20000.0, 1, None]}
    root = report.root_timer_data
    assert (root.name, root.start_time, root.end_time) == ('root', 50.0, 70.0)
def test_time_func(self):
    """The exporter sees values measured through an overridden time_func."""
    export_mock = Mock()
    fake_clock = Mock(side_effect=[50, 70])
    sw = StopWatch(export_aggregated_timers_func=export_mock, time_func=fake_clock)
    # The timer func is called once on scope entry and once on exit.
    with sw.timer('root'):
        pass
    export_mock.assert_called_once_with(
        reported_values={'root': [20000.0, 1, None]},
        tags=set(),
        total_time_ms=20000.0,
        root_span_name="root",
    )
def global_sw(self):
    """Return the calling thread's StopWatch, lazily creating it (EAFP)."""
    try:
        return self.threadlocal_sws.sw
    except AttributeError:
        sw = StopWatch(
            export_aggregated_timers_func=self.export_agg_timers_func,
            time_func=self.time_func,
        )
        self.threadlocal_sws.sw = sw
        return sw
def test_stopwatch_cancel_context_manager(self):
    """Test that spans can be cancelled while inside a span context."""
    sw = StopWatch()
    with sw.timer('root'):
        with sw.timer('child'):
            sw.cancel('child')
            with sw.timer('grand'):
                pass
    agg_values = sw.get_last_aggregated_report().aggregated_values
    # The cancelled child vanishes, so 'grand' reparents under root.
    assert sorted(agg_values) == ['root', 'root#grand']
def howLongToCountNumbers(startNum, endNum):
    """Time how long summing the integers in [startNum, endNum) takes.

    Prints the elapsed time reported by the StopWatch.
    """
    stopwatch = StopWatch(time.time())
    stopwatch.start()
    total = 0  # renamed from `sum`: the original shadowed the builtin
    for i in range(startNum, endNum):
        total += i
    stopwatch.stop()
    print(stopwatch.getElapsedTime())
def test_format_report(self):
    """format_report and format_last_report render identical text."""
    sw = StopWatch()
    add_timers(sw)
    agg_report = sw.get_last_aggregated_report()
    formatted_report = format_report(agg_report)
    # NOTE(review): the column padding inside these literals may have been
    # collapsed in transit — verify alignment against format_report's output.
    assert formatted_report == \
        "root 900000.000ms (100%)\n" \
        " BUCKET_A child1 2 240000.000ms (27%)\n" \
        " grand_children1 1 20000.000ms (2%)\n" \
        " grand_children2 2 80000.000ms (9%)\n" \
        " grand_children3 1 10000.000ms (1%)\n" \
        " BUCKET_B child2 1 560000.000ms (62%)\n" \
        " grand_children1 1 260000.000ms (29%)\n" \
        " grand_children3 1 10000.000ms (1%)\n" \
        "Annotations: Cooltag, Slowtag"
    formatted_report2 = sw.format_last_report()
    assert formatted_report == formatted_report2
def test_format_report(self):
    """Legacy banner-style report format (values in ms without a unit suffix)."""
    sw = StopWatch()
    add_timers(sw)
    agg_report = sw.get_last_aggregated_report()
    formatted_report = format_report(agg_report)
    # NOTE(review): the column padding inside these literals may have been
    # collapsed in transit — verify alignment against format_report's output.
    assert formatted_report == \
        "************************\n" \
        "*** StopWatch Report ***\n" \
        "************************\n" \
        "root 900000.000 (100%)\n" \
        " BUCKET_A child1 2 240000.000 (27%)\n" \
        " grand_children1 1 20000.000 (2%)\n" \
        " grand_children2 2 80000.000 (9%)\n" \
        " grand_children3 1 10000.000 (1%)\n" \
        " BUCKET_B child2 1 560000.000 (62%)\n" \
        " grand_children1 1 260000.000 (29%)\n" \
        " grand_children3 1 10000.000 (1%)\n" \
        "Tags: Cooltag, Slowtag"
def test_scope_in_loop(self):
    """The exporter receives aggregates for child timers opened in a loop."""
    export_timers = Mock()
    sw = StopWatch(export_aggregated_timers_func=export_timers)
    with sw.timer('root', start_time=20, end_time=120):
        for begin in range(30, 100, 10):
            with sw.timer('child', start_time=begin, end_time=begin + 5):
                pass
    export_timers.assert_called_once_with(
        reported_values={
            'root': [100000.0, 1, None],
            'root#child': [35000.0, 7, None],
        },
        tags=set(),
        total_time_ms=100000.0,
        root_span_name="root",
    )
def test_trace_annotations(self):
    """Span annotations attach to the open span; plain annotations to root."""
    sw = StopWatch()
    sw.add_annotation('key0', 'value0', event_time=0)
    with sw.timer('root', start_time=10, end_time=1000):
        with sw.timer('child', start_time=20, end_time=900):
            sw.add_span_annotation('key1', 'value1', event_time=101)
            sw.add_span_annotation('key2', 'value2', event_time=104)
            sw.add_annotation('key3', 'value3', event_time=107)
    trace_report = sw.get_last_trace_report()
    assert len(trace_report) == 2
    child_trace, root_trace = trace_report
    assert child_trace.name == 'child'
    assert child_trace.trace_annotations == [
        TraceAnnotation('key1', 'value1', 101),
        TraceAnnotation('key2', 'value2', 104),
    ]
    assert root_trace.name == 'root'
    assert root_trace.trace_annotations == [
        TraceAnnotation('key0', 'value0', 0),
        TraceAnnotation('key3', 'value3', 107),
    ]
class BitRate:
    """Tracks an average bitrate (MBit/sec) over batches of frames."""

    def __init__(self):
        self._stopwatch = StopWatch()
        self._totBytes = 0
        self._numFrames = 0
        self._bitrate = 0.0

    def update(self, bytecnt):
        """Record one frame of `bytecnt` bytes."""
        self._numFrames += 1
        self._totBytes += bytecnt

    # Returns bitrate in MBit/sec
    def get(self):
        """Return the bitrate, recomputing it after more than 10 frames."""
        if self._numFrames > 10:
            elapsed = self._stopwatch.stop()
            self._bitrate = (8 * self._totBytes / elapsed) / 1.0e6
            # Reset the window for the next measurement batch.
            self._numFrames = 0
            self._totBytes = 0
            self._stopwatch.start()
        return self._bitrate
def follow_for_ms(tank, ms):
    """
    ``tank``: the MoveTank object that is following a line
    ``ms`` : the number of milliseconds to follow the line
    """
    # Lazily create the per-tank stopwatch on first call.
    if getattr(tank, 'stopwatch', None) is None:
        tank.stopwatch = StopWatch()
        tank.stopwatch.start()
    if tank.stopwatch.value_ms >= ms:
        # Time is up: drop the stopwatch so the next call starts fresh.
        tank.stopwatch = None
        return False
    return True
def test_stopwatch_cancel_multiple_root_spans(self):
    """Cancelled spans behave correctly across repeated root spans."""
    sw = StopWatch()
    # First root: the child is cancelled before it closes.
    with sw.timer('root'):
        with sw.timer('child'):
            sw.cancel('child')
    # Second root: an identically named child completes normally.
    with sw.timer('root'):
        with sw.timer('child'):
            pass
    agg_values = sw.get_last_aggregated_report().aggregated_values
    assert sorted(agg_values) == ['root', 'root#child']
    # Ensure that we are not leaking cancelled span data
    assert not sw._cancelled_spans
def __init__(self, clock=None, clockface=None, buttons=None, **kwargs):
    """Assemble the stopwatch frame, building default widgets when the
    caller does not supply valid ones."""
    tk.Frame.__init__(self, **kwargs)
    # Fall back to fresh components for anything missing or mistyped.
    if not isinstance(clock, StopWatch):
        clock = StopWatch()
    if not isinstance(clockface, ClockFace):
        clockface = ClockFace(clock, master=self)
    if not isinstance(buttons, ToggleSwitch):
        buttons = ToggleSwitch(clock, master=self)
    self.set_clock(clock)
    self.set_buttons(buttons)
    self.set_clockface(clockface)
    # Lay out the face above the control buttons.
    self.clockface.pack(side=tk.TOP, pady=5, padx=10)
    self.buttons.pack(side=tk.TOP, pady=5, padx=10)
def test_multiple_root_spans(self):
    """Test multiple root spans timed in one instance of the StopWatch object."""
    sw = StopWatch()
    with sw.timer('root'):
        with sw.timer('child'):
            pass
    first = sw.get_last_aggregated_report().aggregated_values
    assert sorted(first) == ['root', 'root#child']
    with sw.timer('root'):
        with sw.timer('different_child'):
            pass
    # The "last" report only reflects the most recent root span.
    second = sw.get_last_aggregated_report().aggregated_values
    assert sorted(second) == ['root', 'root#different_child']
def main(): (g, geo_locations) = input_graph_undirected(INPUT_GRAPH_LOCATION) #g.pretty_print() p = ProblemShortestPath(g, g.node(0), g.node(1)) #uniform cost search sw1 = StopWatch() (u_cost, result_path) = uniform_cost_search(p) el1 = sw1.elapsed_milliseconds() print "Uniform cost search" print "Solution:", u_cost print "Path:", result_path print "Time:", el1 #A* search p.init_huristic(geo_locations) sw1.reset() (a_cost, result_path) = a_star(p) el1 = sw1.elapsed_milliseconds() print "====================" print "A * search" print "Solution:", a_cost print "Path:", result_path print "Time:", el1 #A* search sw1.reset() beam_size = 3 (a_cost, result_path) = a_star_beam_search(p, beam_size) el1 = sw1.elapsed_milliseconds() print "====================" print "A * beam search" print "Beam size:", beam_size print "Solution:", a_cost print "Path:", result_path print "Time:", el1
def main(): (g,geo_locations) = input_graph_undirected(INPUT_GRAPH_LOCATION) #g.pretty_print() p = ProblemShortestPath(g,g.node(0),g.node(1)) #uniform cost search sw1 = StopWatch() (u_cost, result_path) = uniform_cost_search(p) el1 = sw1.elapsed_milliseconds() print "Uniform cost search" print "Solution:",u_cost print "Path:", result_path print "Time:", el1 #A* search p.init_huristic(geo_locations) sw1.reset() (a_cost, result_path) = a_star(p) el1 = sw1.elapsed_milliseconds() print "====================" print "A * search" print "Solution:",a_cost print "Path:", result_path print "Time:", el1 #A* search sw1.reset() beam_size = 3 (a_cost, result_path) = a_star_beam_search(p,beam_size) el1 = sw1.elapsed_milliseconds() print "====================" print "A * beam search" print "Beam size:", beam_size print "Solution:",a_cost print "Path:", result_path print "Time:", el1
def test_stopwatch_cancel(self):
    """Test that spans can be correctly cancelled and not reported."""
    sw = StopWatch()
    sw.start('root')
    sw.start('child')
    sw.cancel('child')
    sw.end('root')
    agg_values = sw.get_last_aggregated_report().aggregated_values
    assert list(agg_values) == ['root']
    # Ensure that we are not leaking cancelled span data
    assert not sw._cancelled_spans
#import psyco; psyco.full() from math import sqrt, ceil def sieveOfEratosthenes(n): """sieveOfEratosthenes(n): return the list of the primes < n.""" # Code from: <*****@*****.**>, Nov 30 2006 # http://groups.google.com/group/comp.lang.python/msg/f1f10ced88c68c2d if n <= 2: return [] sieve = range(3, n, 2) # is there a way we can do this top = len(sieve) for si in sieve: if si: bottom = (si*si - 3) // 2 # index of the square of si if bottom >= top: break num_zeros = -((bottom - top) // si) sieve[bottom::si] = [0] * num_zeros return [2] + [el for el in sieve if el] if __name__=='__main__': n=10000000 from stopwatch import StopWatch StopWatch.start() sieveOfEratosthenes(n) print "Sieve of Eratosthenes: ", StopWatch.print_time()
from stopwatch import StopWatch

# creation of stopwatch class object
clock = StopWatch()
# use of clock method start
clock.start()
# `total` instead of `sum`: the original shadowed the builtin
total = 0
# Random loop to test how long it takes computer to complete
for i in range(1, 1000001):
    total += i
# use of clock method stop
clock.stop()
# use of clock method getElapsedTime to display how long between the two events
print("The amount of time for the calculation to run is", clock.getElapsedTime(), "milliseconds")
class App:
    """Tkinter webcam viewer with record/stop/snapshot/quit controls."""

    def __init__(self, window, window_title, video_source=0, master=None):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        self.ok = False  # recording flag toggled by open/close_camera
        self.master = master
        #timer
        self.timer = StopWatch(self.window)
        # open video source (by default this will try to open the computer webcam)
        self.vid = VideoCapture(self.video_source)
        # Create a canvas that can fit the above video source size
        self.canvas = tk.Canvas(window, width=self.vid.width, height=self.vid.height)
        self.canvas.pack()
        # -------------------------------------------------------------------------------- #
        # NOTE(review): `fm` is never used afterwards — possibly leftover scaffolding.
        fm = tk.Frame(master)
        #video control buttons
        self.img1 = tk.PhotoImage(file="stop.png")
        self.btn_stop = tk.Button(self.window, image=self.img1, padx=3, pady=2,
                                  activebackground='#979797',
                                  command=self.close_camera)
        self.btn_stop["border"] = "0"
        self.btn_stop.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        self.img = tk.PhotoImage(file="start.png")
        self.btn_start = tk.Button(self.window, image=self.img, padx=3, pady=2,
                                   activebackground='#979797',
                                   command=self.open_camera)
        self.btn_start["border"] = "0"
        self.btn_start.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        # Button that lets the user take a snapshot
        self.img2 = tk.PhotoImage(file="snap.png")
        self.btn_snapshot = tk.Button(self.window, image=self.img2, padx=3, pady=2,
                                      activebackground='#979797',
                                      command=self.snapshot)
        self.btn_snapshot["border"] = "0"
        self.btn_snapshot.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        # Button t
        # quit button
        self.img3 = tk.PhotoImage(file="exit.png")
        self.btn_quit = tk.Button(self.window, text='QUIT', image=self.img3, padx=3, pady=2,
                                  activebackground='#979797',
                                  command=self.quit)
        self.btn_quit["border"] = "0"
        self.btn_quit.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 10
        self.update()
        self.window.resizable(0, 0)
        self.window.mainloop()

    def snapshot(self):
        """Save the current frame as a timestamped JPEG."""
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()
        if ret:
            cv2.imwrite("IMG-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg",
                        cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def open_camera(self):
        """Begin recording and start the on-screen timer."""
        self.ok = True
        self.timer.start()
        print("camera opened => Recording")

    def close_camera(self):
        """Stop recording and pause the on-screen timer."""
        self.ok = False
        self.timer.stop()
        print("camera closed => Not Recording")

    def update(self):
        """Draw the latest frame, then reschedule itself every `delay` ms."""
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()
        if ret:
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
        # NOTE(review): rescheduling outside the `if` keeps the loop alive
        # even when a frame read fails — confirm against the original layout.
        self.window.after(self.delay, self.update)

    def quit(self):
        """Tear down the Tk window, ending mainloop."""
        self.window.destroy()
def __init__(self, window, window_title, video_source=0, master=None):
    """Build the webcam window: video canvas plus stop/start/snapshot/quit
    buttons, then enter the Tk mainloop."""
    self.window = window
    self.window.title(window_title)
    self.video_source = video_source
    self.ok = False  # recording flag toggled by open/close_camera
    self.master = master
    #timer
    self.timer = StopWatch(self.window)
    # open video source (by default this will try to open the computer webcam)
    self.vid = VideoCapture(self.video_source)
    # Create a canvas that can fit the above video source size
    self.canvas = tk.Canvas(window, width=self.vid.width, height=self.vid.height)
    self.canvas.pack()
    # -------------------------------------------------------------------------------- #
    # NOTE(review): `fm` is never used afterwards — possibly leftover scaffolding.
    fm = tk.Frame(master)
    #video control buttons
    self.img1 = tk.PhotoImage(file="stop.png")
    self.btn_stop = tk.Button(self.window, image=self.img1, padx=3, pady=2,
                              activebackground='#979797',
                              command=self.close_camera)
    self.btn_stop["border"] = "0"
    self.btn_stop.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
    self.img = tk.PhotoImage(file="start.png")
    self.btn_start = tk.Button(self.window, image=self.img, padx=3, pady=2,
                               activebackground='#979797',
                               command=self.open_camera)
    self.btn_start["border"] = "0"
    self.btn_start.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
    # Button that lets the user take a snapshot
    self.img2 = tk.PhotoImage(file="snap.png")
    self.btn_snapshot = tk.Button(self.window, image=self.img2, padx=3, pady=2,
                                  activebackground='#979797',
                                  command=self.snapshot)
    self.btn_snapshot["border"] = "0"
    self.btn_snapshot.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
    # Button t
    # quit button
    self.img3 = tk.PhotoImage(file="exit.png")
    self.btn_quit = tk.Button(self.window, text='QUIT', image=self.img3, padx=3, pady=2,
                              activebackground='#979797',
                              command=self.quit)
    self.btn_quit["border"] = "0"
    self.btn_quit.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
    # After it is called once, the update method will be automatically called every delay milliseconds
    self.delay = 10
    self.update()
    self.window.resizable(0, 0)
    self.window.mainloop()
def __init__(self):
    """Initialize the byte/frame counters and the timing stopwatch."""
    self._stopwatch = StopWatch()
    self._bitrate = 0.0
    self._numFrames = 0
    self._totBytes = 0
def single_id(self, model, checkpoint, single_id, set_type='train', show=True, verbose=True):
    """Run test-time-augmented inference (default scale, flips, rescale+flips)
    on one cell image, then vote / NMS / de-overlap the instance masks.

    Returns a result dict (scores, instances, labels, ...) only when `model`
    is falsy; otherwise the method is used for its side effects and display.
    """
    if model:
        self.set_network(model)
        self.network.build()
        self.init_session()
    if checkpoint:
        saver = tf.train.Saver()
        saver.restore(self.sess, checkpoint)
        if verbose:
            logger.info('restored from checkpoint, %s' % checkpoint)
    d = self._get_cell_data(single_id, set_type)
    h, w = d.img.shape[:2]
    shortedge = min(h, w)
    logger.debug('%s image size=(%d x %d)' % (single_id, w, h))
    watch = StopWatch()
    logger.debug('preprocess+')
    d = self.network.preprocess(d)
    image = d.image(is_gray=False)
    total_instances = []
    total_scores = []
    total_from_set = []  # tags which augmentation produced each instance
    cutoff_instance_max = HyperParams.get().post_cutoff_max_th
    cutoff_instance_avg = HyperParams.get().post_cutoff_avg_th
    watch.start()
    logger.debug('inference at default scale+ %dx%d' % (w, h))
    inference_result = self.network.inference(
        self.sess, image,
        cutoff_instance_max=cutoff_instance_max,
        cutoff_instance_avg=cutoff_instance_avg)
    instances_pre, scores_pre = inference_result['instances'], inference_result['scores']
    instances_pre = Network.resize_instances(instances_pre, target_size=(h, w))
    total_instances = total_instances + instances_pre
    total_scores = total_scores + scores_pre
    total_from_set = [1] * len(instances_pre)  # set id 1 = default scale
    watch.stop()
    logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    logger.debug('inference with flips+')
    # re-inference using flip
    for flip_orientation in range(2):
        flipped = cv2.flip(image.copy(), flip_orientation)
        inference_result = self.network.inference(
            self.sess, flipped,
            cutoff_instance_max=cutoff_instance_max,
            cutoff_instance_avg=cutoff_instance_avg)
        instances_flip, scores_flip = inference_result['instances'], inference_result['scores']
        # Flip the predicted masks back into the original orientation.
        instances_flip = [
            cv2.flip(instance.astype(np.uint8), flip_orientation)
            for instance in instances_flip
        ]
        instances_flip = Network.resize_instances(instances_flip, target_size=(h, w))
        total_instances = total_instances + instances_flip
        total_scores = total_scores + scores_flip
        total_from_set = total_from_set + [2 + flip_orientation] * len(instances_flip)
    watch.stop()
    logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    logger.debug('inference with scaling+flips+')

    # re-inference after rescale image
    def inference_with_scale(image, resize_target):
        # One-line purpose: run inference on a rescaled copy and map the
        # resulting masks back to the original (h, w).
        image = cv2.resize(image.copy(), None, None, resize_target, resize_target,
                           interpolation=cv2.INTER_AREA)
        inference_result = self.network.inference(
            self.sess, image,
            cutoff_instance_max=cutoff_instance_max,
            cutoff_instance_avg=cutoff_instance_avg)
        instances_rescale, scores_rescale = inference_result['instances'], inference_result['scores']
        instances_rescale = Network.resize_instances(instances_rescale, target_size=(h, w))
        return instances_rescale, scores_rescale

    max_mask = get_max_size_of_masks(instances_pre)
    logger.debug('max_mask=%d' % max_mask)
    # Choose a scale that normalizes the largest mask size, clamped to the
    # configured min/max and to a minimum image short edge of 228 px.
    resize_target = HyperParams.get().test_aug_scale_t / max_mask
    resize_target = min(HyperParams.get().test_aug_scale_max, resize_target)
    resize_target = max(HyperParams.get().test_aug_scale_min, resize_target)
    import math
    # resize_target = 2.0 / (1.0 + math.exp(-1.5*(resize_target - 1.0)))
    # resize_target = max(0.5, resize_target)
    resize_target = max(228.0 / shortedge, resize_target)
    # if resize_target > 1.0 and min(w, h) > 1000:
    #     logger.debug('too large image, no resize')
    #     resize_target = 0.8
    logger.debug('resize_target=%.4f' % resize_target)
    instances_rescale, scores_rescale = inference_with_scale(image, resize_target)
    total_instances = total_instances + instances_rescale
    total_scores = total_scores + scores_rescale
    total_from_set = total_from_set + [4] * len(instances_rescale)
    # re-inference using flip + rescale
    for flip_orientation in range(2):
        flipped = cv2.flip(image.copy(), flip_orientation)
        instances_flip, scores_flip = inference_with_scale(flipped, resize_target)
        instances_flip = [
            cv2.flip(instance.astype(np.uint8), flip_orientation)
            for instance in instances_flip
        ]
        instances_flip = Network.resize_instances(instances_flip, target_size=(h, w))
        total_instances = total_instances + instances_flip
        total_scores = total_scores + scores_flip
        total_from_set = total_from_set + [5 + flip_orientation] * len(instances_flip)
    watch.stop()
    logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    watch.start()
    logger.debug('voting+ size=%d' % len(total_instances))
    # TODO : Voting?
    voting_th = HyperParams.get().post_voting_th
    rects = [get_rect_of_mask(a) for a in total_instances]
    voted = []
    for i, x in enumerate(total_instances):
        voted.append(
            filter_by_voting(
                (x, total_instances, voting_th, 0.3, rects[i], rects)))
    total_instances = list(compress(total_instances, voted))
    total_scores = list(compress(total_scores, voted))
    total_from_set = list(compress(total_from_set, voted))
    watch.stop()
    logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    # nms
    watch.start()
    logger.debug('nms+ size=%d' % len(total_instances))
    instances, scores = Network.nms(
        total_instances, total_scores, total_from_set,
        thresh=HyperParams.get().test_aug_nms_iou)
    watch.stop()
    logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    # remove overlaps
    logger.debug('remove overlaps+')
    # Largest masks first so bigger instances win overlap resolution.
    sorted_idx = [
        i[0] for i in sorted(enumerate(instances),
                             key=lambda x: get_size_of_mask(x[1]),
                             reverse=True)
    ]
    instances = [instances[x] for x in sorted_idx]
    scores = [scores[x] for x in sorted_idx]
    instances = [ndimage.morphology.binary_fill_holes(i) for i in instances]
    instances, scores = Network.remove_overlaps(instances, scores)
    # TODO : Filter by score?
    # logger.debug('filter by score+')
    # score_filter_th = HyperParams.get().post_filter_th
    # if score_filter_th > 0.0:
    #     logger.debug('filter_by_score=%.3f' % score_filter_th)
    #     instances = [i for i, s in zip(instances, scores) if s > score_filter_th]
    #     scores = [s for i, s in zip(instances, scores) if s > score_filter_th]
    logger.debug('finishing+')
    image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
    score_desc = []
    labels = []
    if len(d.masks) > 0:
        # has label masks
        labels = list(d.multi_masks(transpose=False))
        labels = Network.resize_instances(labels, target_size=(h, w))
        tp, fp, fn = get_multiple_metric(thr_list, instances, labels)
        if verbose:
            logger.info('instances=%d, reinf(%.3f) labels=%d' %
                        (len(instances), resize_target, len(labels)))
        for i, thr in enumerate(thr_list):
            desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
            if verbose:
                logger.info(desc)
            score_desc.append(desc)
        score = np.mean(tp / (tp + fp + fn))
        if verbose:
            logger.info('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' %
                        (score, np.mean(tp), np.mean(fp), np.mean(fn)))
    else:
        score = 0.0
    if show:
        img_vis = Network.visualize(image, labels, instances, None)
        cv2.imshow('valid', img_vis)
        cv2.waitKey(0)
    if not model:
        return {
            'instance_scores': scores,
            'score': score,
            'image': image,
            'instances': instances,
            'labels': labels,
            'score_desc': score_desc
        }
def ensemble_models_id(self, single_id, set_type='train', model='stage1_unet', show=True, verbose=True):
    """Ensemble precomputed UNet (and optionally RCNN) instance predictions
    for one image id: vote, NMS, merge with RCNN, de-overlap, score.

    Returns the result dict only when `show` is False; with `show=True` the
    method displays a visualization instead.
    """
    self._load_ensembles(model)
    d = self._get_cell_data(single_id, set_type)
    logger.debug('image size=%dx%d' % (d.img_h, d.img_w))
    total_model_size = len(self.ensembles['rcnn']) + len(self.ensembles['unet'])
    logger.debug('total_model_size=%d rcnn=%d unet=%d' %
                 (total_model_size,
                  len(self.ensembles['rcnn']), len(self.ensembles['unet'])))
    rcnn_instances = []
    rcnn_scores = []
    # TODO : RCNN Ensemble
    rcnn_ensemble = False
    for idx, data in enumerate(self.ensembles['rcnn']):
        if set_type == 'train':
            instances, scores = data['valid_instances'].get(single_id, (None, None))
            rcnn_ensemble = True
        else:
            # TODO
            ls = data['test_instances'].get(single_id, None)
            if ls is None:
                instances = scores = None
            else:
                instances = [x[0] for x in ls]
                scores = [x[1] for x in ls]
                rcnn_ensemble = True
                logger.debug('rcnn # instances = %d' % len(instances))
        if instances is None:
            logger.warning('Not found id=%s in RCNN %d Model' % (single_id, idx + 1))
            continue
        # Crop masks to the image bounds before merging.
        rcnn_instances.extend(
            [instance[:d.img_h, :d.img_w] for instance in instances])
        rcnn_scores.extend([
            s * HyperParams.get().rcnn_score_rescale for s in scores
        ])  # rescale scores
    total_instances = []
    total_scores = []
    # TODO : UNet Ensemble
    for idx, data in enumerate(self.ensembles['unet']):
        if set_type == 'train':
            instances, scores = data['valid_instances'].get(single_id, (None, None))
        else:
            instances, scores = data['test_instances'].get(single_id, (None, None))
        if instances is None:
            logger.warning('Not found id=%s in UNet %d Model' % (single_id, idx + 1))
            continue
        total_instances.extend(instances)
        total_scores.extend(scores)
        # if single_id in ['646f5e00a2db3add97fb80a83ef3c07edd1b17b1b0d47c2bd650cdcab9f322c0']:
        #     # take too long
        #     logger.warning('no ensemble id=%s' % single_id)
        #     break
    watch = StopWatch()
    watch.start()
    logger.debug('voting+ size=%d' % len(total_instances))
    # TODO : Voting?
    voting_th = HyperParams.get().ensemble_voting_th
    rects = [get_rect_of_mask(a) for a in total_instances]
    voted = []
    for i, x in enumerate(total_instances):
        voted.append(
            filter_by_voting(
                (x, total_instances, voting_th, 0.3, rects[i], rects)))
    total_instances = list(compress(total_instances, voted))
    total_scores = list(compress(total_scores, voted))
    watch.stop()
    logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    # nms
    watch.start()
    logger.debug('nms+ size=%d' % len(total_instances))
    instances, scores = Network.nms(
        total_instances, total_scores, None,
        thresh=HyperParams.get().ensemble_nms_iou)
    watch.stop()
    logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
    watch.reset()
    # high threshold if not exists in RCNN
    if rcnn_ensemble:
        voted = []
        for i, x in enumerate(instances):
            voted.append(filter_by_voting((x, rcnn_instances, 1, 0.3, None, None)))
        new_instances = []
        new_scores = []
        # Keep an instance if RCNN agrees, or if its own score is very high.
        for instance, score, v in zip(instances, scores, voted):
            if v:
                new_instances.append(instance)
                new_scores.append(score)
            elif score > HyperParams.get().ensemble_th_no_rcnn:
                new_instances.append(instance)
                new_scores.append(score)
        instances, scores = new_instances, new_scores
        # nms with rcnn
        instances = instances + rcnn_instances
        scores = scores + rcnn_scores
        watch.start()
        logger.debug('nms_rcnn+ size=%d' % len(instances))
        instances, scores = Network.nms(
            instances, scores, None,
            thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms_rcnn- size=%d elapsed=%.5f' %
                     (len(instances), watch.get_elapsed()))
        watch.reset()
    # remove overlaps
    logger.debug('remove overlaps+')
    # Smallest masks first (reverse=False), unlike single-model postprocess.
    sorted_idx = [
        i[0] for i in sorted(enumerate(instances),
                             key=lambda x: get_size_of_mask(x[1]),
                             reverse=False)
    ]
    instances = [instances[x] for x in sorted_idx]
    scores = [scores[x] for x in sorted_idx]
    instances2 = [ndimage.morphology.binary_fill_holes(i) for i in instances]
    instances2, scores2 = Network.remove_overlaps(instances2, scores)
    # remove deleted instances
    logger.debug('remove deleted+ size=%d' % len(instances2))
    voted = []
    for x in instances2:
        voted.append(filter_by_voting((x, instances, 1, 0.75, None, None)))
    instances = list(compress(instances2, voted))
    scores = list(compress(scores2, voted))
    # TODO : Filter by score?
    logger.debug('filter by score+ size=%d' % len(instances))
    score_filter_th = HyperParams.get().ensemble_score_th
    if score_filter_th > 0.0:
        logger.debug('filter_by_score=%.3f' % score_filter_th)
        # NOTE(review): the second comprehension zips the already-filtered
        # `instances` with `scores`, which can misalign pairs — verify.
        instances = [
            i for i, s in zip(instances, scores) if s > score_filter_th
        ]
        scores = [
            s for i, s in zip(instances, scores) if s > score_filter_th
        ]
    logger.debug('finishing+ size=%d' % len(instances))
    image = d.image(is_gray=False)
    score_desc = []
    labels = []
    if len(d.masks) > 0:
        # has label masks
        labels = list(d.multi_masks(transpose=False))
        tp, fp, fn = get_multiple_metric(thr_list, instances, labels)
        logger.debug('instances=%d, labels=%d' % (len(instances), len(labels)))
        for i, thr in enumerate(thr_list):
            desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
            logger.debug(desc)
            score_desc.append(desc)
        score = np.mean(tp / (tp + fp + fn))
        logger.debug('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' %
                     (score, np.mean(tp), np.mean(fp), np.mean(fn)))
    else:
        score = 0.0
    if show:
        img_vis = Network.visualize(image, labels, instances, None)
        cv2.imshow('valid', img_vis)
        cv2.waitKey(0)
    else:
        return {
            'instance_scores': scores,
            'score': score,
            'image': image,
            'instances': instances,
            'labels': labels,
            'score_desc': score_desc
        }
def test_export_default(self):
    """Explicitly passing None for both export hooks must work like the default."""
    watch = StopWatch(
        export_tracing_func=None,
        export_aggregated_timers_func=None,
    )
    with watch.timer('root'):
        pass
#!python
from trie import Trie
import datetime
import pickle
from stopwatch import StopWatch

stopwatch = StopWatch()


def find_call_cost(phone_numbers: [str], trie: Trie) -> [str]:
    """
    Given list of phone numbers and trie will return list of costs for
    calling numbers

    Params:
        phone_numbers: array of strings that are phone numbers
            phone number format --> +19131232342
        trie: a trie containing phone number prefix, cost data

    Returns:
        list of per-number costs, in the same order as phone_numbers
    """
    costs_list = []
    # Iterate over phone numbers, running the trie search on each one and
    # appending the cost returned from the search into the cost list.
    for number in phone_numbers:
        # Strip the leading '+' so the key matches the trie's digit-only prefixes.
        num = number[1:]
        cost = trie.search(num)
        costs_list.append(cost)
    # BUG FIX: the original built costs_list but never returned it, so every
    # caller received None despite the documented contract.
    return costs_list
def main():
    """Measure how long importing compute_pi (and thus its top-level work) takes."""
    watch = StopWatch()
    watch.start()
    # Importing the module triggers its module-level computation; that import
    # is exactly the work being timed here.
    import compute_pi
    watch.stop()
    print("Elapsed time:", round(watch.time(), 2), "seconds")
def test_default_exports(self):
    """Smoke test: a StopWatch built with default export hooks accepts timers."""
    watch = StopWatch()
    add_timers(watch)
def load_queries(self):
    """Harvest SPARQL SELECT queries from the DBpedia query log, run each
    against the live endpoint, and write three datasets (train / validation /
    test, split 60/20/20) of query text, measured latency, and feature vectors.

    NOTE(review): indentation reconstructed from a collapsed source line —
    verify block nesting against the original file.
    """
    if not os.path.exists(DIRECTORY):
        os.makedirs(DIRECTORY)
    # 60/20/20 split of the total number of queries to collect.
    data_split = int(TOTAL_QUERY*0.6)
    validation_split = int(TOTAL_QUERY*0.2)
    test_split = int(TOTAL_QUERY*0.2)  # NOTE(review): computed but never used below
    print "data_split", data_split
    print "validation_split", validation_split
    print "test_split", test_split
    f = open(DBPEDIA_QUERY_LOG,'rb')
    # Training-set output files: raw query text, observed latency, features.
    fq = open(DIRECTORY+"x_query.txt",'w')
    ft = open(DIRECTORY+"y_time.txt",'w')
    ff = open(DIRECTORY+"x_features.txt",'w')
    x_f_csv = csv.writer(ff)
    sparql = SPARQLWrapper(DBPEDIA_ENDPOINT)
    f_extractor = FeatureExtractor()
    sw1 = StopWatch()  # times each individual query execution
    sw2 = StopWatch()  # times overall progress for the periodic log line
    print_log_split = int(TOTAL_QUERY/10)  # emit a progress line every 10%
    count =0
    for line in f:
        if count%print_log_split==0:
            print count," queries processed in ",sw2.elapsed_seconds()," seconds"
        if(count>=TOTAL_QUERY):
            break
        # Rotate the output files when crossing the train/validation and
        # validation/test boundaries.
        if count == data_split:
            fq.close()
            ft.close()
            ff.close()
            fq = open(DIRECTORY+"xval_query.txt",'w')
            ft = open(DIRECTORY+"yval_time.txt",'w')
            ff = open(DIRECTORY+"xval_features.txt",'w')
            x_f_csv = csv.writer(ff)
        elif count == (data_split+validation_split):
            fq.close()
            ft.close()
            ff.close()
            fq = open(DIRECTORY+"xtest_query.txt",'w')
            ft = open(DIRECTORY+"ytest_time.txt",'w')
            ff = open(DIRECTORY+"xtest_features.txt",'w')
            x_f_csv = csv.writer(ff)
        try:
            # The 7th whitespace-separated log field holds the quoted request
            # URL; strip the surrounding quote characters.
            row = line.split()
            query_log = row[6][1:-1]
            #print query_log
            par = urlparse.parse_qs(urlparse.urlparse(query_log).query)
            #util.url_decode(row[6])
            sparql_query = par['query'][0]
            # Only SELECT queries are timed; everything else is skipped.
            if sparql._parseQueryType(sparql_query) != SELECT:
                continue
            #print sparql_query
            #print row
            sparql_query = f_extractor.get_dbp_sparql(sparql_query)
            #print sparql_query
            feature_vector = f_extractor.get_features(sparql_query)
            if feature_vector == None:
                print "feature vector not found"
                continue
            # Execute against the endpoint and record wall-clock latency.
            sparql.setQuery(sparql_query)
            sparql.setReturnFormat(JSON)
            sw1.reset()
            results = sparql.query().convert()
            elapsed = sw1.elapsed_milliseconds()
            result_rows = len(results["results"]["bindings"])
            # if result_rows == 0:
            #     continue
            # print "QUERY =", sparql_query
            # print "feature vector:",feature_vector
            # print elapsed, "seconds"
            # print results
            # print "rows", result_rows
            # print "-----------------------"
            fq.write(query_log+'\n')
            ft.write(str(elapsed)+'\n')
            x_f_csv.writerow(feature_vector)
            count += 1
        except Exception as inst:
            # Best-effort harvesting: malformed log lines or endpoint failures
            # are reported and skipped rather than aborting the run.
            print "Exception", inst
    f.close()
    fq.close()
    ft.close()
    ff.close()
    print count, "queries processed"
def test_stopwatch_full_cancel(self):
    """Test that an entire span - from root to children, can be cancelled."""
    watch = StopWatch()
    # Open the spans root -> child -> grand, then cancel them in LIFO order.
    for span in ('root', 'child', 'grand'):
        watch.start(span)
    for span in ('grand', 'child', 'root'):
        watch.cancel(span)
    assert not watch.get_last_aggregated_report()
    assert not watch._cancelled_spans

    # Same scenario through the context-manager API: each span is cancelled
    # from within its own scope, innermost first.
    watch = StopWatch()
    with watch.timer('root'):
        with watch.timer('child'):
            with watch.timer('grandchild'):
                watch.cancel('grandchild')
            watch.cancel('child')
        watch.cancel('root')
    assert not watch.get_last_aggregated_report()
    assert not watch._cancelled_spans
def unique1(s):
    """Return True if there are no duplicate elements in sequence s."""
    # For each element, look for an equal element anywhere later on.
    for j, item in enumerate(s):
        if item in s[j+1:]:
            return False
    return True


def unique2(s):
    """Return True if there are no duplicate elements in sequence s."""
    # After sorting, any duplicates must sit in adjacent positions.
    ordered = sorted(s)
    return all(a != b for a, b in zip(ordered, ordered[1:]))


if __name__ == '__main__':
    # Double the input size each round and report the timing ratio between
    # consecutive rounds to observe the growth rate empirically.
    n = 20
    elapsed_time = []
    for trial in range(4):
        sample = [randint(0, 10000) for _ in range(n)]
        watch = StopWatch()
        unique2(sample)
        elapsed_time.append(watch.elapsed())
        ratio = elapsed_time[trial] / elapsed_time[trial - 1] if trial > 0 else None
        print(n, elapsed_time[trial], ratio)
        n *= 2
# This file is used solely for quick tests. It can be, at any time, completely omitted from the project. from kdfinder import KDFinder import numpy as np import matplotlib.pyplot as plt import helper as h from stopwatch import StopWatch from kdtree import BucketedKDTree, KDTree a = np.random.rand(5000, 2) sw = StopWatch() obt = BucketedKDTree(a, optimized=True) sw.reset('Build time for Optimized BKD') bt = BucketedKDTree(a) sw.reset('Build time for BKD') t = KDTree(a) sw.reset('Build time for regular KD') for value in a: if not obt.has(value): print 'Missing Value!!' sw.reset('Traversal time for Optimized BKD') for value in a: if not bt.has(value): print 'Missing Value!!' sw.reset('Traversal time for BKD') for value in a: if not t.has(value): print 'Missing Value!!' sw.reset('Traversal time for regular KD')