def threadJob(URL):
    # Worker loop: repeatedly GET `URL` and accumulate latency / status-code
    # statistics into module-level counters until `isDone` is set elsewhere.
    global isDone, lockData
    global data
    global timeTotal
    global requestCount, successCount, failureCount
    while (not isDone):
        # NOTE(review): thread_time_ns() is per-thread CPU time, which does
        # NOT advance while blocked on network I/O — the measured "latency"
        # is almost certainly not wall-clock request time.  perf_counter_ns()
        # would be the usual choice; confirm intent.
        startTime = time.thread_time_ns()
        r = requests.get(
            URL,
        )  #assumption is any request so get request for simplicity
        endTime = time.thread_time_ns()
        status = r.status_code
        # NOTE(review): this boolean spin "lock" is not atomic — two threads
        # can both observe lockData == False and enter the critical section
        # together.  threading.Lock would be the safe replacement.
        while (lockData):  #wait for lock so the thread working on it can finish
            pass
        lockData = True  #lock global variables
        if (int(status / 100) == 2):
            timeTotal += (endTime - startTime)
            successCount += 1
        if int(status / 100) == 4 or int(status / 100) == 5:
            failureCount += 1
        requestCount += 1
        if (status in data):
            data[status] += 1
        else:
            data[status] = 1
        lockData = False  #unlock global variables
def function_test_speed(random_integer):
    """Factor `random_integer`, invert the factorization, and print the
    per-thread CPU time (ns) consumed by each of the two steps."""
    before = time.thread_time_ns()
    factors = factorint(random_integer)
    after = time.thread_time_ns()
    print(after - before)
    before = time.thread_time_ns()
    inverse_result = inverse_factorint(factors)
    after = time.thread_time_ns()
    print(after - before)
def timed_function(*args):
    """Invoke the enclosing `func` with `args`, print the elapsed thread
    CPU time in milliseconds, and pass `func`'s result through."""
    before = thread_time_ns()
    outcome = func(*args)
    after = thread_time_ns()
    elapsed_ms = float((after - before) / (10**6))
    print('Time taken for test to conclude: '
          f'{elapsed_ms} (in milliseconds)',
          end='\n\n')
    return outcome
def test_thread_time_ns(self): import time if not hasattr(time, 'thread_time_ns'): skip("need time.thread_time_ns") t1 = time.thread_time_ns() assert isinstance(t1, int) time.sleep(0.1) t2 = time.process_time_ns() # process_thread_ns() should not include time spent during sleep assert (t2 - t1) < 5 * 10**7 assert abs(time.thread_time() - time.thread_time_ns() * 1e-9) < 0.1
def chat1(user_responsive):
    """Handle one user utterance: append it to the chat log, normalise it,
    and either report the session timer (on 'exit'/'quit') or generate,
    print and return a bot response."""
    # BUG FIX: the body read an undefined name `user_response` while the
    # parameter was spelled `user_responsive`; bind it so the function is
    # callable.  (Signature kept for existing callers.)
    user_response = user_responsive
    # Append the raw utterance to the persistent log, skipping 'quit'.
    # (Originally the file handle shadowed the name `self`.)
    with open('chatbot.txt', 'a') as log_file:
        if user_response == 'quit':
            pass
        else:
            log_file.write(user_response + '\n' '\n')
    short_term_mem = {"convo": []}
    user_response = Inflect_pronouns(user_response)
    user_response = user_response.lower()
    short_term_mem["convo"].append(user_response)
    if (user_response != ''):
        if (user_response == 'exit' or user_response == 'quit'):
            # Report elapsed thread CPU time in seconds.
            print('NANOSEC TIME STOP: ', time.thread_time_ns() / 1000000000)
        else:
            print('\n', end=" ")
            responsive = (response(user_response))
            print(responsive + '\n')
            sent_tokens.remove(user_response)
            rejuvinate(1)
            # NOTE(review): return placement reconstructed from a flattened
            # original — on the exit/quit path nothing is returned.
            return responsive
def __init__(self, k : int, data : np.array): self._k : int = k self._point_dim : tuple = (1, data.shape[1]) # Setting up the points array: self._points_data = np.array(data) self._num_points : int = self._points_data.shape[0] np.reshape(self._points_data, (self._num_points, self._point_dim[1])) # Creating a random seed: random.seed(time.thread_time_ns()) # Initializing the data arrays: self._clusters : np.array = np.zeros(self._num_points, dtype=int) self._centroids = list([]) for i in range(self._k): self._centroids.append(np.zeros(self._point_dim)) # Initializing the distubed data arrays: self._disturbed_clusters : np.array = np.zeros(self._num_points, dtype=int) self._disturbed_centroids = list([]) for i in range(self._k): self._disturbed_centroids.append(np.zeros(self._point_dim)) # Initializing the cost of the regular state and the disturbed state: self._sse : float = 0 self._disturbed_sse : float = 0 # Initializing the list of sse's for each cluster: self._sse_per_cluster : list = [] for i in range(self._k): self._sse_per_cluster.append(0.)
def random():
    """Linear congruential generator seeded from the current thread CPU
    clock; returns a pseudo-random integer in [0, 2**32)."""
    multiplier = 8974556217
    increment = 524556445
    modulus = 2**32
    seed = time.thread_time_ns()
    return (multiplier * seed + increment) % modulus
def stop(self, text: Optional[str] = None) -> None:
    """
    Stops a timing block.

    Args:
        text: Text of the timing block.
    """
    # A stop without a matching start is a no-op apart from the warning.
    if self._start_time is None:
        logger.warning(
            "Timer hasn't been started yet. Use .start() to start it first."
        )
        return
    if text is not None:
        self.text = str(text)
    finished = Block(
        start_time=self._start_time,
        stop_time=perf_counter_ns(),
        thread_duration=thread_time_ns() - self._start_thread_time,
        text=self.text,
    )
    self.blocks.append(finished)
    # Reset so the next start()/stop() pair begins cleanly.
    self._start_time = None
    self.text = None
    if not self._is_main_process:
        # Python doesn't honor `atexit` registrations in forked processes
        # (https://bugs.python.org/issue39675).  When running in a child
        # process, generate the report immediately.
        self.save_report(is_main_process=False)
def restart(self, new_seed=True):
    # Reset all per-game state and run the match again.  With new_seed=True
    # a fresh map seed is drawn; otherwise self.seed is reused.
    if self.__started:
        self.print("Force restarting")
        [p.reset() for p in self.__players]
    self.__started = True
    self.__gameover = False
    self.__turns = 0
    self.__actions.clear()
    self.__move_count = 0
    self.__idle_count = [0, 0]
    # NOTE(review): seeding from thread_time_ns() is low-entropy; confirm
    # whether reproducible seeding is required here.
    random.seed(time.thread_time_ns())
    if not self.__state:
        # First run: build a fresh game state and generate the map.
        self.__state = GameState(random.randint(0, 2 ** 31) if new_seed else self.seed,
                                 self.__league)
        self.__state.generate_map(self.__league)
    else:
        self.__state.reset()
    self.__state.create_hq(PLAYER_COUNT)
    # Random player goes first.
    self.current_player = random.choice(self.__players)
    self.send_init_messages()
    # Blocks until the game ends.
    while not self.__gameover:
        self.gameloop()
def refresh(self):
    """Drain the init and pipeline-input queues, applying queued pipeline
    edits and servicing 'len'/'key'/'del'/'time' requests, then clear the
    refresh event."""
    if self.refresh_ev.is_set():
        while not self.init_queue.empty():  # init
            # Queued initialisers are callables taking this instance.
            self.init_queue.get()(self)
        while not self._pipinput_queue.empty():  # init
            args = self._pipinput_queue.get()
            # append mode
            if args[0] is None:
                self._pipeline.append(args[1])
            # insert mode: (index, stage)
            elif type(args[0]) == int:
                self._pipeline.insert(*args)
            elif type(args[0]) == str:
                if args[0] == 'len':
                    self._pipoutput_queue.put(('len', len(self._pipeline)))
                elif args[0] == 'key':
                    self._pipoutput_queue.put(
                        ('key', self._pipeline[args[1]]))
                elif args[0] == 'del':
                    self.input_line.empty()
                    del self._pipeline[args[1]]
                elif args[0] == 'time':
                    self.input_line.empty()
                    t0 = tdiff = 0
                    if self._list_dispatch:
                        # BUG FIX: this branch called the nonexistent
                        # time.thread_thread_time_ns(), raising
                        # AttributeError whenever list dispatch was on.
                        t0 = time.thread_time_ns()
                        self._run_dispatched()
                        tdiff = time.thread_time_ns() - t0
                    else:
                        t0 = time.thread_time_ns()
                        self._run_norm()
                        tdiff = time.thread_time_ns() - t0
                    # Report elapsed thread CPU time in microseconds.
                    self._pipoutput_queue.put(('time', tdiff * 1e-3))
        self.refresh_ev.clear()
def gpioControl(): global data_array # thread_time_ns t = time.thread_time_ns() summ = 0 for i in range(0, len(data_array)): summ = summ + data_array[i] if i % 2: #"space" #print("space: " + str(data_array[i]/1000)) led.off() while (time.thread_time_ns() - t) <= summ: pass else: #"mark" #print("mark: " + str(data_array[i]/1000)) led.on() while (time.thread_time_ns() - t) <= summ: pass led.off()
def times():
    """Return a snapshot of the stdlib clocks, keyed by clock name."""
    snapshot = {}
    snapshot['monotonic'] = time.monotonic()
    # time.clock() was removed in Python 3.8 and is intentionally omitted.
    snapshot['time'] = time.time()
    snapshot['time_ns'] = time.time_ns()
    snapshot['perf_counter_ns'] = time.perf_counter_ns()
    snapshot['process_time_ns'] = time.process_time_ns()
    snapshot['thread_time_ns'] = time.thread_time_ns()
    snapshot['gmtime'] = time.gmtime()
    return snapshot
def track(self, w):
    # Record per-bird prediction error for world `w` into ring buffers of
    # length max_length, update running averages, and flag suspicious birds.
    self.good_error_sum = 0.0
    self.bad_error_sum = 0.0
    # Advance and wrap the ring-buffer cursor.
    self.current_index += 1
    if self.current_index == self.max_length:
        self.current_index = 0
    errors = []
    avg_errors = []
    for t in range(0, self.good_count + self.bad_count):
        # NOTE(review): current_t is assigned but never read in this loop.
        current_t = time.thread_time_ns()
        bird = w.birds[t]
        ps, vs, v2s = self.infer_measurements(w, bird)
        target = self.infer_target(w, bird)
        prediction = Bird.flock(ps, vs, target, bird.old_v)
        # Average the observed neighbour velocities.
        avg_v_observed = pygame.Vector2(0, 0)
        for v in v2s:
            avg_v_observed += v
        avg_v_observed /= len(v2s)
        # Error is the distance between predicted and observed mean velocity.
        error = (prediction - avg_v_observed).length()
        errors.append(error)
        avg_errors.append(self.calculate_running_avg(t, error))
    # Append until the ring buffer is full, then overwrite in place.
    if len(self.errors) < self.max_length:
        self.errors.append(errors)
    else:
        self.errors[self.current_index] = errors
    if len(self.avg_errors) < self.max_length:
        self.avg_errors.append(avg_errors)
    else:
        self.avg_errors[self.current_index] = avg_errors
    # Mean and median of this frame's errors.
    self.avg_error = 0
    for error in errors:
        self.avg_error += error
    self.avg_error /= len(w.birds)
    self.median_error = sorted(avg_errors)[int(len(w.birds) / 2)]
    self.mark_suspicious_birds(w)
def read_cam(cap, yaw, pitch):
    # Main camera loop: grab frames, detect a face, run the pan/tilt
    # controller, and show the annotated frame until ESC or window close.
    p_error_x = 0
    p_error_y = 0
    period = 0.16  # NOTE(review): unused in the visible body
    while True:
        # NOTE(review): thread_time_ns() excludes time blocked in camera /
        # GUI I/O, so the printed "Time" underestimates wall-clock frame
        # time — confirm whether CPU time was really intended here.
        start_time = time.thread_time_ns()
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        _, img = cap.read()  # grab the next image frame from camera
        image, face_cascade, gray = face_detect(cap, img)
        # PD-style controller; previous-frame errors feed the D term.
        yaw, pitch, error_x, error_y = controller(face_cascade, gray, yaw,
                                                  pitch, p_error_x,
                                                  p_error_y)
        p_error_x = error_x
        p_error_y = error_y
        cv2.imshow(WINDOW_NAME, image)
        key = cv2.waitKey(5)
        if key == 27:  # ESC key: quit program
            break
        print("Time:", (time.thread_time_ns() - start_time) / 1000000)
        print("\033c")  # ANSI full-reset escape: clears the terminal
def function_action_execute(self, target, source, env):
    # Run the wrapped build action under line-by-line memory profiling,
    # recording wall time, thread CPU time and memory growth into the
    # shared per-action metrics list; returns the action's own result.
    task_metrics = {
        'outputs': [str(t) for t in target],
        'inputs': [str(s) for s in source],
        'action': fullname(self.original_func),
        'builder': target[0].get_builder().get_name(target[0].get_env()),
    }
    profile = memory_profiler.LineProfiler(include_children=False)
    task_metrics['start_time'] = time.time_ns()
    thread_start_time = time.thread_time_ns()
    return_value = profile(self.original_func)(target=target,
                                               source=source,
                                               env=env)
    # CPU time consumed by this thread only (excludes blocked I/O time).
    task_metrics['cpu_time'] = time.thread_time_ns() - thread_start_time
    task_metrics['end_time'] = time.time_ns()
    memory_increases_per_line = []
    for (file_where_code_is, lines_of_code) in profile.code_map.items():
        # skip the first item in the list because this is just the initial
        # memory state, and we are interested just in the increases
        for (line_number, memory_usage) in list(lines_of_code)[1:]:
            if memory_usage:
                memory_increase = memory_usage[0]
                memory_increases_per_line.append(memory_increase)
    # Convert the summed per-line increases to bytes.
    # NOTE(review): assumes memory_profiler reports MiB — confirm.
    task_metrics['mem_usage'] = int(
        sum(memory_increases_per_line) * 1024 * 1024)
    self.per_action_instance.build_tasks_metrics.append(task_metrics)
    # Record this entry's position within the shared metrics list.
    task_metrics[
        'array_index'] = self.per_action_instance.build_tasks_metrics.index(
            task_metrics)
    return return_value
def update(self):
    """
    Updates the load profiling.

    Samples the thread CPU clock and the monotonic timer, appends the
    CPU-load ratio for the interval since the previous sample, and
    advances the stored baselines.
    """
    thread_time = time.thread_time_ns()
    monotonic_time = TIMER()
    # Guard against a zero-length interval (division by zero).
    if monotonic_time == self._lastMonotonicTime:
        return
    cpu_delta = thread_time - self._lastThreadTime
    wall_delta = monotonic_time - self._lastMonotonicTime
    load = cpu_delta / wall_delta
    self._lastThreadTime = thread_time
    self._lastMonotonicTime = monotonic_time
    self._measurements.append((monotonic_time, load))
def start(self, text: Optional[str] = None) -> None:
    """
    Starts a timing block.

    Args:
        text: Text of the timing block.
    """
    # Guard: refuse to start an already-running timer.
    if self._start_time is not None:
        logger.warning(
            "Timer can't be started twice. Use .stop() to stop it first.")
        return
    if text is not None:
        self.text = str(text)
    # Capture both the wall clock and this thread's CPU clock so the
    # matching stop() can report wall and thread durations.
    self._start_time = perf_counter_ns()
    self._start_thread_time = thread_time_ns()
def update(self, dt):
    # Advance the simulation by dt and return a breakdown of thread CPU
    # time (ns) spent in each phase:
    # [distances, bird updates, velocity updates, position updates,
    #  neighbour-update share, measurement-update share].
    t_start = time.thread_time_ns()
    self.calculate_distances()
    t_distances = time.thread_time_ns() - t_start
    update_neighbor_t = 0
    update_measurements_t = 0
    current_t = time.thread_time_ns()
    for bird in self.birds:
        bird.update_neighbours(self)
        new_t = time.thread_time_ns()
        update_neighbor_t += new_t - current_t
        current_t = new_t
        bird.update_measurements(self)
        new_t = time.thread_time_ns()
        update_measurements_t += new_t - current_t
        current_t = new_t
        bird.old_v = bird.v
    t_update_birds = time.thread_time_ns() - t_start - t_distances
    # Velocities are computed for all birds before any positions change,
    # so every bird sees the same (old) world state.
    for bird in self.birds:
        bird.v = bird.calculate_v(self)
    t_update_vs = time.thread_time_ns(
    ) - t_start - t_distances - t_update_birds
    for bird in self.birds:
        bird.update_p(dt)
    t_update_ps = time.thread_time_ns(
    ) - t_start - t_distances - t_update_birds - t_update_vs
    return [
        t_distances, t_update_birds, t_update_vs, t_update_ps,
        update_neighbor_t, update_measurements_t
    ]
def test_time_ns_type(self): def check_ns(sec, ns): self.assertIsInstance(ns, int) sec_ns = int(sec * 1e9) # tolerate a difference of 50 ms self.assertLess((sec_ns - ns), 50**6, (sec, ns)) check_ns(time.time(), time.time_ns()) check_ns(time.monotonic(), time.monotonic_ns()) check_ns(time.perf_counter(), time.perf_counter_ns()) check_ns(time.process_time(), time.process_time_ns()) if hasattr(time, 'thread_time'): check_ns(time.thread_time(), time.thread_time_ns()) if hasattr(time, 'clock_gettime'): check_ns(time.clock_gettime(time.CLOCK_REALTIME), time.clock_gettime_ns(time.CLOCK_REALTIME))
def test_time_ns_type(self): def check_ns(sec, ns): self.assertIsInstance(ns, int) sec_ns = int(sec * 1e9) # tolerate a difference of 50 ms self.assertLess((sec_ns - ns), 50 ** 6, (sec, ns)) check_ns(time.time(), time.time_ns()) check_ns(time.monotonic(), time.monotonic_ns()) check_ns(time.perf_counter(), time.perf_counter_ns()) check_ns(time.process_time(), time.process_time_ns()) if hasattr(time, 'thread_time'): check_ns(time.thread_time(), time.thread_time_ns()) if hasattr(time, 'clock_gettime'): check_ns(time.clock_gettime(time.CLOCK_REALTIME), time.clock_gettime_ns(time.CLOCK_REALTIME))
def stop(self):
    """Record the stop timestamp (thread CPU clock, ns) and derive the
    elapsed time in milliseconds since the matching start()."""
    now = time.thread_time_ns()
    self.stop_time = now
    self.elapsed_ms = (now - self.start_time) / 1000000
def flush_queue(self):
    # Drain one batch of traces from the queue, run them through the
    # configured filters, ship them to the API, and (when stats are
    # enabled) emit health metrics about the whole cycle.
    try:
        traces = self._trace_queue.get(block=False)
    except Empty:
        # Nothing queued — nothing to do.
        return
    send_stats = self._send_stats()
    if send_stats:
        traces_queue_length = len(traces)
        traces_queue_size = sum(map(sizeof.sizeof, traces))
        traces_queue_spans = sum(map(len, traces))
    # Before sending the traces, make them go through the
    # filters
    try:
        traces = self._apply_filters(traces)
    except Exception as err:
        log.error('error while filtering traces: {0}'.format(err))
        return
    if send_stats:
        # NOTE(review): filtered count is (post - pre) and thus <= 0 here;
        # confirm the intended sign convention for this metric.
        traces_filtered = len(traces) - traces_queue_length
    # If we have data, let's try to send it.
    traces_responses = self.api.send_traces(traces)
    for response in traces_responses:
        if isinstance(response, Exception) or response.status >= 400:
            self._log_error_status(response)
        elif self._priority_sampler:
            # Successful responses may carry updated per-service sample
            # rates for the priority sampler.
            result_traces_json = response.get_json()
            if result_traces_json and 'rate_by_service' in result_traces_json:
                self._priority_sampler.set_sample_rate_by_service(
                    result_traces_json['rate_by_service'])
    # Dump statistics
    # NOTE: Do not use the buffering of metrics_client as it's not thread-safe
    # https://github.com/opentelemetry/datadogpy/issues/439
    if send_stats:
        # Statistics about the queue length, size and number of spans
        self.metrics_client.gauge('opentelemetry.tracer.queue.max_length',
                                  self._trace_queue.maxsize)
        self.metrics_client.gauge('opentelemetry.tracer.queue.length',
                                  traces_queue_length)
        self.metrics_client.gauge('opentelemetry.tracer.queue.size',
                                  traces_queue_size)
        self.metrics_client.gauge('opentelemetry.tracer.queue.spans',
                                  traces_queue_spans)
        # Statistics about the rate at which spans are inserted in the queue
        dropped, enqueued, enqueued_lengths, enqueued_size = self._trace_queue.reset_stats(
        )
        self.metrics_client.increment('opentelemetry.tracer.queue.dropped',
                                      dropped)
        self.metrics_client.increment(
            'opentelemetry.tracer.queue.accepted', enqueued)
        self.metrics_client.increment(
            'opentelemetry.tracer.queue.accepted_lengths', enqueued_lengths)
        self.metrics_client.increment(
            'opentelemetry.tracer.queue.accepted_size', enqueued_size)
        # Statistics about the filtering
        self.metrics_client.increment(
            'opentelemetry.tracer.traces.filtered', traces_filtered)
        # Statistics about API
        self.metrics_client.increment('opentelemetry.tracer.api.requests',
                                      len(traces_responses))
        self.metrics_client.increment(
            'opentelemetry.tracer.api.errors',
            len(
                list(t for t in traces_responses
                     if isinstance(t, Exception))))
        # Group the successful responses by HTTP status for per-status
        # counters (groupby needs the pre-sort by the same key).
        for status, grouped_responses in itertools.groupby(
                sorted((t for t in traces_responses
                        if not isinstance(t, Exception)),
                       key=lambda r: r.status),
                key=lambda r: r.status):
            self.metrics_client.increment(
                'opentelemetry.tracer.api.responses',
                len(list(grouped_responses)),
                tags=['status:%d' % status])
        # Statistics about the writer thread
        if hasattr(time, 'thread_time_ns'):
            # NOTE(review): this reports the absolute thread CPU clock, not
            # a delta, into an increment() counter — confirm that is
            # intended.
            self.metrics_client.increment(
                'opentelemetry.tracer.writer.cpu_time',
                time.thread_time_ns())
def main(params=None):
    # CLI entry point: locate the query photo's position and heading
    # against a set of reference image models.  User-facing Chinese help /
    # log strings are runtime output and are preserved verbatim.
    parser = argparse.ArgumentParser(description='根据参考图片,定位查询图片的位置和朝向')
    parser.add_argument('qryimage', metavar='QUERY', nargs=1, help='查询图片')
    parser.add_argument('refimages', metavar='IMAGES', nargs='+', help='参考图片模型文件')
    parser.add_argument('--homography', action='store_true', help='是否使用 homography 过滤匹配结果')
    parser.add_argument('--fundamental', action='store_true', help='是否使用 fundamental 过滤匹配结果')
    parser.add_argument('--save', action='store_true', help='是否保存定位结果')
    parser.add_argument('--reject', type=int, default=6, help='匹配失败阀值,小于该值认为是失败,默认值为6')
    parser.add_argument('--accept', type=int, default=100, help='匹配成功阀值,大于该值认为匹配成功,默认值100')
    parser.add_argument('--output', default="", help='输出文件的路径')
    parser.add_argument('--focals', default="0.9722,0.7292", help='相机内参(fx,fy)')
    parser.add_argument('--size', default="2448,3264", help='查询照片大小(w,h)')
    parser.add_argument('--solve', default="P3P",
                        choices=('ITERATIVE', 'P3P', 'AP3P', 'EPNP'),
                        help='选择定位算法,默认是 P3P')
    parser.add_argument('--debug', action='store_true', help='输出定位结果json文件')
    args = parser.parse_args(params)
    logging.info("查询图片: %s", args.qryimage[0])
    # (commented-out existence check for the reference files)
    # for filename in args.refimages:
    #     if not os.path.exists(filename):
    #         logging.warning('不存在的文件: %s', filename)
    #         return
    t1 = time.thread_time_ns()
    kp1, des1 = load_keypoint_data(args.qryimage[0])
    total, yaw, pos = 0, None, None
    index = None
    refImg = None
    imagePoints, points3d, refPoints = [], [], []
    # Try each reference image, keeping the pose with the most matched
    # keypoints.
    for i in range(len(args.refimages)):
        logging.info('使用第 %s 张照片进行匹配', i + 1)
        pose = query_image(args, args.refimages[i], kp1, des1)
        if pose is not None:
            logging.info('第 %s 张照片匹配成功', i + 1)
            if pose[0] > total:
                total, yaw, pos, imagePoints, points3d, refPoints = pose
                index = i
                refImg = os.path.basename(args.refimages[i])
            # Accept immediately once the match count exceeds --accept.
            if total > args.accept:
                break
    t2 = time.thread_time_ns()
    # NOTE(review): t1/t2 are nanoseconds, so `* 1000` does not produce the
    # milliseconds the log messages claim — `/ 10**6` looks intended.
    t = (t2 - t1) * 1000
    name = os.path.basename(args.qryimage[0]).rsplit('-', 1)[0]
    filename = os.path.join(args.output, name + '-pose.txt')
    if pos is None:
        # No reference matched: optionally record the failure and bail out.
        logging.info('%s 定位失败,耗时: %s', name, t)
        if args.save:
            logging.info("定位结果写入文件: %s", filename)
            with open(filename, "w") as f:
                f.write('%-8s %-8.2f %-8s\n' % (name, t, 'NaN'))
        return
    # Yaw relative to the reference camera, in degrees, clockwise positive.
    a = yaw.A.ravel()[0] * 180 / np.pi
    refname = os.path.splitext(os.path.basename(args.refimages[index]))[0]
    logging.info("使用参考照片 %s 的定位结果,消耗时间: %s 毫秒,匹配关键点: %d", refname, t, total)
    logging.info("相对参考相机的拍摄角度: %s", a)
    logging.info("相对参考相机的偏移: %s", pos.ravel())
    if args.debug:
        # NOTE(review): duplicated `if args.debug:` guard — harmless but
        # redundant.
        if args.debug:
            sw, sh = args.size.rsplit(',')
            points3d = points3d + pos
            image = {
                'url': name,
                'size': [int(sw), int(sh)],
                'points': imagePoints.tolist()
            }
            refimage = {
                "url": refImg,
                "size": [int(sw), int(sh)],
                'points': refPoints.tolist()
            }
            if os.path.exists('./debug') == False:
                os.makedirs('./debug')
            with open('./debug/' + name + '-' + refImg + '.json', 'w') as f:
                json.dump(
                    {
                        "image": image,
                        "points3d": points3d.tolist(),
                        "refimage": refimage
                    },
                    f,
                    indent=2)
    if args.save:
        logging.info("定位结果写入文件: %s", filename)
        with open(filename, "w") as f:
            f.write('%-8s %-8.2f %-6s %-6.2f %8.2f %8.2f %4d\n' %
                    (name, t, refname, a, pos[0], pos[2], total))
    return yaw, pos
# NOTE(review): the first statement below is the tail of a method whose
# `def` lies outside this chunk — it reschedules the heartbeat every 10 s.
threading.Timer(10, self.ping_heartbeat_details).start()

def close_connection(self):
    # close the connection
    self.socket_conn.close()

if __name__ == '__main__':
    # Script entry point: connect, start the data-collection thread, and
    # periodically push performance info until RUN_TIME minutes elapse.
    client = Client_a()
    client.create_socket_connection()
    message = client.get_cpu_memory_info()
    data_thread = Thread(target=client.sized_rotating_filehandler)
    data_thread.start()
    client.ping_heartbeat_details()
    thread_id = data_thread.ident
    # NOTE(review): thread_time_ns() returns a timestamp, not a clock id;
    # passing it to time.clock_gettime() will raise.  The intended clock
    # was probably time.CLOCK_THREAD_CPUTIME_ID — confirm.
    clk_id = time.thread_time_ns()
    print(data_thread, type(data_thread), time.clock_gettime(clk_id))

    def send_perf_info():
        # Send the perf payload on a worker thread, then reschedule self.
        job_thread_1 = Thread(target=client.send_data_to_server,
                              args=(message, ))
        job_thread_1.start()
        threading.Timer(10, send_perf_info).start()

    send_perf_info()
    time_check = datetime.datetime.now()
    while True:
        time.sleep(5)
        current_time = datetime.datetime.now()
        # Stop once RUN_TIME minutes have passed (same-hour comparison).
        if current_time.minute - time_check.minute > RUN_TIME:
            break
def wrap(*args, **kwargs):
    """Call the wrapped `func`, log its thread CPU time in milliseconds,
    and return its result unchanged."""
    t1 = time.thread_time_ns()
    result = func(*args, **kwargs)
    t2 = time.thread_time_ns()
    # BUG FIX: (t2 - t1) is in nanoseconds; multiplying by 1000 inflated
    # the logged "ms" value by a factor of 10**9.  ns -> ms is / 10**6.
    logging.info('%s: %s ms', func.__name__, (t2 - t1) / 10**6)
    return result
def parse_number(text, method_object, markers, variables):
    # Parse `text` into a number: plain literals, `time:*` keywords,
    # `rand=` ranges, or infix arithmetic evaluated by repeated textual
    # substitution (operator order: %, ^, *, /, +, -).
    if text.startswith('-'):
        # Negative value: parse the positive remainder and negate.
        return -parse_number(text.replace('-', '', 1), method_object, markers, variables)
    if str(try_to_num(text)) == text:
        return try_to_num(text)
    elif text.count('.') > 0:
        # Candidate float literal "a.b".
        args = text.split('.')
        text1 = get_num(args[0])
        text2 = get_num(args[1])
        if str(text1) + '.' + str(text2) == text:
            return float(text)
    # TIME NANO
    elif text == 'time:nano':
        return int(int(time.time_ns()) / 100)
    # thread CPU time, whole seconds
    elif text == 'time:thread':
        return int(time.thread_time())
    # thread CPU time, nanoseconds
    elif text == 'time:thread.nano':
        return int(time.thread_time_ns())
    # process CPU time, whole seconds
    elif text == 'time:process':
        return int(time.process_time())
    # process CPU time, nanoseconds
    elif text == 'time:process.nano':
        return int(time.process_time_ns())
    # TIME CLOCK (https://vstinner.github.io/python37-pep-564-nanoseconds.html)
    # NOTE(review): time.clock() was removed in Python 3.8 — this branch
    # raises AttributeError on modern interpreters.
    elif text == 'time:clock':
        return int(time.clock())
    # TIME
    elif text == 'time':
        return int(time.time())
    elif text == 'time:monotonic':
        return int(time.monotonic())
    # EXECUTION TIME
    elif text == 'time:execution':
        return int(time.perf_counter())
    # EXECUTION TIME
    elif text == 'time:execution.nano':
        return int(int(time.perf_counter_ns()) / 100)
    # RANDOM
    elif text.startswith('rand='):
        range_vals = text.replace('rand=', '')
        range_list = []
        # Accept '-', ',' or ':' as the range separator (checked against
        # the full text, so a '-' anywhere selects '-' splitting).
        if text.count('-') >= 1:
            range_list = range_vals.split('-', 1)
        elif text.count(',') >= 1:
            range_list = range_vals.split(',', 1)
        elif text.count(':') >= 1:
            range_list = range_vals.split(':', 1)
        if len(range_list) > 1:
            # Inclusive upper bound (+ 1 for randrange's exclusive stop).
            return random.randrange(
                int(parse_value_full(range_list[0], method_object, markers, variables)),
                int(parse_value_full(range_list[1], method_object, markers, variables)) + 1
            )
        elif len(range_list) == 1:
            return random.randrange(0, int(parse_value_full(range_list[0], method_object, markers, variables)) + 1)
        else:
            print(range_list)
            print(text)
    elif \
        text.count('+') > 0 or \
        text.count('-') > 0 or \
        text.count('*') > 0 or \
        text.count('/') > 0 or \
        text.count('%') > 0 or \
        text.count('^') > 0:
        # Arithmetic: for each operator occurrence, isolate the operand
        # tokens immediately left/right of it (by blanking out all other
        # operators and splitting on spaces), resolve them via
        # parse_value_full, and substitute the computed result back into
        # the text until no occurrences remain.
        while text.count('%') > 0:
            args = text.split('%', 1)
            text1 = args[0]
            text2 = args[1]
            text1 = text1\
                .replace('-', ' ')\
                .replace('+', ' ')\
                .replace('*', ' ')\
                .replace('^', ' ')\
                .replace('/', ' ')\
                .replace('%', ' ')
            text2 = text2\
                .replace('-', ' ')\
                .replace('+', ' ')\
                .replace('*', ' ')\
                .replace('^', ' ')\
                .replace('/', ' ')\
                .replace('%', ' ')
            args = text1.split(' ')
            text1_2 = args[len(args) - 1]
            args = text2.split(' ')
            text2_2 = args[0]
            text1_3 = parse_value_full(text1_2, method_object, markers, variables)
            text2_3 = parse_value_full(text2_2, method_object, markers, variables)
            text = text.replace(str(text1_2) + '%' + str(text2_2), str(mod(str(text1_3), str(text2_3))))
        while text.count('^') > 0:
            args = text.split('^', 1)
            text1 = args[0]
            text2 = args[1]
            text1 = text1.replace('-', ' ').replace('+', ' ').replace('*', ' ').replace('^', ' ').replace('/', ' ')
            text2 = text2.replace('-', ' ').replace('+', ' ').replace('*', ' ').replace('^', ' ').replace('/', ' ')
            args = text1.split(' ')
            text1_2 = args[len(args) - 1]
            args = text2.split(' ')
            text2_2 = args[0]
            text1_3 = parse_value_full(text1_2, method_object, markers, variables)
            text2_3 = parse_value_full(text2_2, method_object, markers, variables)
            text = text.replace(str(text1_2) + '^' + str(text2_2), str(power(str(text1_3), str(text2_3))))
        while text.count('*') > 0:
            args = text.split('*', 1)
            text1 = args[0]
            text2 = args[1]
            text1 = text1.replace('-', ' ').replace('+', ' ').replace('*', ' ').replace('/', ' ')
            text2 = text2.replace('-', ' ').replace('+', ' ').replace('*', ' ').replace('/', ' ')
            args = text1.split(' ')
            text1_2 = args[len(args) - 1]
            args = text2.split(' ')
            text2_2 = args[0]
            text1_3 = parse_value_full(text1_2, method_object, markers, variables)
            text2_3 = parse_value_full(text2_2, method_object, markers, variables)
            text = text.replace(str(text1_2) + '*' + str(text2_2), str(multiply(str(text1_3), str(text2_3))))
        while text.count('/') > 0:
            args = text.split('/', 1)
            text1 = args[0]
            text2 = args[1]
            text1 = text1.replace('-', ' ').replace('+', ' ').replace('/', ' ')
            text2 = text2.replace('-', ' ').replace('+', ' ').replace('/', ' ')
            args = text1.split(' ')
            text1_2 = args[len(args) - 1]
            args = text2.split(' ')
            text2_2 = args[0]
            text1_3 = parse_value_full(text1_2, method_object, markers, variables)
            text2_3 = parse_value_full(text2_2, method_object, markers, variables)
            text = text.replace(str(text1_2) + '/' + str(text2_2), str(divide(str(text1_3), str(text2_3))))
        while text.count('+') > 0:
            args = text.split('+', 1)
            text1 = args[0]
            text2 = args[1]
            text1 = text1.replace('-', ' ').replace('+', ' ')
            text2 = text2.replace('-', ' ').replace('+', ' ')
            args = text1.split(' ')
            text1_2 = args[len(args) - 1]
            args = text2.split(' ')
            text2_2 = args[0]
            text1_3 = parse_value_full(text1_2, method_object, markers, variables)
            text2_3 = parse_value_full(text2_2, method_object, markers, variables)
            text = text.replace(str(text1_2) + '+' + str(text2_2), str(add(str(text1_3), str(text2_3))))
        while text.count('-') > 0:
            args = text.split('-', 1)
            text1 = args[0]
            text2 = args[1]
            text1 = text1.replace('-', ' ')
            text2 = text2.replace('-', ' ')
            args = text1.split(' ')
            text1_2 = args[len(args) - 1]
            args = text2.split(' ')
            text2_2 = args[0]
            text1_3 = parse_value_full(text1_2, method_object, markers, variables)
            text2_3 = parse_value_full(text2_2, method_object, markers, variables)
            text = text.replace(str(text1_2) + '-' + str(text2_2), str(subtract(str(text1_3), str(text2_3))))
        return try_to_num(text)
    else:
        return "NAN"
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import os
import numpy as np
import cv2

# Batch-detect every image listed in train_data/test.txt with Retinanet,
# writing per-image detections and per-image inference time (ms).
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
retinanet = Retinanet()
with open("./train_data/test.txt", "r") as f:
    lines = f.readlines()
import time
detect_time = open("accuracy/time.txt", "w")
for line in lines:
    # Lines are either "path***annotations" or "path,annotations".
    if line.find("***") > 0:
        img_name = line.split("***")[:1][0]
    else:
        img_name = line.split(",")[:1][0]
    file_name = img_name.split("/")[-1].split(".")[0]
    print(img_name)
    img = Image.open(img_name)
    with open("accuracy/detections/" + file_name + ".txt", "w") as f:
        # NOTE(review): thread_time_ns() counts this thread's CPU time
        # only; if inference runs on a GPU or worker threads, the logged
        # duration undercounts wall-clock latency — confirm intent.
        starttime = time.thread_time_ns() / 10**6  #ms
        r_image = retinanet.detect_image(img)
        endtime = time.thread_time_ns() / 10**6  # ms
        detect_time.write(img_name + " " + str(endtime - starttime) + "\n")
        print("==============")
        for tmp in retinanet.predict_all:
            f.write(tmp + "\n")
detect_time.close()
# NOTE(review): fragment — the enclosing function/loop header lies outside
# this chunk; the indentation below is reconstructed.
dt = (time.time())
focal_plane = float(bottom + ((pulse_count) * steps))
print('Focal plane', focal_plane)
etl.current(focal_plane)  # drive the electrically tunable lens
print('ETL', etl.current())
old_pulse_count += 1
print('Diff', (temp - dt))
temp = dt
# Second half of the volume: poll the LabJack hardware counter for TTL
# pulses and step the focal plane back down one plane per new pulse.
while n_planes >= pulse_count > n_planes / 2:
    pulse_count = int(
        np.array(dev.getFeedback(u3.Counter(counter=1))))
    t0 = time.thread_time_ns()
    if pulse_count > old_pulse_count:
        # for initial trigger (first TTL pulse/first plane of the volume)
        # if pulse_count == n_planes-1:
        #     break
        # NOTE(review): dt here is a thread-CPU-time delta (ns), not wall
        # time, and overwrites the wall-clock dt above — confirm intent.
        dt = (time.thread_time_ns()) - t0
        print('time', dt)
        print("Done")
        focal_plane = float(bottom + ((counter) * steps))
        print('Focal plane', focal_plane)
        etl.current(focal_plane)
        print('ETL', etl.current())
        old_pulse_count += 1
        counter = counter - 1
print('DONE')
def start(self):
    """Begin timing: capture the current thread CPU clock (nanoseconds)."""
    self.start_time = time.thread_time_ns()
def __init__(self):
    """Initialise load-profiling state with baseline clock samples."""
    # Baselines for computing CPU-load deltas on the first update().
    self._lastThreadTime = time.thread_time_ns()
    self._lastMonotonicTime = TIMER()
    # Per-port profiling data and the stack of currently-active ports.
    self._portProfiling = {}
    self._portStack = []
    # Accumulated (timestamp, load) samples.
    self._measurements = []
# Benchmark driver: run `iteration_count` world updates plus tracking and
# record thread CPU timings per iteration.
threshold = 0
hourglass = layouts.HourGlass(width, height, good_count, bad_count)
w = TimedWorld(hourglass.width, hourglass.height)
w.birds = hourglass.birds
w.targets = hourglass.targets
w.p_stds = hourglass.p_stds
w.v_stds = hourglass.v_stds
charter = Tracer(width, 10, 100, good_count, bad_count)
charter.threshold_multiplier = threshold
clock = pygame.time.Clock()
current_t = time.thread_time_ns()
update_ts = np.zeros(iteration_count)
chart_ts = np.zeros(iteration_count)
ts = np.zeros((iteration_count, 6))
# Do a first step without time tracking, to initialize everything
w.update(10)
charter.track(w)
for i in range(0, iteration_count):
    ts[i][:] = w.update(10)
    # NOTE(review): current_t is never refreshed inside the loop, so
    # update_ts[i] is cumulative time since the loop began rather than a
    # per-iteration duration (chart_ts[i] does isolate track()'s cost) —
    # confirm this is intended.
    update_ts[i] = time.thread_time_ns() - current_t
    charter.track(w)
    chart_ts[i] = time.thread_time_ns() - current_t - update_ts[i]
new_t = time.thread_time_ns()