async def frame_loop():
    """Render frames forever, JPEG-encode each one, and broadcast it to every
    connected screen, targeting a ~30 ms frame budget."""
    while True:
        compute_time = 0
        print("Frame")
        if len(screen_pool) > 0:
            t1 = time.clock_gettime_ns(time.CLOCK_REALTIME)
            # Get next frame and compress it
            buffer = BytesIO()
            c.run_frame()
            image = i.to_pil()
            image.save(buffer, format="JPEG")
            frame = buffer.getvalue()
            # Send frame to listeners, collecting connections that fail
            dead_connections = []
            for screen in screen_pool:
                try:
                    await screen.send(frame)
                # Narrowed from a bare `except:` so CancelledError /
                # KeyboardInterrupt propagate instead of being swallowed.
                except Exception:
                    dead_connections.append(screen)
            # Remove dead connections
            for conn in dead_connections:
                screen_pool.remove(conn)
                await conn.close()
            # Compute duration of this frame
            t2 = time.clock_gettime_ns(time.CLOCK_REALTIME)
            compute_time = t2 - t1
            if compute_time > 40000000:  # > 40 ms: we blew the frame budget
                print("Warning, compute time is ", compute_time)
        # Sleep whatever remains of the ~30 ms budget (ns -> s)
        await asyncio.sleep(max(0, 0.03 - compute_time / 1000000000))
def slave(
        base_sequence,
        sequence_length,  # Length of shifts sequences
        task_size,  # Height of the sub trees
        hamming_upper_limit,
        correlation_upper_limit,
        verbose):
    # MPI worker loop: repeatedly receive an integer task id from rank 0,
    # decode it into a shift sequence, search the corresponding subtree for
    # good shifts, and store the results.  A task id of -1 means "no more
    # work"; the worker then exits.
    exit_var = False
    while not exit_var:
        # Receive task (blocking) from the master (rank 0)
        t = clock_gettime_ns(CLOCK_PROCESS_CPUTIME_ID)
        data = comm.recv(source=0, tag=11)
        if data != -1:  # If it's an actual task
            # Decode the integer task id into a concrete shift sequence
            seq = int_to_shift_sequence(data, sequence_length,
                                        len(base_sequence)+1, task_size)
            if verbose:
                # CPU time spent waiting for / receiving the task, in ms
                elapsed = int((clock_gettime_ns(CLOCK_PROCESS_CPUTIME_ID) - t)/1000000)
                log("TASK_ASSIGNED " + str(list(seq[:len(seq)-task_size])) + " " + str(elapsed) + "ms")
            r = bb.py_get_list_of_good_shifts(
                base_sequence, hamming_upper_limit,
                correlation_upper_limit, seq, task_size)
            # Store the results under this worker's MPI rank
            store_sequences(r, rank)
        else:
            # If there's no more tasks
            if verbose:
                log("EXITED")
            # Exit
            exit_var = True
def test_oui():
    """Look up a couple of OUI prefixes and report the lookup latency."""
    import time
    for s in ("00-30-44", "00-40-68"):
        start = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
        vendor = oui.vendor_lookup(s)
        end = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
        print(vendor)
        # The original measured start/end but discarded them; report the
        # latency the timestamps were taken for.
        print("lookup took", (end - start) / 1e6, "ms")
def get_combined_data(self):
    """Take one combined sample: MPU6050 acceleration (g), INA260 voltage and
    current, plus the time (ns) spent collecting the sample.

    Returns:
        (time_delta, accel_x, accel_y, accel_z, voltage, current)
    """
    # Store start time (in nanoseconds)
    time_start = time.clock_gettime_ns(time.CLOCK_REALTIME)
    # FIFO overflow: reset the FIFO so stale samples are discarded
    if self.fifo_count() >= 1024:
        self.bus.write_byte_data(self.MPU6050_I2C_ADDR, self.__REG_USER_CTRL,
                                 0x44)
    # Measure acceleration (raw counts -> g, minus per-axis calibration offset)
    accel_x = (self.read_raw_data(self.__REG_ACCEL_XOUT_H) /
               self.ACCEL_DIV) - self.x_offset
    accel_y = (self.read_raw_data(self.__REG_ACCEL_YOUT_H) /
               self.ACCEL_DIV) - self.y_offset
    accel_z = (self.read_raw_data(self.__REG_ACCEL_ZOUT_H) /
               self.ACCEL_DIV) - self.z_offset
    # INA260 power readings
    voltage = self.ina260.voltage
    current = self.ina260.current
    # Elapsed time for this sample.  (A dead reassignment of a local
    # `time_prev` after this point was removed — its value was never used.)
    time_delta = time.clock_gettime_ns(time.CLOCK_REALTIME) - time_start
    if self.verbose:
        print(
            'addr: {} \t time: {}ms \t x: {}g \t y: {}g \t z: {}g'.format(
                hex(self.MPU6050_I2C_ADDR), time_delta / 1000000, accel_x,
                accel_y, accel_z))
    return (time_delta, accel_x, accel_y, accel_z, voltage, current)
def test_time_clock_gettime_ns_realtime():
    """While travelled, CLOCK_REALTIME (ns) reflects the destination and ticks."""
    destination = EPOCH + 190.0
    with time_machine.travel(destination):
        first = time.clock_gettime_ns(time.CLOCK_REALTIME)
        assert first == int(destination * NANOSECONDS_PER_SECOND)
        second = time.clock_gettime_ns(time.CLOCK_REALTIME)
        assert first < second < int((EPOCH + 191.0) * NANOSECONDS_PER_SECOND)
    assert time.clock_gettime_ns(time.CLOCK_REALTIME) >= int(
        LIBRARY_EPOCH * NANOSECONDS_PER_SECOND)
def DoTest(value, time_freq, loop_freq):
    """Benchmark Fraction.set(value) over 100 iterations of process CPU time
    and feed the per-iteration cost and loop count into the counters."""
    fraction = Fraction()
    iterations = 100
    begin = time.clock_gettime_ns(time.CLOCK_PROCESS_CPUTIME_ID)
    for _ in range(iterations):
        fraction.set(value)
    end = time.clock_gettime_ns(time.CLOCK_PROCESS_CPUTIME_ID)
    time_freq.increment(int((end - begin) / 10 / iterations))
    loop_freq.increment(Fraction.Loops())
def test_time_clock_gettime_ns_monotonic_unaffected():
    """Time travel must leave the monotonic nanosecond clock strictly increasing."""
    before = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
    with time_machine.travel(EPOCH + 190.0):
        during = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
        assert isinstance(during, int)
        assert during > before
        after = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
        assert isinstance(after, int)
        assert after > during
def test_clock_realtime_ns(self): import time if not hasattr(time, 'clock_gettime_ns'): skip("need time.clock_gettime_ns()") t1 = time.clock_gettime_ns(time.CLOCK_REALTIME) assert isinstance(t1, int) time.sleep(time.clock_getres(time.CLOCK_REALTIME)) t2 = time.clock_gettime_ns(time.CLOCK_REALTIME) assert t1 != t2 assert abs(time.clock_gettime(time.CLOCK_REALTIME) - time.clock_gettime_ns(time.CLOCK_REALTIME) * 1e-9) < 0.1
def evolve(self, loss: callable, generator: callable, data=None, validation=None,
           epochs=10, batch_size=100, shuffle=True, decay=1):
    """Evolve the model for `epochs` epochs over `data`.

    Args:
        loss, generator: callables forwarded to evolve_batch.
        data: pair (inputs, targets); batched along the first axis.
        validation: held-out pair (currently unused here).
        batch_size: items per batch, or None to use the whole dataset.
        shuffle: shuffle inputs/targets with the same permutation each epoch.
        decay: learning-rate-quantity decay factor per epoch.
    """
    # BUG FIX: mutable default arguments ([]) are shared between calls;
    # use None sentinels instead.
    if data is None:
        data = []
    if validation is None:
        validation = []
    # BUG FIX: the worker pool was never closed (process leak); use the
    # Pool context manager so workers are torn down on exit.
    with Pool(int(cpu_count() * 1.5)) as pool:
        if batch_size is None:
            batch_number = 1
            batch_size = len(data[0])
        else:
            batch_number = len(data[0]) // batch_size
        for epoch in range(epochs):
            t0 = time.clock_gettime_ns(time.CLOCK_REALTIME)
            print("Epoch {}/{}".format(epoch + 1, epochs))
            epoch_loss = 0
            if shuffle and batch_size is not None:
                # Shuffle inputs and targets with the same permutation
                rng_state = np.random.get_state()
                np.random.shuffle(data[0])
                np.random.set_state(rng_state)
                np.random.shuffle(data[1])
            for batch in range(batch_number):
                m = "\r Evolving {}/{} (lr : {}) -- loss : ".format(
                    batch + 1, batch_number, self.lr_qty)
                best_loss = self.evolve_batch(
                    loss, generator,
                    data[0][batch * batch_size:(batch + 1) * batch_size],
                    data[1][batch * batch_size:(batch + 1) * batch_size],
                    pool=pool, lr_qty=self.lr_qty)
                # Floor the loss so log10 below stays finite
                if best_loss <= 1e-13 or np.isnan(best_loss):
                    best_loss = 1e-13
                try:
                    m += "{}, (log : {})".format(best_loss,
                                                 int(np.log10(best_loss)))
                except ValueError as e:
                    print(e, best_loss)
                print(m, end=" " * 10)
                epoch_loss = best_loss
            duration = time.clock_gettime_ns(time.CLOCK_REALTIME) - t0
            print("\n\t --- {} ms/inference".format(
                duration / batch_number / batch_size / 1e6))
            # Decay the learning-rate quantity each epoch
            self.lr_qty = 1.0 / (decay * epoch + 1)
            self.history.append(epoch_loss)
def test_clock_monotonic_ns(self): import time if not (hasattr(time, 'clock_gettime_ns') and hasattr(time, 'CLOCK_MONOTONIC')): skip("need time.clock_gettime()/CLOCK_MONOTONIC") t1 = time.clock_gettime_ns(time.CLOCK_MONOTONIC) assert isinstance(t1, int) time.sleep(time.clock_getres(time.CLOCK_MONOTONIC)) t2 = time.clock_gettime_ns(time.CLOCK_MONOTONIC) assert t1 < t2 assert abs(time.clock_gettime(time.CLOCK_MONOTONIC) - time.clock_gettime_ns(time.CLOCK_MONOTONIC) * 1e-9) < 0.1
def getAccelSample(n_sample):
    """Collect n_sample accelerometer readings from mpu1.

    Returns an (n_sample, 4) float array of rows
    [delta_ns_since_previous_sample, x, y, z].
    """
    # PERFORMANCE FIX: np.append copies the whole array each iteration
    # (quadratic); accumulate rows in a list and convert once at the end.
    rows = []
    time_prev = time.clock_gettime_ns(time.CLOCK_REALTIME)
    for _ in range(n_sample):
        accel = mpu1.get_accel_data()
        rows.append([
            time.clock_gettime_ns(time.CLOCK_REALTIME) - time_prev,
            accel[0], accel[1], accel[2]
        ])
        time_prev = time.clock_gettime_ns(time.CLOCK_REALTIME)
    # reshape keeps the (0, 4) shape for n_sample == 0
    return np.array(rows, dtype=float).reshape(-1, 4)
def getVISample(n_sample):
    """Collect n_sample voltage/current readings from the INA260.

    Returns an (n_sample, 3) float array of rows
    [delta_ns_since_previous_sample, voltage, current].
    """
    # PERFORMANCE FIX: np.append copies the whole array each iteration
    # (quadratic); accumulate rows in a list and convert once at the end.
    rows = []
    time_prev = time.clock_gettime_ns(time.CLOCK_REALTIME)
    for _ in range(n_sample):
        v = ina260.voltage
        i = ina260.current
        rows.append(
            [time.clock_gettime_ns(time.CLOCK_REALTIME) - time_prev, v, i])
        time_prev = time.clock_gettime_ns(time.CLOCK_REALTIME)
    # reshape keeps the (0, 3) shape for n_sample == 0
    return np.array(rows, dtype=float).reshape(-1, 3)
def get_server_difference(self, url):
    """Estimate local-clock minus server-clock offset in nanoseconds.

    Fetches the server time from `url`, compensates for half the round-trip
    latency, and returns the difference; returns 0 on any fetch/parse error.
    """
    t0 = time.clock_gettime_ns(time.CLOCK_REALTIME)
    try:
        body = requests.session().get(url).text
        payload = json.loads(body)
        # serverTime is presumably in milliseconds; scale to ns — TODO confirm
        server_ns = int(payload["serverTime"]) * 1000000
    except Exception as e:
        logger.error(e)
        return 0
    t1 = time.clock_gettime_ns(time.CLOCK_REALTIME)
    # Place the server reading at the midpoint of the round trip
    server_time = server_ns + ((t1 - t0) / 2)
    logger.info("diff_time: %d", t1 - server_time)
    return t1 - server_time
def logTime(*args: Any, level: int = INFO, loggername: str = None,
            cumulative_key: str = "", sep=" ") -> None:
    # Generator body of a timing context manager (presumably wrapped with
    # @contextmanager at the decoration site — not visible here; confirm).
    # Measures the per-thread CPU time of the enclosed block and logs it.
    #
    # args: message fragments forwarded to logMessage.
    # level / loggername / sep: logging options forwarded to logMessage.
    # cumulative_key: when non-empty, elapsed time is also accumulated in the
    #   module-level `cumulative_time` mapping under this key and reported.
    start = time.clock_gettime_ns(time.CLOCK_THREAD_CPUTIME_ID)
    yield
    elapsed_time = time.clock_gettime_ns(time.CLOCK_THREAD_CPUTIME_ID) - start
    if cumulative_key:
        cumulative_time[cumulative_key] += elapsed_time
        extra_args = [
            f"cumulative elapsed: {cumulative_time[cumulative_key] / 1e6} ms elapsed: {elapsed_time / 1e6} ms elapsed"
        ]
    else:
        extra_args = [f"elapsed: {elapsed_time / 1e6} ms elapsed"]
    logMessage(*(args + extra_args), level, loggername, sep)
def test_getitem(arg, conf):
    # Load test: read `arg.number` random items from the DynamoDB test table,
    # timing each GetItem call, and write every measurement into the result
    # table.  Errors for an individual id are printed and skipped.
    session = boto3.Session(profile_name=arg.profile)
    dynamo = session.client('dynamodb')
    for i in range(0, arg.number):
        try:
            # Pick a random key within the populated range
            id = random.randint(0, conf['counter'])
            # Start measuring get_item latency
            start = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
            res = dynamo.get_item(TableName=arg.testdb,
                                  Key={'sequenceid': {
                                      'N': str(id)
                                  }})
            end = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
            # End of get_item measurement
            delta = end - start
            print('id={0}: data={1}: responce(ms)={2:6.2f}'.format(
                id, res['Item']['Address']['S'], delta / 1000000))
            # Store result to the result table.
            res = dynamo.put_item(
                TableName=arg.resultdb,
                Item={
                    # Realtime-ns timestamp + id keeps the key unique
                    'Time': {
                        'S': '{0}{1}'.format(
                            time.clock_gettime_ns(time.CLOCK_REALTIME), id)
                    },
                    'Max': {
                        'N': '{0:d}'.format(conf['maxnumber'])
                    },
                    'Id': {
                        'N': '{0:d}'.format(id)
                    },
                    'Responce_ms': {
                        'N': '{0:f}'.format(delta / 1000000)
                    },
                    'Date': {
                        'S': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
                    }
                })
        except Exception as error:
            print('id={}: error:{}'.format(id, error))
def new(self) -> ID:
    """Generate a new, strictly increasing ULID.

    The timestamp half advances by the delta of a raw monotonic clock (so two
    calls can never share a timestamp), and the low 64 bits are random.
    """
    timestamp: int
    with self._lock:
        # Using RAW to circumvent a bug in Pine64/ARM64 and the 3.x family
        # of Linux Kernels which allowed `CLOCK_MONOTONIC` to go backwards
        # (PR: #4156).
        #
        # A monotonic clock with microsecond (us), or better, precision must
        # not return the same value twice, looking up the time itself should
        # take more then 1us:
        # https://www.python.org/dev/peps/pep-0564/#annex-clocks-resolution-in-python
        new_monotonic = clock_gettime_ns(CLOCK_MONOTONIC_RAW)
        assert (new_monotonic > self._previous_monotonic
                ), "The monotonic clock must not go backwards"
        # Advance our timestamp by exactly the observed monotonic delta
        delta = new_monotonic - self._previous_monotonic
        timestamp = self._previous_timestamp + delta
        self._previous_monotonic = new_monotonic
        self._previous_timestamp = timestamp
    # 64 random low-order bits; the high 64 bits are the timestamp
    rnd = random.getrandbits(64)
    identifier = ULID(
        timestamp.to_bytes(8, "big") + rnd.to_bytes(8, "big"))
    return cast(ID, identifier)
def find_byte(real_sig, known_sig, sleep):
    """Timing-attack helper: guess the next byte of a 32-byte signature.

    For each candidate byte, times `insecure_compare` over `trials` rounds
    and returns the candidate whose comparisons took longest in total.
    """
    from collections import defaultdict
    runtimes = defaultdict(int)
    trials = 10
    for _ in range(trials):
        # BUG FIX: range(255) never tried candidate 0xFF; use range(256).
        for candidate in range(256):
            test_sig = known_sig + bytes([candidate]) + bytes(
                [0] * (32 - len(known_sig) - 1))
            start = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
            insecure_compare(real_sig, test_sig, sleep)
            stop = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
            runtimes[candidate] += (stop - start)
    # Candidate with the maximum accumulated runtime (ties resolve to the
    # first maximum, matching the original stable reverse sort).
    return max(runtimes.items(), key=lambda kv: kv[1])[0]
def get_accel_data(self):
    """Read one accelerometer sample.

    Returns:
        (timestamp_ns, accel_x, accel_y, accel_z) with acceleration in g.
    """
    # FIFO overflow: reset the FIFO so stale samples are discarded
    if self.fifo_count() >= 1024:
        self.bus.write_byte_data(self.MPU6050_I2C_ADDR, self.__REG_USER_CTRL,
                                 0x44)
    # Measure acceleration (raw counts -> g, minus per-axis calibration offset)
    accel_x = (self.read_raw_data(self.__REG_ACCEL_XOUT_H) /
               self.ACCEL_DIV) - self.x_offset
    accel_y = (self.read_raw_data(self.__REG_ACCEL_YOUT_H) /
               self.ACCEL_DIV) - self.y_offset
    accel_z = (self.read_raw_data(self.__REG_ACCEL_ZOUT_H) /
               self.ACCEL_DIV) - self.z_offset
    # temp = self.read_raw_data(REG_TEMP_OUT_H) / -100
    # Sample timestamp (ns since epoch)
    timestamp = time.clock_gettime_ns(time.CLOCK_REALTIME)
    if self.verbose:
        # BUG FIX: the original referenced an undefined `time_delta` here,
        # raising NameError whenever verbose was enabled; report the
        # timestamp this sample actually carries instead.
        print(
            'addr: {} \t time: {}ms \t x: {}g \t y: {}g \t z: {}g'.format(
                hex(self.MPU6050_I2C_ADDR), timestamp / 1000000, accel_x,
                accel_y, accel_z))
    return (timestamp, accel_x, accel_y, accel_z)
def __init__(self):
    """Virtually private constructor — use the singleton accessor instead.

    Raises:
        Exception: if an instance already exists.
    """
    # BUG FIX: enforce the singleton BEFORE doing any work (the original
    # seeded and built the generator first, then raised), and compare with
    # `is not None` rather than `!= None`.
    if RandomIntegerGenerator.__instance is not None:
        raise Exception("This class is a singleton!")
    # Seed from a raw monotonic clock so each process gets a distinct seed
    seed = time.clock_gettime_ns(time.CLOCK_MONOTONIC_RAW)
    self._generator = np.random.default_rng(seed)
    RandomIntegerGenerator.__instance = self
def generate_raw_anemometer_bytes():
    """Poll the anemometer over serial forever, yielding (raw_bytes, epoch_ms)."""
    with serial.Serial("/dev/serial0", 9600, timeout=0.5) as ser:
        while True:
            ser.write(b"?Q")
            # BUG FIX: the response was read but discarded, leaving `s`
            # undefined at the yield below (NameError on first iteration).
            s = ser.readall()
            t = (time.clock_gettime_ns(time.CLOCK_REALTIME) // 1000000
                 )  # Epoch in milliseconds
            yield s, t
def get_monotonic_clock_ns():
    """Return CLOCK_MONOTONIC in nanoseconds, refusing to run under libvirttime."""
    if 'VIRT_TIME_CONF' in os.environ:
        raise RuntimeError("Must not be under libvirttime influence")
    try:
        reader = time.clock_gettime_ns
    except AttributeError:
        # Pre-3.7 fallback: derive nanoseconds from the float API
        return int(time.clock_gettime(time.CLOCK_MONOTONIC) * NSEC_IN_SEC)
    return reader(time.CLOCK_MONOTONIC)
def speed(vel):
    """Step the motor at velocity `vel` (same units as steplength/s) until the
    safety pin goes low, then print the number of steps taken."""
    counter = 0
    GPIO.output(dir_pin, False)
    time.sleep(steptime)
    # Nanoseconds between steps for the requested velocity
    dt = int(1000000000 / (vel / steplength))
    # BUG FIX: time.clock_gettime_ns requires a clock id; the bare call in
    # the original raised TypeError.
    t1 = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
    while GPIO.input(safe_pin) == GPIO.HIGH:
        t2 = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
        if t2 - t1 >= dt:
            # BUG FIX: restart the interval so steps are paced dt apart
            # (t1 was never advanced, so after the first interval every
            # loop iteration would have stepped).
            t1 = t2
            counter = counter + 1
            GPIO.output(step_pin, True)
            time.sleep(steptime)
            GPIO.output(step_pin, False)
            time.sleep(steptime)
    # (dead local `step_release = counter` removed — never read)
    print(counter)
    return
def __init__(self, seed=None):
    """Seed the player's RNG.

    Args:
        seed: explicit seed; when None, an arbitrary 5-digit seed
            (10000-99999) is derived from the system clock.
    """
    # `is None` instead of `== None` (identity check for the sentinel)
    if seed is None:
        # produce an arbitrary 5-digit seed
        # NOTE(review): CLOCK_UPTIME_RAW exists only on macOS — confirm the
        # target platform.
        seed = time.clock_gettime_ns(time.CLOCK_UPTIME_RAW) \
            % 90000 + 10000
    # print('Reproduce this player with: Random(%s)' % str(seed))
    self.seed = seed
    random.seed(seed)
def wait(self, timeout):
    # Block until this fence's syncpoint threshold is reached or `timeout`
    # (nanoseconds, relative to now) expires — the kernel ioctl takes an
    # absolute CLOCK_MONOTONIC deadline.
    now = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
    args = drm_tegra_syncpoint_wait()
    args.timeout_ns = now + timeout  # convert relative timeout to absolute
    args.id = self.syncpt.id
    args.threshold = self.fence
    self.ioctl(DRM_IOCTL_TEGRA_SYNCPOINT_WAIT, args)
def init(self):
    """Set up the beat, per-channel runtime data, and metronome, then start
    playback and the per-channel playback thread(s)."""
    # Create Beat (one tap per beat, all weights equal)
    self.beat = Beat(time=60.0/self.bpm, taps=self.beats,
                     w1=100, w2=100, w3=100)
    # Fire _bar_end at the end of every bar
    self.bar_trig = TrigFunc(self.beat['end'], lambda : self._bar_end())
    self.last_bar = time.clock_gettime_ns(time.CLOCK_BOOTTIME)
    # Init Data: mark every channel as having started "now"
    for i in range(16):
        data = self.runtime_data.channel_data(i)
        data.last_start = time.clock_gettime_ns(time.CLOCK_BOOTTIME)
    # Create metronome: short envelope-gated sine on each beat
    trig_env = TrigEnv(self.beat, CosTable(), dur=0.2, mul=self.beat['amp'])
    self.metronome = FastSine(freq=440, mul=trig_env)
    self.beat.store(0)
    print(self.beat.getPresets())
    # Start beat
    self.beat.play()
    # Spawn playback threads (only channel 0 for now)
    for i in [0]:
        threading.Thread(target=lambda ch=i : self.play_thread(ch)).start()
def speed(vel, length):
    """Step the motor at velocity `vel` until `length` is covered or `state`
    clears, sampling the IMU after each step.

    Returns:
        The distance actually travelled (steps * steplength).
    """
    counter = 0
    GPIO.output(dir_pin, False)
    time.sleep(steptime)
    # Nanoseconds between steps for the requested velocity
    dt = int(1000000000 / (vel / steplength))
    # BUG FIX: time.clock_gettime_ns requires a clock id; the bare call in
    # the original raised TypeError.
    t1 = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
    while state == 1 and length > counter * steplength:
        t2 = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
        if t2 - t1 >= dt:
            # BUG FIX: restart the interval so steps are paced dt apart
            # (t1 was never advanced in the original).
            t1 = t2
            GPIO.output(step_pin, True)
            time.sleep(steptime)
            GPIO.output(step_pin, False)
            time.sleep(steptime)
            counter = counter + 1
            imu_data()
    # (dead local `step_release = counter` removed — never read)
    print(counter)
    return counter * steplength
def _bar_end(self):
    """Bar-end callback: advance the bar counter and restart every channel
    whose configured bar cycle has just completed."""
    print("Bar end")
    self.bar_counter += 1
    self.last_bar = time.clock_gettime_ns(time.CLOCK_BOOTTIME)
    print(self.last_bar)
    for channel_index in range(16):
        runtime = self.runtime_data.channel_data(channel_index)
        config = self.cube.reg().data(self).channel_data(channel_index)
        if self.bar_counter % config.bars == 0:
            runtime.last_start = self.last_bar
def headless_visit(tbb_dir): out_img = join(dirname(realpath(__file__)), "headless_screenshot.png") # start a virtual display xvfb_display = start_xvfb() with TorBrowserDriver(tbb_dir) as driver: for i in range(len(load_table)): start_time = time.clock_gettime_ns(time.CLOCK_REALTIME) driver.load_url(load_table[i][URLS]) end_time = time.clock_gettime_ns(time.CLOCK_REALTIME) driver.get_screenshot_as_file(out_img) print("Screenshot is saved as %s" % out_img) elapsed_time = (end_time - start_time) / 1000000000 print("Load time: ", str(elapsed_time) + "s") load_table[i][VANILLA] = elapsed_time col = -1 for bridge in BRIDGE_TYPE: with TorBrowserDriver(tbb_dir, default_bridge_type=bridge) as bdriver: if bridge == "obfs4": col = 2 print("obfs4..........") if bridge == "meek-azure": col = 3 print("meek-azure..........") if col == -1: break for i in range(len(load_table)): start_time = time.clock_gettime_ns(time.CLOCK_REALTIME) bdriver.load_url(load_table[i][URLS]) end_time = time.clock_gettime_ns(time.CLOCK_REALTIME) bdriver.get_screenshot_as_file(out_img) print("Screenshot is saved as %s" % out_img) elapsed_time = (end_time - start_time) / 1000000000 print("Load time: ", str(elapsed_time) + "s") load_table[i][col] = elapsed_time print("About to print..........") write_csv() stop_xvfb(xvfb_display)
def _tick(self):
    """Scheduler loop: whenever _step_delta_ns has elapsed (and we are not
    paused), launch next_step on a daemon thread."""
    while True:
        if self.is_paused():
            time.sleep(0.001)
            continue
        now_ns = time.clock_gettime_ns(time.CLOCK_REALTIME)
        elapsed = now_ns - self._last_step_ns
        if elapsed > self._step_delta_ns:
            self._last_step_ns = now_ns
            worker = threading.Thread(target=self.next_step, daemon=True)
            worker.start()
def send(self, msg: mido.Message):
    # Forward the MIDI message to the input port and record it in the
    # channel's message list, time-stamped relative to the channel's last
    # bar start.
    self.input.send(msg)
    channel: RuntimeChannelData = self.runtime_data.channel_data(msg.channel)
    data: ChannelData = self.cube.reg().data(self).channel_data(msg.channel)
    index = 0
    # NOTE(review): curr_bar is computed but never used in this method
    curr_bar = self.bar_counter % data.bars
    msg.time = time.clock_gettime_ns(time.CLOCK_BOOTTIME) - channel.last_start
    # Find postition: scan the list from the end.  NOTE(review): there is no
    # `break`, so the final `index` lands after the EARLIEST message with a
    # smaller time, not the latest — confirm whether a `break` was intended
    # to keep the list sorted by time.
    for i in range(len(channel.messages)):
        if channel.messages[len(channel.messages) - i - 1].time < msg.time:
            index = len(channel.messages) - i
    # Insert
    channel.messages.insert(index, msg)
def test_time_ns_type(self):
    """Each *_ns() clock variant returns an int close to its float twin."""
    def check_ns(sec, ns):
        # Verify the ns reading is an int within 50 ms of the float reading.
        self.assertIsInstance(ns, int)
        sec_ns = int(sec * 1e9)
        # tolerate a difference of 50 ms
        # BUG FIX: the original used 50 ** 6 ns (~15.6 seconds), which does
        # not match the stated 50 ms tolerance; use 50 million ns.
        self.assertLess((sec_ns - ns), 50 * 10 ** 6, (sec, ns))
    check_ns(time.time(), time.time_ns())
    check_ns(time.monotonic(), time.monotonic_ns())
    check_ns(time.perf_counter(), time.perf_counter_ns())
    check_ns(time.process_time(), time.process_time_ns())
    if hasattr(time, 'clock_gettime'):
        check_ns(time.clock_gettime(time.CLOCK_REALTIME),
                 time.clock_gettime_ns(time.CLOCK_REALTIME))