def move_to_tempfile_sync(
        src: typing.Union[os_PathLike_str, str], *,
        wait_for_src: bool = False,
        suffix: str = "",
        prefix: str = "",
        dir: typing.Optional[typing.Union[os_PathLike_str, str]] = None
) -> pathlib.Path:
    """Moves file to temporary file location

    Similar to the :func:`tempfile.mkstemp` function, but moves an
    existing source file rather than creating a new one.

    :param src: path of the existing file to move
    :param wait_for_src: if True, retry (after yielding the CPU) when
        *src* has vanished, assuming another process is about to drop a
        replacement in; if False a missing *src* raises immediately
    :param suffix: forwarded to :func:`tempfile.mktemp`
    :param prefix: forwarded to :func:`tempfile.mktemp`
    :param dir: forwarded to :func:`tempfile.mktemp`
    :return: the temporary path the file now lives at
    :raises FileExistsError: if every candidate name was already taken
        after ``tempfile.TMP_MAX`` attempts
    :raises FileNotFoundError: if *src* stayed missing
    """
    last_exc: typing.Optional[BaseException] = None
    for _ in range(tempfile.TMP_MAX):
        # mktemp() only *picks* a free name; rename_noreplace() below is
        # the atomic step that actually claims it (or fails trying).
        tmp = pathlib.Path(
            tempfile.mktemp(suffix=suffix, prefix=prefix, dir=dir))
        try:
            rename_noreplace.rename_noreplace(src, tmp)
            return tmp  # Success
        except FileExistsError as exc:
            # Target file was created in the meantime — try another name.
            # Bug fix: remember the exception; previously this path left
            # `last_exc` unbound and the final raise crashed with
            # UnboundLocalError when every attempt collided.
            last_exc = exc
        except FileNotFoundError as exc:
            last_exc = exc
            if wait_for_src:
                # Somebody else moved the file out already, give
                # them extra time to move in the replacement
                #
                # If they died, those stats will be gone however…
                os.sched_yield()
            else:
                raise
    assert last_exc is not None  # loop body always sets it before falling out
    raise last_exc
def worker(wnum, input_queue, result_queue):
    """Fit-grid worker: consumes ``(idx, (ix, iy))`` grid points from
    *input_queue*, runs ``self.fit()`` seeded at that grid location,
    writes successful results into the shared grid/done arrays, and
    reports ``(idx, success)`` on *result_queue*.  Exits on the
    ``'STOP'`` sentinel.

    Note: relies on ``self``, ``grid_shared`` and ``done_shared`` from
    the enclosing scope (shared-memory arrays created by the parent).
    """
    # NumPy views over the parent's shared ctypes arrays.
    lgrid = np.ctypeslib.as_array(grid_shared)
    ldone = np.ctypeslib.as_array(done_shared)
    # Map integer grid indices onto coordinate values.
    dim_mult = (self.lut_min_max[1] - self.lut_min_max[0]) / (self.lut_dim_2d - 1)
    dim_base = self.lut_min_max[0]
    # Pin this worker to its own CPU core.
    os.sched_setaffinity(0, [wnum])
    while True:
        try:
            idx, value = input_queue.get(block=True)
        except (EOFError, OSError):
            # Queue closed / parent gone — nothing more to do.  (The old
            # bare `except:` also swallowed KeyboardInterrupt/SystemExit,
            # which made the worker impossible to interrupt.)
            break
        if value == 'STOP':
            break
        xx = dim_base + value[0] * dim_mult
        yy = dim_base + value[1] * dim_mult
        success = False
        try:
            fit_res = self.fit(default=[xx, yy])
            # fit() returns (params, error_flag); a False flag means success.
            if not fit_res[1]:
                success = True
                lgrid[value[0], value[1], :] = np.array(fit_res[0])
                ldone[value[0], value[1]] = True
        except Exception:
            # Best-effort: a crashing fit is reported as success=False.
            pass
        result_queue.put((idx, success))
def worker(wnum, input_queue, result_queue):
    """Task worker: consumes ``('image'|'graph', payload, ...)`` tuples
    from *input_queue*, processes each one, and puts the result on
    *result_queue*.  Exits on the ``'STOP'`` sentinel.

    Relies on ``processImages`` / ``processGraphs`` from the enclosing
    module scope.
    """
    # Pin this worker to its own CPU core.
    os.sched_setaffinity(0, [wnum])
    while True:
        # (Removed dead `if True:` scaffolding and the commented-out
        # try/except wrapper that used to surround this body.)
        value = input_queue.get(block=True)
        if value == 'STOP':
            break
        res = (None, )
        if value[0] == 'image':
            timages = [value[1]]
            # value[2] is an optional saturation model; None means "none".
            tsat_models = [] if value[2] is None else [value[2]]
            res = processImages(proc_images=timages,
                                proc_sat_models=tsat_models,
                                thread_mode=True)
        elif value[0] == 'graph':
            res = processGraphs([value[1]], thread_mode=True)
        else:
            print('Failed: ', value, '\n')
        result_queue.put(res)
        os.sched_yield()
def touch(self, timeout=None, reset=False):
    """Wait for one complete touch event and report it.

    :param timeout: seconds to wait; ``None`` blocks forever, a value
        ``<= 0`` returns ``False`` immediately
    :param reset: close and reopen the input device before reading
    :return: ``(x, y)`` coordinates on press, ``None`` on release,
        ``False`` on timeout
    """
    if reset and self.fd is not None:
        self.fd.close()
        self.fd = None
    if timeout is not None:
        if timeout <= 0:
            return False
        timeout += time.monotonic()  # convert into an absolute deadline
    if self.fd is None:
        self.fd = open(self.device, 'rb')
        # Non-blocking reads so a read never stalls; select() does the waiting.
        os.set_blocking(self.fd.fileno(), False)
    press = xabs = yabs = None
    while True:
        # It looks like some touch drivers don't support the poll method
        # correctly? So first try to read a packet in non-blocking mode and
        # select only if nothing is waiting.
        while True:
            packet = self.fd.read(16)
            if packet:
                if len(packet) == 16:
                    break
                # Partial packet, maybe the rest is in the pipe?
                for _ in range(10):
                    os.sched_yield()
                    # Bug fix: a non-blocking read() returns None when no
                    # bytes are waiting; the old `packet += read(...)`
                    # crashed with TypeError in that case.
                    rest = self.fd.read(16 - len(packet))
                    if rest:
                        packet += rest
                    if len(packet) == 16:
                        break
                if len(packet) == 16:
                    # Bug fix: a successfully re-assembled packet used to
                    # fall through and be overwritten by the next read.
                    break
                # Ugh, ignore partial packet
            else:
                s = select.select([self.fd], [], [],
                                  max(0, timeout - time.monotonic())
                                  if timeout else None)
                if not s[0]:
                    return False  # Timeout!
        # An event packet is 16 bytes:
        # struct input_event {
        #     struct timeval time;  // 8 bytes
        #     __u16 type;
        #     __u16 code;
        #     __s32 value;
        # };
        # We ignore the time
        type, code, value = struct.unpack("8xHHi", packet)
        if type == 0:  # EV_SYN
            if press == 1:
                if xabs is not None and yabs is not None:
                    # scale if enabled
                    if self.scale_width:
                        xabs = int(xabs * (self.scale_width / self.width))
                    if self.scale_height:
                        yabs = int(yabs * (self.scale_height / self.height))
                    return (xabs, yabs)
            elif press == 0:
                return None
            press = xabs = yabs = None
        elif type == 1 and code == self.button:  # EV_KEY and our button
            press = value
        elif type == 3 and code == 0:  # EV_ABS and ABS_X
            xabs = value
        elif type == 3 and code == 1:  # EV_ABS and ABS_Y
            yabs = value
def worker(wnum, input_queue, output_queue):
    """Generic map worker: applies ``functor`` (from the enclosing scope)
    to queued ``(idx, value)`` items, putting ``(idx, result)`` on
    *output_queue*.  Exits on the ``'STOP'`` sentinel.
    """
    import queue  # for the Empty exception raised by a non-blocking get()
    # Pin this worker to its own CPU core.
    os.sched_setaffinity(0, [wnum])
    while True:
        try:
            idx, value = input_queue.get(block=False)
        except queue.Empty:
            # Nothing queued yet — yield the CPU instead of hot-spinning.
            # (The old bare `except: pass` also hid functor failures and
            # anything else, including KeyboardInterrupt.)
            os.sched_yield()
            continue
        if value == 'STOP':
            break
        try:
            output_queue.put((idx, functor(value)))
        except Exception:
            # Best-effort as before: a failing functor drops the item.
            # NOTE(review): the consumer counts results, so a dropped item
            # can stall it — consider reporting failures instead.
            pass
def test_uart_monitor(serial_simulator_type: typing.Type) -> None:
    """
    Verify the nanaimo.ConcurrentUart class using a mock serial port.
    """
    expected_last = material.FAKE_TEST_SUCCESS[-1]
    fake_port = serial_simulator_type(material.FAKE_TEST_SUCCESS)
    with nanaimo.connections.uart.ConcurrentUart(fake_port) as monitor:
        # Drain lines until the final expected line shows up; a None read
        # means no line is ready yet, so give the reader a chance to run.
        current = monitor.readline()
        while current != expected_last:
            if current is None:
                os.sched_yield()
            current = monitor.readline()
def idle(cls):
    """ Application idle -- Except for Idle Time handler execution

    Repeatedly pops the head of ``cls.list_active``, runs its handler
    when its due time has passed, and re-inserts it so the list stays
    ordered by each entry's next due time.  Returns when the list is
    empty.

    Arguments:
        cls: The IdleTime class object

    Returns:
        None
    """
    itoLast = 0  # object whose handler ran last (0 before any run)
    while True:
        if not cls.list_active:
            break
        # pylint: disable=no-member
        os.sched_yield()  # don't monopolise the CPU while polling
        # Bug fix: time.clock() was removed in Python 3.8 and raised
        # AttributeError here; time.monotonic() is the wall-clock
        # replacement suited to measuring delays.
        timeCur = time.monotonic()
        idleTimeObj = cls.list_active.pop(0)
        if timeCur >= idleTimeObj._nexttime:
            # Due: compute the next due time and re-insert keeping the
            # list sorted by _nexttime (append when it is the latest).
            nexttime = timeCur + idleTimeObj._delay
            idleTimeObj._nexttime = nexttime
            for idx, idleTimeObj2 in enumerate(cls.list_active):
                if nexttime < idleTimeObj2._nexttime:
                    cls.list_active.insert(idx, idleTimeObj)
                    break
            else:
                cls.list_active.append(idleTimeObj)
            if itoLast != idleTimeObj:
                # Remember only the switch to a different handler object.
                itoLast = idleTimeObj
            # NOTE(review): the flattened original is ambiguous about
            # whether this call sat inside the `if` above; running the
            # handler on every due pass is assumed — confirm.
            idleTimeObj._handler(idleTimeObj)
        else:
            # Nothing to run yet
            cls.list_active.insert(0, idleTimeObj)
def idle(cls): """ Application idle -- Except for Idle Time handler execution Arguments: cls: The IdleTime class object Returns: None """ #traceStk = [lix.strip() for lix in traceback.format_stack()] #leoG.trace('Trace: {0}'.format(traceStk[-2])) itoLast = 0 while True: if not cls.list_active: break # pylint: disable=no-member os.sched_yield() timeCur = time.process_time() idleTimeObj = cls.list_active.pop(0) #leoG.trace('Popped {0} leaving {1}'.format(id(idleTimeObj), [id(ent) for ent in cls.list_active])) if timeCur >= idleTimeObj._nexttime: nexttime = timeCur + idleTimeObj._delay idleTimeObj._nexttime = nexttime for idx, idleTimeObj2 in enumerate(cls.list_active): if nexttime < idleTimeObj2._nexttime: #leoG.trace('Insert at {0}'.format(idx)) cls.list_active.insert(idx, idleTimeObj) break else: #leoG.trace('Append') cls.list_active.append(idleTimeObj) if itoLast != idleTimeObj: itoLast = idleTimeObj #leoG.trace('Run {0} cls.list_active={1}'.format(id(idleTimeObj), [id(ent) for ent in cls.list_active])) idleTimeObj._handler(idleTimeObj) #leoG.trace('Handler return. cls.list_active={0}'.format([id(ent) for ent in cls.list_active])) else: # Nothing to run yet cls.list_active.insert(0, idleTimeObj)
def wrapper(*args, **kwargs):
    """Run the wrapped ``func`` under the SCHED_RR real-time policy when
    permitted, logging a hint and falling back to normal priority
    otherwise.  Returns whatever ``func`` returns.
    """
    did_sched = False
    try:
        pri = os.sched_get_priority_max(os.SCHED_RR)
        sched = os.sched_param(pri)
        os.sched_setscheduler(0, os.SCHED_RR, sched)
        did_sched = True
    except PermissionError as e:
        # Elevating priority needs CAP_SYS_NICE; explain how to grant it.
        msg = ("Looks like you haven't set scheduling capabilities "
               "for your python executable, so you can't run with "
               "high priority. To disable this message, either consider "
               "running `sudo setcap cap_sys_nice+ep /path/to/python3` or "
               "comment out the `@hi_priority` decorator above the "
               "rf_send function, since it's not working anyway, or just "
               "increase the logging threshold higher than `info`. NB: "
               "`setcap` will *not* work on symlinks, you must provide "
               "the path to the actual python3 executable.")
        logger.info(e)
        logger.info(msg)
    # Bug fix: the wrapped function's return value used to be discarded.
    result = func(*args, **kwargs)
    if did_sched:
        # NOTE(review): this only yields the CPU; the RR policy is NOT
        # restored afterwards — confirm that is intentional.
        os.sched_yield()
    return result
def multiprocess_progress(data, functor, finished, data_size, early_clip=None):
    """Map *functor* over *data* on a pool of worker processes, printing
    a single-line console progress status.

    :param data: iterable of input values
    :param functor: callable applied to each value (must be visible to
        forked workers)
    :param finished: callback ``finished(index, result)`` invoked in this
        process for every completed (or clipped) item
    :param data_size: total number of items expected from *data*
    :param early_clip: optional ``early_clip(index, value) ->
        (clipped, result)``; clipped items bypass the worker pool
    """
    import queue as queue_mod  # for Empty raised by non-blocking gets
    from multiprocessing import Process, Queue

    # Leave one core for this (parent) process, but never drop below one
    # worker — the old `cpu_count()-1` hung forever on a single-core box.
    num_procs = max(1, os.cpu_count() - 1)

    def worker(wnum, input_queue, output_queue):
        # Each worker is pinned to its own core and loops until 'STOP'.
        os.sched_setaffinity(0, [wnum])
        while True:
            try:
                idx, value = input_queue.get(block=False)
            except queue_mod.Empty:
                os.sched_yield()
                continue
            if value == 'STOP':
                break
            try:
                output_queue.put((idx, functor(value)))
            except Exception:
                pass  # best-effort: a failing functor drops the item

    task_queue = Queue(2 * num_procs)
    done_queue = Queue(2 * num_procs)

    # Launch workers.
    print('Running {} workers ...'.format(num_procs))
    processes = []
    for i in range(num_procs):
        processes.append(Process(target=worker,
                                 args=(i, task_queue, done_queue),
                                 name='worker {}'.format(i),
                                 daemon=True))
        processes[-1].start()

    # Push input data, and check for output data.
    num_sent = 0
    num_done = 0
    num_clipped = 0
    iterator = iter(data)
    perc = 0

    def print_progress(msg=None):
        # Redraw the one-line status (ANSI clear-line + carriage return).
        msg_str = '[' + msg + ']' if msg is not None else ''
        print('\033[2K\r{} sent, {} done, {} clipped, {} total ({} %) {}'.format(
            num_sent, num_done, num_clipped, data_size, perc, msg_str), end='')

    while num_done < data_size:
        # Keep the bounded task queue topped up.
        print_progress('sending work')
        while num_sent < data_size and not task_queue.full():
            nextval = next(iterator)
            clipped = False
            if early_clip is not None:
                clipped, clip_result = early_clip(num_sent, nextval)
                if clipped:
                    # Clipped items never reach a worker.
                    finished(num_sent, clip_result)
                    num_clipped += 1
                    num_done += 1
            if not clipped:
                task_queue.put((num_sent, nextval))
            num_sent += 1
            os.sched_yield()
        # Drain whatever results are ready.  (Narrowed from a bare
        # `except: break`, which also hid errors raised by `finished`.)
        while True:
            try:
                i, result = done_queue.get(block=False)
            except queue_mod.Empty:
                break
            finished(i, result)
            num_done += 1
            perc = int(num_done / data_size * 100)
            print_progress('collecting results')
            time.sleep(0)
        print_progress()
        time.sleep(0)

    # Terminate workers.
    for i in range(num_procs):
        task_queue.put((-1, 'STOP'))
    for p in processes:
        p.join()

    print('\n ... done')
# Fragment of a larger routine: a nested progress printer plus the
# send/collect pump loop.  num_sent, num_done, num_todo, perc, iterator,
# task_queue, done_queue and output_json are bound in the enclosing scope.
def print_progress(msg=None):
    # Redraw the one-line status (ANSI clear-line + carriage return).
    msg_str = ''
    if msg is not None:
        msg_str = '[' + msg + ']'
    print('\033[2K\r{} sent, {} done, {} total ({} %) {}'.format(
        num_sent, num_done, num_todo, perc, msg_str), end='')

while num_done < num_todo:
    print_progress('sending work')
    # Keep the bounded task queue topped up.
    while num_sent < num_todo and not task_queue.full():
        nextval = next(iterator)
        task_queue.put(nextval)
        num_sent += 1
        os.sched_yield()
    # Drain whatever results are currently available.
    while True:
        try:
            item = done_queue.get(block=False)
            # item[0] appears to be the image-set key; None marks a
            # discarded result.  NOTE(review): inferred from usage here —
            # confirm against the producer.
            if len(item) > 0 and (not item[0] is None):
                output_json['image_sets'][item[0]].append(item[1])
            num_done += 1
            perc = int(num_done / num_todo * 100)
        except:
            # NOTE(review): this bare except also hides errors from the
            # append above, not just queue.Empty — consider narrowing.
            break
        time.sleep(0)
    print_progress()
    # NOTE(review): 10 s per pump cycle is much longer than the sibling
    # loops' sleep(0)/sleep(2) — confirm it is intentional.
    time.sleep(10)
def run(self) -> None:
    """
    Implements the main loop of a forked process (spawner)

    :return: There is no return code since it will taken from the code
             returned by the spawned action and placed into the results
             queue.  NOTE: the results codes queue will be controlled for
             the parent process of the spawner(s), commonly the "launcher"
    """
    self.__spawner_label = 'PPID={},PID={},{}'.format(
        self.__launcher_PID, self.pid, self.name)
    self.__logger.info('{} started'.format(self.__spawner_label))
    while True:
        # getting next action (task) to be spawned
        try:
            # TO BE DONE: Configuring the timeout value from XML files
            picked_action = self.__actions_to_be_carried_out.get(timeout=5)
        except queue.Empty:
            # Heartbeat log entry while the queue stays empty.
            self.__logger.info(
                '{}: waiting for a new action (task)'.format(
                    self.__spawner_label))
            os.sched_yield()  # relinquishing for a while
            # after informing it's still alive, goes for an action again
            continue
        except KeyboardInterrupt:
            # Treat Ctrl-C like the poison pill so the shutdown path runs.
            picked_action = None
            self.__logger.info('KeyboardInterrupt caught by {}'.format(
                self.__spawner_label))
        # Checking if the Launcher has put the poison pill
        # which is considered the "normal" ending procedure
        if picked_action is None:
            self.__logger.info(
                '{}: finishing, there is no more actions to be spawn'.
                format(self.__spawner_label))
            self.__actions_to_be_carried_out.task_done()
            # Leaving the processing action loop
            break
        #######################
        # Performing the action
        # NOTE: Spawner (worker) will do the needful by means of the Action class
        #######################
        self.__logger.info('{} has picked the action {}'.format(
            self.__spawner_label, picked_action.get_action_xml_id()))
        try:
            # Action class (picked_action) will spawn the action by means of Popen
            # NOTE: Actions class will run another infinite loop to get the
            # stdout and stderr from the action
            returned_code = -1  # reported when the action is interrupted
            self.__stop_action_event.clear()
            returned_code = picked_action.spawn_the_action(
                stop_action_event=self.__stop_action_event)
        except KeyboardInterrupt:
            self.__logger.info('{} caught KeyboardInterrupt'.format(
                self.__spawner_label))
            # Setting up the peremptory finishing request for the picked Action
            self.__stop_action_event.set()
            # JUST FOR TESTING ->time.sleep(1)
        else:
            self.__logger.info('{}: the <{}> action has finished'.format(
                self.__spawner_label, picked_action.get_action_xml_id()))
        # setting the action (task) as accomplished
        # NOTE(review): the flattened original is ambiguous about whether the
        # two lines below sat inside the `else:`; here every picked action is
        # acknowledged and its (possibly -1) code reported even after a
        # KeyboardInterrupt — confirm against the launcher's join() logic.
        self.__actions_to_be_carried_out.task_done()
        # reporting the action returned code
        self.__returned_codes.put(returned_code)
def _relinquish(self): if self.runs_on_device: os.sched_yield() return
def __client(self):
    """Client side of the MPI ping/pong handshake.

    Reads the server's MPI port name from a shared memory-mapped file,
    connects, exchanges PING/PONG values, sends one timestamp payload and
    a ``None`` poison pill, then disconnects.

    Returns RETURN_OK on success, RETURN_NOT_OK on any failure.
    """
    try:
        file_object = open(self.__mmap_path_filename, 'rb')
    except FileNotFoundError:
        self.error('{} could not be opened'.format(
            self.__mmap_path_filename))
        return RETURN_NOT_OK
    # NOTE(review): file_object is never closed on the success paths —
    # consider a `with` block.
    try:
        # ACCESS_READ
        mmap_object = mmap.mmap(file_object.fileno(),
                                length=0,
                                access=mmap.ACCESS_READ,
                                offset=0)
    except SyntaxError:
        # NOTE(review): mmap failures raise ValueError/OSError, never
        # SyntaxError — this handler looks unreachable; confirm intent.
        self.error('{} could not be mmaped'.format(
            self.__mmap_path_filename))
        return RETURN_NOT_OK
    # getting the MPI port name
    # Spin until the server has published a non-zero port-name length.
    while True:
        self.__mpi_port_name_length = \
            int.from_bytes(mmap_object[MemMap.PORT_LENGTH_LOC:MemMap.PORT_LENGTH_LENGTH], 'big')
        if not self.__mpi_port_name_length == 0:
            break
        os.sched_yield()
    # NOTE(review): the slice above ends at PORT_LENGTH_LENGTH rather than
    # PORT_LENGTH_LOC + PORT_LENGTH_LENGTH — verify against MemMap's layout.
    self.__mpi_port_name = \
        mmap_object[MemMap.PORT_NAME_LOC:MemMap.PORT_NAME_LOC +
                    self.__mpi_port_name_length].decode()
    self.info('using the server port {}'.format(self.__mpi_port_name))
    # Attaching to the Listening Port
    try:
        self.__mpi_info = MPI.INFO_NULL
        # self.__mpi_comm = self.__MPI_COMM_WORLD.Connect(self.__mpi_port_name, self.__mpi_info, self.__mpi_root)
        self.__mpi_comm = self.__MPI_COMM_WORLD.Connect(
            self.__mpi_port_name, MPI.INFO_NULL, root=0)
        self.info('connected to the server')
    except MPI.Exception as ierr:
        self.report_mpi_error(
            mpi_ierr=ierr,
            mpi_operation_name='self.__MPI_COMM_WORLD.Connect')
        return RETURN_NOT_OK
    self.info('sending PING value')
    self.__mpi_comm.send(self.__ping_value,
                         dest=0,
                         tag=self.__mpi_message_tag)
    self.info('waiting for PONG value from the server')
    pong_value = self.__mpi_comm.recv(source=0, tag=self.__mpi_message_tag)
    if pong_value == self.__pong_value:
        self.info(
            'Gotten expected PONG value {} from SERVER'.format(pong_value))
    else:
        # Unexpected handshake value: tell the server to stop and bail out.
        self.info(
            'expected PONG value from SERVER should be {}, received {}'.
            format(self.__pong_value, pong_value))
        self.__mpi_comm.send(None, dest=0,
                             tag=self.__mpi_message_tag)  # poison pill
        self.__mpi_comm.Disconnect()
        return RETURN_NOT_OK
    # sending some random value
    self.__mpi_comm.send(time.time_ns(),
                         dest=0,
                         tag=self.__mpi_message_tag)
    self.info('Sending poison pill to the SERVER')
    self.__mpi_comm.send(None, dest=0, tag=self.__mpi_message_tag)
    self.__mpi_comm.Disconnect()
    return RETURN_OK
def calculateInner(self):
    """Fill the interior of the 2-D fit LUT in parallel.

    Spawns ``self.thread_count`` worker processes that each run
    ``self.fit()`` for queued grid points, writing results into shared
    memory; on completion copies the shared arrays into
    ``self.lut_data`` / ``self.lut_done`` and advances the fitter state.

    Returns True on completion, False if the fitter was not READY.
    """
    if self.fitter_state.value != FitterState.READY.value:
        print('Optimizer not ready!')
        return False
    # Shared-memory grid (fit parameters per point) and done flags,
    # writable from all worker processes.
    grid = np.ctypeslib.as_ctypes(
        np.zeros(
            (self.lut_dim_2d, self.lut_dim_2d, self.lut_entry_values)))
    grid_shared = sharedctypes.RawArray(grid._type_, grid)
    done = np.ctypeslib.as_ctypes(
        np.full((self.lut_dim_2d, self.lut_dim_2d), False, dtype=bool))
    done_shared = sharedctypes.RawArray(done._type_, done)

    # worker process for fitting, calculates next grid point from queue
    # until queue delivers stop signal
    def worker(wnum, input_queue, result_queue):
        lgrid = np.ctypeslib.as_array(grid_shared)
        ldone = np.ctypeslib.as_array(done_shared)
        # Map integer grid indices onto coordinate values.
        dim_mult = (self.lut_min_max[1] -
                    self.lut_min_max[0]) / (self.lut_dim_2d - 1)
        dim_base = self.lut_min_max[0]
        # Pin this worker to its own CPU core.
        os.sched_setaffinity(0, [wnum])
        while True:
            try:
                idx, value = input_queue.get(block=True)
                if value == 'STOP':
                    break
                xx = dim_base + value[0] * dim_mult
                yy = dim_base + value[1] * dim_mult
                success = False
                try:
                    fit_res = self.fit(default=[xx, yy])
                    # fit() returns (params, error_flag); False flag == success.
                    if fit_res[1] == False:
                        success = True
                        lgrid[value[0], value[1], :] = np.array(fit_res[0])
                        ldone[value[0], value[1]] = True
                except:
                    # NOTE(review): bare except silently discards fit
                    # errors (reported only as success=False) — consider
                    # narrowing to Exception.
                    pass
                result_queue.put((idx, success))
            except:
                # NOTE(review): also swallows KeyboardInterrupt, making
                # the worker hard to kill — confirm this is intended.
                pass
            os.sched_yield()

    num_procs = self.thread_count
    task_queue = Queue(2 * num_procs)
    done_queue = Queue(2 * num_procs)

    # Launch workers.
    print('Running {} workers ...'.format(num_procs))
    processes = []
    for i in range(num_procs):
        processes.append(
            Process(target=worker,
                    args=(i, task_queue, done_queue),
                    name='worker {}'.format(i),
                    daemon=True))
        processes[-1].start()

    num_sent = 0
    num_done = 0
    num_clipped = 0
    num_failed = 0
    iterator = iter(self.points_to_solve)
    perc = 0
    data_size = len(self.points_to_solve)

    # Push grid points to process and keep count.
    # When done send stop signal
    def print_progress(msg=None):
        # Redraw the one-line status (ANSI clear-line + carriage return).
        msg_str = ''
        if msg is not None:
            msg_str = '[' + msg + ']'
        print(
            '\033[2K\r{} sent, {} done, {} clipped, {} failed, {} total ({} %) {}'
            .format(num_sent, num_done, num_clipped, num_failed, data_size,
                    perc, msg_str),
            end='')

    while num_done < data_size:
        # Keep the bounded task queue topped up.
        print_progress('sending work')
        while num_sent < data_size and not task_queue.full():
            nextval = next(iterator)
            task_queue.put((num_sent, nextval))
            num_sent += 1
            os.sched_yield()
        # Drain whatever results are currently available.
        while True:
            try:
                i, success = done_queue.get(block=False)
                num_done += 1
                if not success:
                    num_failed += 1
                perc = int(num_done / data_size * 100)
            except:
                # Expected when the queue is momentarily empty.
                break
            time.sleep(0)
        print_progress()
        time.sleep(2)

    # Terminate workers.
    for i in range(num_procs):
        task_queue.put((-1, 'STOP'))
    for p in processes:
        p.join()
    print('\n ... done')

    # Copy grid data from shared memory to instance variable
    self.lut_data = np.ctypeslib.as_array(grid_shared)
    self.lut_done = np.ctypeslib.as_array(done_shared)
    self.fitter_state = FitterState.BASE_FIT
    return True