class GarbageCollectorThread(Thread):
    """Thread in which garbage collection actually happens."""

    def __init__(self, gc):
        super(GarbageCollectorThread, self).__init__()
        self.gc = gc
        self.daemon = True
        self.pid = getpid()
        self.ready = Event()

    def run(self):
        s = self.gc.context.socket(zmq.PULL)
        s.linger = 0
        s.bind(self.gc.url)
        self.ready.set()

        while True:
            # detect fork
            if getpid is None or getpid() != self.pid:
                return
            msg = s.recv()
            if msg == b'DIE':
                break
            fmt = 'L' if len(msg) == 4 else 'Q'
            key = struct.unpack(fmt, msg)[0]
            tup = self.gc.refs.pop(key, None)
            if tup and tup.event:
                tup.event.set()
            del tup
        s.close()
class ReceiveNotification(object):
    def __init__(self, address, pstream):
        self.received = Event()
        self.requester = Requester(self.received, pstream, address, False, "hci1")
        self.connect()
        self.requester.write_by_handle(0x3C, str(bytearray([0xff, 0xff])))
        self.requester.write_by_handle(0x3E, str(bytearray([0x64])))
        data = self.requester.read_by_handle(0x3C)[0]
        for d in data:
            print(hex(ord(d)), end=' ')
        print("")
        self.requester.write_by_handle(0x3A, str(bytearray([0x1, 0x0])))
        self.wait_notification()

    def connect(self):
        print("Connecting...", end=' ')
        sys.stdout.flush()
        self.requester.connect()
        print("OK!")

    def wait_notification(self):
        print("\nThis is a bit tricky. You need to make your device send\n"
              "some notification. I'll wait...")
        self.received.wait()
class TestWatchMixin(object):
    """Testing the watch command is hard."""

    def watch_loop(self):
        # Hooked into the loop of the ``watch`` command.
        # Allows stopping the thread.
        self.has_looped.set()
        time.sleep(0.01)
        if getattr(self, 'stopped', False):
            return True

    def start_watching(self):
        """Run the watch command in a thread."""
        self.has_looped = Event()
        t = Thread(target=self.cmd_env.watch, kwargs={'loop': self.watch_loop})
        # In case something goes wrong with stopping, this will allow the
        # test process to end nonetheless.
        t.daemon = True
        t.start()
        self.t = t
        # Wait for first iteration, which will initialize the mtimes. Only
        # after this will ``watch`` be able to detect changes.
        self.has_looped.wait(1)

    def stop_watching(self):
        """Stop the watch command thread."""
        assert self.t.is_alive()  # If it has already ended, something is wrong
        self.stopped = True
        self.t.join(1)

    def __enter__(self):
        self.start_watching()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop_watching()
def request_token_spotty(spotty, use_creds=True):
    '''request token by using the spotty binary'''
    token_info = None
    if spotty.playback_supported:
        try:
            args = ["-t", "--client-id", CLIENTID, "--scope", ",".join(SCOPE), "-n", "temp-spotty"]
            done = Event()
            spotty = spotty.run_spotty(arguments=args, use_creds=use_creds)
            watcher = Thread(target=kill_on_timeout, args=(done, 5, spotty))
            watcher.daemon = True
            watcher.start()
            stdout, stderr = spotty.communicate()
            done.set()
            result = None
            log_msg("request_token_spotty stdout: %s" % stdout)
            for line in stdout.split():
                line = line.strip()
                if line.startswith("{\"accessToken\""):
                    result = json.loads(line)
            # transform token info to spotipy compatible format
            if result:
                token_info = {}
                token_info["access_token"] = result["accessToken"]
                token_info["expires_in"] = result["expiresIn"]
                token_info["token_type"] = result["tokenType"]
                token_info["scope"] = ' '.join(result["scope"])
                token_info['expires_at'] = int(time.time()) + token_info['expires_in']
                token_info['refresh_token'] = result["accessToken"]
        except Exception as exc:
            log_exception(__name__, exc)
    return token_info
class Actor:
    def __init__(self):
        self._mailbox = Queue()

    def send(self, msg):
        self._mailbox.put(msg)

    def recv(self):
        msg = self._mailbox.get()
        if msg is ActorExit:
            raise ActorExit()
        return msg

    def start(self):
        self._terminated = Event()
        t = Thread(target=self._bootstrap)
        t.daemon = True
        t.start()

    def _bootstrap(self):
        try:
            self.run()
        except ActorExit:
            pass
        finally:
            self._terminated.set()

    def join(self):
        self._terminated.wait()

    def run(self):
        while True:
            msg = self.recv()
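# A minimal usage sketch for the Actor class above (hedged: ActorExit is
# assumed to be the sentinel exception checked by recv(); it is not defined
# in the snippet, so a plausible definition is given here for illustration).
class ActorExit(Exception):
    pass

class PrintActor(Actor):
    def run(self):
        while True:
            msg = self.recv()  # raises ActorExit when the sentinel arrives
            print('Got:', msg)

p = PrintActor()
p.start()
p.send('hello')
p.send(ActorExit)  # poison pill: terminates the actor's thread
p.join()           # blocks on the internal Event set by _bootstrap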
def test_ask_shutdown(self):
    q = queue.Queue()
    done = Event()
    done.set()
    channel = controller.Channel(q, done)
    with tutils.raises(Kill):
        channel.ask("test", Mock(name="test_ask_shutdown"))
class Dotter(object):
    def __init__(self, delay=100, symbol='.'):
        self.event = Event()
        self.delay = delay
        self.symbol = symbol
        self.status = False

    def __loop(self):
        while not self.event.is_set():
            stdout.write(self.symbol)
            stdout.flush()
            sleep(self.delay / 1000)

    def start(self):
        if not self.status:
            self.event.clear()
            Thread(target=self.__loop).start()
            self.status = True

    def stop(self, newLine=True):
        if self.status:
            self.event.set()
            if newLine:
                stdout.write('\n')
            self.status = False

    def set(self, delay=None, symbol=None):
        if delay is not None:
            self.delay = delay
        if symbol is not None:
            self.symbol = symbol
        if self.status:
            self.stop(False)
            self.start()
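# A short usage sketch for Dotter (assumes `stdout` and `sleep` were imported
# as in the class above, e.g. from sys and time respectively).
dotter = Dotter(delay=250, symbol='.')
dotter.start()   # background thread prints a dot every 250 ms
sleep(2)         # stand-in for real work
dotter.stop()    # sets the Event, which ends the background loop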
def _callback(self, request):
    """
    This method is called by the ROS framework when a Service request has
    arrived. Each call runs in a separate thread and has to block until a
    response is present, because the return value of this method is used
    as response to the request.
    """
    msgID = uuid4().hex
    event = Event()

    with self._pendingLock:
        self._pending[msgID] = event

    self._reactor.callFromThread(self.received, request._buff, msgID)

    # Block execution here until the event is set, i.e. a response has
    # arrived
    event.wait()

    with self._pendingLock:
        response = self._pending.pop(msgID, None)

    if not isinstance(response, Message):
        # TODO: Change exception?
        raise rospy.ROSInterruptException('Interrupted.')

    return response
class DeferredResponse( object ):
  """
  A deferred that resolves to a response from TSServer.
  """

  def __init__( self, timeout = RESPONSE_TIMEOUT_SECONDS ):
    self._event = Event()
    self._message = None
    self._timeout = timeout

  def resolve( self, message ):
    self._message = message
    self._event.set()

  def result( self ):
    self._event.wait( timeout = self._timeout )
    if not self._event.isSet():
      raise RuntimeError( 'Response Timeout' )
    message = self._message
    if not message[ 'success' ]:
      raise RuntimeError( message[ 'message' ] )
    if 'body' in message:
      return self._message[ 'body' ]
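# A hedged usage sketch for DeferredResponse: one thread blocks in result()
# while another resolves it. The message shape ('success', 'body') follows
# the checks in result() above; the reader thread here is illustrative only.
deferred = DeferredResponse( timeout = 5 )

def reader():
  deferred.resolve( { 'success': True, 'body': { 'answer': 42 } } )

Thread( target = reader ).start()
print( deferred.result() )  # blocks until resolve() sets the Event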
def __start__(self):
    self.UDPSock = socket(AF_INET, SOCK_DGRAM)  # Initialise the socket
    self.port = 50000
    bound = 0
    while bound == 0:
        bound = 1
        try:
            addr = ('0.0.0.0', self.port)
            self.UDPSock.bind(addr)
        except:
            bound = 0
            self.port += 1
    print "xPL plugin, bound to port " + str(self.port)
    print "xPL is started"

    # start the heartbeat thread
    self.hbThreadEvent = Event()
    hbThread = Thread(target=self.SendHeartbeat, args=(self.hbThreadEvent,))
    hbThread.start()

    # start the main thread that scans for incoming xPL msgs
    self.mainThreadEvent = Event()
    mainThread = Thread(target=self.main, args=(self.mainThreadEvent,))
    mainThread.start()
class ResponseEvent:
    """Event which is fired when the response is returned for a request.

    For each request sent this event is created.
    An application can wait for the event to create a blocking request.
    """

    def __init__(self):
        self.__evt = Event()

    def waiting(self):
        return not self.__evt.isSet()

    def waitForResponse(self, timeOut=None):
        """blocks until the response arrived or timeout is reached."""
        self.__evt.wait(timeOut)
        if self.waiting():
            raise Timeout()
        else:
            if self.response["error"]:
                raise Exception(self.response["error"])
            else:
                return self.response["result"]

    def handleResponse(self, resp):
        self.response = resp
        self.__evt.set()
class TestInterruptibleDecorator(TestCase):

    def setUp(self):
        self.quit_condition = False
        self.thread_started = Event()

    @interruptible
    def never_ending(self, cancellation_point):
        self.thread_started.set()
        while True:
            time.sleep(0.01)
            cancellation_point()

    def test_interruptible_decorator(self):
        """ Tests for the @interruptible decorator. """
        thread = Thread(target=self.never_ending, args=(
            lambda: _cancellation_point(lambda: self.quit_condition),))
        thread.start()
        # Wait until the thread comes to life
        self.thread_started.wait()
        # Ask it to quit within 20ms
        self.quit_condition = True
        time.sleep(0.02)
        # Thread is finished
        self.assertFalse(thread.is_alive())
def sync(loop, func, *args, **kwargs):
    """ Run coroutine in loop running in separate thread """
    if not loop._running:
        try:
            return loop.run_sync(lambda: func(*args, **kwargs))
        except RuntimeError:  # loop already running
            pass

    from threading import Event
    e = Event()
    result = [None]
    error = [False]
    traceback = [False]

    @gen.coroutine
    def f():
        try:
            result[0] = yield gen.maybe_future(func(*args, **kwargs))
        except Exception as exc:
            logger.exception(exc)
            result[0] = exc
            error[0] = exc
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback[0] = exc_traceback
        finally:
            e.set()

    a = loop.add_callback(f)
    while not e.is_set():
        e.wait(1000000)
    if error[0]:
        six.reraise(type(error[0]), error[0], traceback[0])
    else:
        return result[0]
def test_delayed_body_read_timeout(self):
    timed_out = Event()

    def socket_handler(listener):
        sock = listener.accept()[0]
        buf = b''
        body = 'Hi'
        while not buf.endswith(b'\r\n\r\n'):
            buf = sock.recv(65536)
        sock.send(('HTTP/1.1 200 OK\r\n'
                   'Content-Type: text/plain\r\n'
                   'Content-Length: %d\r\n'
                   '\r\n' % len(body)).encode('utf-8'))

        timed_out.wait()
        sock.send(body.encode('utf-8'))
        sock.close()

    self._start_server(socket_handler)
    pool = HTTPConnectionPool(self.host, self.port)

    response = yield From(pool.urlopen('GET', '/', retries=0,
                                       preload_content=False,
                                       timeout=Timeout(connect=1, read=0.1)))
    try:
        self.aioAssertRaises(ReadTimeoutError, response.read)
    finally:
        timed_out.set()
def __init__(self, pvName, mnemonic):
    """
    **Constructor**
    See :class:`py4syn.epics.StandardDevice`

    Parameters
    ----------
    pvName : `string`
        Power supply base naming of the PV (Process Variable)
    mnemonic : `string`
        Temperature controller mnemonic
    """
    super().__init__(mnemonic)

    self.device = Device(pvName + ':',
                         ['termopar', 'target', 'status', 'stepNum',
                          'programTable', 'programming', 'run', 'stop',
                          'advance', 'setPatternCount', 'timeScale',
                          'level1', 'reset', 'pause', 'sendCommand'])

    self.programmingDone = Event()
    self.newTemperature = Event()
    self.newStep = Event()
    self.device.add_callback('programming', self.onProgrammingChange)
    self.device.add_callback('termopar', self.onTemperatureChange)
    self.device.add_callback('stepNum', self.onStepChange)
    self.timeScaleCache = self.device.get('timeScale')

    self.pvName = pvName
    self.rate = 5
    self.presetDone = False
def test_poll(self, mock_prep, mock_proc, mock_get):
    e = Event()
    blk = RESTBlock(e)
    mock_get.return_value = Response()
    mock_get.return_value.status_code = 200
    mock_proc.return_value = [None, None]
    self.configure_block(blk, {
        "polling_interval": {"seconds": 1},
        "retry_interval": {"seconds": 1},
        "queries": ["foobar"]
    })
    blk.start()
    e.wait(2)
    mock_prep.assert_called_once_with(False)
    self.assertEqual(mock_get.call_count, 1)
    self.assertEqual(mock_proc.call_count, 1)
    blk.stop()
def TriggerEventWait(
    self,
    suffix,
    payload=None,
    prefix="Main",
    source=eg
):
    event = EventGhostEvent(suffix, payload, prefix, source)
    if event.source in self.filters:
        for filterFunc in self.filters[event.source]:
            if filterFunc(event) is True:
                return event
    executed = Event()

    def Execute():
        try:
            event.Execute()
        finally:
            executed.set()

    def Transfer():
        ActionThreadCall(Execute)
        event.SetShouldEnd()

    self.AppendAction(Transfer)
    executed.wait(5.0)
    if not executed.isSet():
        eg.PrintWarningNotice(
            "timeout TriggerEventWait\n",
            traceback.format_stack()
        )
    return event
def test_paging(self, mock_prep, mock_proc, mock_get):
    e = Event()
    blk = RESTBlock(e)
    mock_get.return_value = Response()
    mock_get.return_value.status_code = 200
    mock_proc.side_effect = [([None, None], True),
                             ([None, None], True),
                             ([None, None], False)]
    self.configure_block(blk, {
        "polling_interval": {"seconds": 1},
        "retry_interval": {"seconds": 1},
        "queries": ["foobar"]
    })
    blk.start()
    e.wait(2)
    self.assertEqual(blk.page_num, 3)
    blk.stop()
def test_no_dupes(self, mock_prep, mock_proc, mock_get):
    e = Event()
    blk = MultiQueryREST(e)
    mock_get.return_value = Response()
    mock_get.return_value.status_code = 200
    mock_proc.return_value = [
        Signal({'_id': 1}),
        Signal({'_id': 2})
    ], False
    self.configure_block(blk, {
        "polling_interval": {"seconds": 0.5},
        "retry_interval": {"seconds": 1},
        "queries": ["foobar", "bazqux"]
    })
    blk.start()
    e.wait(2)
    self.assert_num_signals_notified(2, blk)
    blk.stop()
def _score_scenes(self, mov, scene_list, score_obj, analysis_budget):
    '''Get all the scenes scored.

    Results stored in score_obj
    '''
    frame_q = Queue.Queue(maxsize=100)
    halter = Event()

    threads = [Thread(target=self._frame_extractor,
                      args=(mov, scene_list, frame_q, halter))]
    for i in range(options.workers):
        threads.append(Thread(target=self._worker,
                              args=(score_obj, frame_q, halter)))

    for t in threads:
        t.daemon = True
        t.start()

    # Run the frame scoring until we are out of time
    if not halter.wait(analysis_budget):
        _log.info('Out of time sampling frames')
        halter.set()

    for t in threads:
        t.join()

    _log.info('Finished scoring scenes')
class ThreadedRunner(Runnable):

    def __init__(self, runnable):
        self._runnable = runnable
        self._notifier = Event()
        self._result = None
        self._error = None
        self._traceback = None
        self._thread = None

    def run(self):
        try:
            self._result = self._runnable()
        except:
            self._error, self._traceback = sys.exc_info()[1:]
        self._notifier.set()

    __call__ = run

    def run_in_thread(self, timeout):
        # The runner is callable (__call__ = run), so it is passed as the
        # thread target; passing it positionally would set `group` instead.
        self._thread = Thread(target=self, name=TIMEOUT_THREAD_NAME)
        self._thread.setDaemon(True)
        self._thread.start()
        self._notifier.wait(timeout)
        return self._notifier.isSet()

    def get_result(self):
        if self._error:
            raise self._error, None, self._traceback
        return self._result

    def stop_thread(self):
        self._thread.stop()
class Task(object):

    def __init__(self, name, start_time, calc_next_time, func):
        """
        Initialize a Task.

        Arguments:
        name           - Name of task.
        start_time     - First time for task to run
        calc_next_time - Function to calculate the time of next run,
                         gets one argument, the last run time as a datetime.
                         Returns None when task should no longer be run
        func           - A function to run
        """
        self.name = name
        self.start_time = start_time
        self.scheduled_time = start_time
        self.calc_next_time = calc_next_time
        self.func = func
        self.halt_flag = Event()

    def run(self):
        logging.debug("Running %s task, scheduled at: %s" %
                      (self.name, self.scheduled_time,))
        if not self.halt_flag.isSet():
            try:
                try:
                    self.func()
                except:
                    raise
            finally:
                self.scheduled_time = self.calc_next_time(self.scheduled_time)
                logging.debug("Scheduled next run of %s for: %s" %
                              (self.name, self.scheduled_time,))

    def halt(self):
        self.halt_flag.set()
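# A hypothetical usage sketch for Task: a job rescheduled every minute until
# halted. The datetime import and both lambdas are illustrative assumptions,
# not part of the original snippet.
from datetime import datetime, timedelta

task = Task(
    name="heartbeat",
    start_time=datetime.now(),
    calc_next_time=lambda last: last + timedelta(minutes=1),
    func=lambda: logging.info("beat"),
)
task.run()   # a scheduler would invoke this at task.scheduled_time
task.halt()  # sets the Event; later run() calls become no-ops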
def FromWatchdogWithSubservers_test( self ):
  all_servers_are_running = Event()

  def KeepServerAliveInAnotherThread():
    while not all_servers_are_running.is_set():
      try:
        self.GetRequest( 'ready' )
      except requests.exceptions.ConnectionError:
        pass
      finally:
        time.sleep( 0.1 )

  self.Start( idle_suicide_seconds = 2, check_interval_seconds = 1 )
  StartThread( KeepServerAliveInAnotherThread )

  try:
    filetypes = [ 'cs', 'go', 'java', 'javascript', 'typescript', 'rust' ]
    for filetype in filetypes:
      self.StartSubserverForFiletype( filetype )
    self.AssertServersAreRunning()
  finally:
    all_servers_are_running.set()

  self.AssertServersShutDown( timeout = SUBSERVER_SHUTDOWN_TIMEOUT + 10 )
  self.AssertLogfilesAreRemoved()
def join(self, server, channel, nick=None, port=6667):
    channel = channel.lower()
    self.connect(server, port, nick)
    status = self.get_status(server)
    connected_servers = set(status['servers'])
    if server not in connected_servers:
        print("Not connected...")
    if channel in set(status['servers'][server]['channels']):
        print("Joined already.")
        return
    else:
        print("Joining", channel)
        e = Event()

        def channel_joined(addr, event):
            if event['kind'] == 'irc':
                if event['command'] == 'JOIN':
                    if event['trailing'].lower() == channel:
                        status['servers'][server]['channels'].append(channel)
                        self.events.unlisten(server, channel_joined)
                        e.set()

        self.events.listen(server, channel_joined)
        self.events.broadcast({
            'event': 'irc.command:' + server,
            'command': "JOIN",
            "arguments": [channel]
        })
        e.wait()
class PeriodicTimer(object):

    def __init__(self, frequency=60, *args, **kwargs):
        self.is_stopped = Event()
        self.is_stopped.clear()

        self.interval = frequency
        self._timer = Timer(self.frequency, self._check_for_event, ())
        self._timer.daemon = True

    @property
    def interval(self):
        return self.frequency

    @interval.setter
    def interval(self, frequency):
        self.frequency = frequency
        self.stop()
        try:
            if self._timer:
                self._timer.cancel()
                del(self._timer)
        except AttributeError, ex:
            pass
        self._timer = Timer(self.frequency, self._check_for_event, ())
        return self.frequency
class TestInterruptible(unittest.TestCase):
    """ Tests for interrupting cooperative threads """

    def test_interruptible_decorator(self):
        """ Tests for the @interruptible decorator. """
        self.quit_condition = False
        cancellation_point = lambda: _cancellation_point(
            lambda: self.quit_condition)
        self.thread_started = Event()

        @interruptible
        def never_ending(cancellation_point):
            self.thread_started.set()
            while True:
                time.sleep(0.1)
                cancellation_point()

        thread = Thread(target=never_ending, args=(cancellation_point, ))
        thread.start()
        self.thread_started.wait()

        self.quit_condition = True
        countdown = 10
        while thread.is_alive() and countdown > 0:
            time.sleep(0.1)
            countdown -= 1
        self.assertFalse(thread.is_alive())
class CheckForUpdates(Thread):

    INTERVAL = 24 * 60 * 60  # seconds
    daemon = True

    def __init__(self, parent):
        Thread.__init__(self)
        self.shutdown_event = Event()
        self.signal = Signal(parent)

    def run(self):
        while not self.shutdown_event.is_set():
            calibre_update_version = NO_CALIBRE_UPDATE
            plugins_update_found = 0
            try:
                version = get_newest_version()
                if version[:2] > numeric_version[:2]:
                    calibre_update_version = version
            except Exception as e:
                prints('Failed to check for calibre update:', as_unicode(e))
            try:
                update_plugins = get_plugin_updates_available(raise_error=True)
                if update_plugins is not None:
                    plugins_update_found = len(update_plugins)
            except Exception as e:
                prints('Failed to check for plugin update:', as_unicode(e))
            if calibre_update_version != NO_CALIBRE_UPDATE or plugins_update_found > 0:
                self.signal.update_found.emit(calibre_update_version,
                                              plugins_update_found)
            self.shutdown_event.wait(self.INTERVAL)

    def shutdown(self):
        self.shutdown_event.set()
def testMultipleLoad(self):
    """
    In DropBox, the loading of multiple CLIs seems to lead to the
    wrong context being assigned to some controls. See #4749
    """
    import random
    from threading import Thread, Event

    event = Event()

    class T(Thread):
        def run(self, *args):
            pause = random.random()
            event.wait(pause)
            self.cli = CLI()
            self.cli.loadplugins()
            self.con = self.cli.controls["admin"]
            self.cmp = self.con.ctx

    threads = [T() for x in range(20)]
    for t in threads:
        t.start()
    event.set()
    for t in threads:
        t.join()

    assert len(threads) == len(set([t.cli for t in threads]))
    assert len(threads) == len(set([t.con for t in threads]))
    assert len(threads) == len(set([t.cmp for t in threads]))
def test_concurrent_rendering():
    '''Best-effort testing that concurrent multi-threaded rendering works.
    The test has no guarantees around being deterministic, but if it fails
    you know something is wrong with concurrent rendering. If it passes,
    things are probably working.'''
    err = None

    def func(sim, event):
        event.wait()
        sim.data.qpos[:] = 0.0
        sim.forward()
        img1 = sim.render(width=40, height=40, camera_name="camera1")
        img2 = sim.render(width=40, height=40, camera_name="camera2")
        try:
            assert np.sum(img1[:]) == 23255
            assert np.sum(img2[:]) == 12007
        except Exception as e:
            nonlocal err
            err = e

    model = load_model_from_xml(BASIC_MODEL_XML)
    sim = MjSim(model)
    sim.render(100, 100)
    event = Event()
    threads = []
    for _ in range(100):
        thread = Thread(target=func, args=(sim, event))
        threads.append(thread)
        thread.start()
    event.set()
    for thread in threads:
        thread.join()
    assert err is None, "Exception: %s" % (str(err))
class WaitApp (object):
    def __init__ (self):
        # self.event = event
        self.ev = Event( )
        self.loop = GLib.MainLoop( )
        self.expired = False

    def handle_emitted (self, status):
        print "emitted", status, self

    def handle_event (self):
        print "event", self.loop
        self.loop.quit( )

    def until (self, event, timeout=None):
        self.event = event
        self.event.Do.connect(self.handle_event)
        self.event.Emit.connect(self.handle_emitted)
        self.background = Thread(target=self.pending, args=(timeout, self.loop.quit))
        self.background.daemon = True
        self.background.start( )
        self.loop.run( )

    def pending (self, timeout, quit):
        print "starting background, waiting for ", timeout
        self.ev.wait(timeout)
        quit( )
        self.expired = True
        print "Failed to find event within", timeout
def __init__(self, group=None, target=None, name=None,
             args=(), kwargs=None, verbose=None):
    super(StoppableThreadWithResult, self).__init__(
        group=group, target=target, name=name,
        args=args, kwargs=kwargs)
    self._stop = Event()
def getSocket(ip=''):
    def trySocketConnect(queue_ip, exitEvent):
        while True:
            if exitEvent.is_set():
                break
            ip_current = queue_ip.get()
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                if sock.connect_ex((ip_current, 8333)) == 0:
                    if exitEvent.is_set():
                        sock.close()
                        break
                    exitEvent.set()
                    queue_socket.put(sock)
                    queue_ip.task_done()
                    print('Used IP for peer connection: %s' % ip_current)
                    break
            except:
                pass

    def fillAdress(queue_addr, queue_ip, exitEvent):
        list_used = []
        while True:
            address = queue_addr.get()
            if exitEvent.is_set():
                break
            try:
                info = socket.getaddrinfo(address, 80)
                for item in info:
                    if exitEvent.is_set():
                        break
                    if socket.AF_INET == item[0]:
                        ip = item[4][0]
                        if ip not in list_used:
                            list_used.append(ip)
                            queue_ip.put(ip)
            except:
                pass

    if ip != '':  # ip is defined, so connecting to it
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((ip, 8333))
            return sock
        except:
            print('Connection error to host %s' % ip)
            return
    else:
        num_threads = 4
        queue_ip = Queue()
        queue_socket = Queue()
        queue_address = Queue()
        exit_event = Event()
        for addr in addressToFindPeers:
            th_addr = Thread(target=fillAdress, args=(queue_address, queue_ip, exit_event,))
            th_addr.daemon = True
            th_addr.start()
        for i in range(num_threads):
            th_sock = Thread(target=trySocketConnect, args=(queue_ip, exit_event,))
            th_sock.daemon = True
            th_sock.start()
        for addr in addressToFindPeers:
            queue_address.put(addr)
        queue_ip.join()
        return queue_socket.get()
def _run_task(
    test_template: Callable,
    tasks_queue: Queue,
    events_queue: Queue,
    generator_done: threading.Event,
    checks: Iterable[CheckFunction],
    targets: Iterable[Target],
    settings: hypothesis.settings,
    seed: Optional[int],
    results: TestResultSet,
    stateful: Optional[Stateful],
    stateful_recursion_limit: int,
    **kwargs: Any,
) -> None:
    def _run_tests(maker: Callable, recursion_level: int = 0) -> None:
        if recursion_level > stateful_recursion_limit:
            return
        for _result, _data_generation_method in maker(test_template, settings, seed):
            # `result` is always `Ok` here
            _operation, test = _result.ok()
            feedback = Feedback(stateful, _operation)
            for _event in run_test(
                _operation,
                test,
                checks,
                data_generation_method,
                targets,
                results,
                recursion_level=recursion_level,
                feedback=feedback,
                **kwargs,
            ):
                events_queue.put(_event)
            _run_tests(feedback.get_stateful_tests, recursion_level + 1)

    with capture_hypothesis_output():
        while True:
            try:
                result, data_generation_method = tasks_queue.get(timeout=0.001)
            except queue.Empty:
                # The queue is empty & there will be no more tasks
                if generator_done.is_set():
                    break
                # If there is a possibility for new tasks - try again
                continue
            if isinstance(result, Ok):
                operation = result.ok()
                test_function = create_test(
                    operation=operation,
                    test=test_template,
                    settings=settings,
                    seed=seed,
                    data_generation_method=data_generation_method,
                )
                items = (
                    Ok((operation, test_function)),
                    data_generation_method,
                )
                # This lambda ignores the input arguments to support the same interface for
                # `feedback.get_stateful_tests`
                _run_tests(lambda *_: (items,))
            else:
                for event in handle_schema_error(result.err(), results, data_generation_method, 0):
                    events_queue.put(event)
def _execute(
    self, results: TestResultSet, stop_event: threading.Event
) -> Generator[events.ExecutionEvent, None, None]:
    """All events come from a queue where different workers push their events."""
    # Instead of generating all tests at once, we do it when there is a free worker to pick it up
    # This is extremely important for memory consumption when testing large schemas
    # IMPLEMENTATION NOTE:
    # It would be better to have a separate producer thread and communicate via threading events.
    # Though it is a bit more complex, so the current solution is suboptimal in terms of resources
    # utilization, but good enough and easy enough to implement.
    tasks_generator = (
        (operation, data_generation_method)
        for operation in self.schema.get_all_operations()
        for data_generation_method in self.schema.data_generation_methods
    )
    generator_done = threading.Event()
    tasks_queue: Queue = Queue()
    # Add at least `workers_num` tasks first, so all workers are busy
    for _ in range(self.workers_num):
        try:
            # SAFETY: Workers didn't start yet, direct modification is OK
            tasks_queue.queue.append(next(tasks_generator))
        except StopIteration:
            generator_done.set()
            break

    # Events are pushed by workers via a separate queue
    events_queue: Queue = Queue()
    workers = self._init_workers(tasks_queue, events_queue, results, generator_done)

    def stop_workers() -> None:
        for worker in workers:
            # workers are initialized at this point and `worker.ident` is set with an integer value
            ident = cast(int, worker.ident)
            stop_worker(ident)
            worker.join()

    is_finished = False
    try:
        while not is_finished:
            # Sleep is needed for performance reasons: each call to `is_alive` of an alive
            # worker waits for a lock; iterations without waiting are too frequent, and a lot
            # of time would be spent on waiting for these locks
            time.sleep(0.001)
            is_finished = all(not worker.is_alive() for worker in workers)
            while not events_queue.empty():
                event = events_queue.get()
                if stop_event.is_set() or isinstance(event, events.Interrupted) or self._should_stop(event):
                    # We could still have events in the queue, but ignore them to keep the logic simple
                    # for now, could be improved in the future to show more info in such corner cases
                    stop_workers()
                    is_finished = True
                    if stop_event.is_set():
                        # Discard the event. The invariant is: the next event after `stream.stop()` is `Finished`
                        break
                yield event
                # When we know that there are more tasks, put another task to the queue.
                # The worker might not actually finish the current one yet, but we put the new one now, so
                # the worker can immediately pick it up when the current one is done
                if isinstance(event, events.BeforeExecution) and not generator_done.is_set():
                    try:
                        tasks_queue.put(next(tasks_generator))
                    except StopIteration:
                        generator_done.set()
    except KeyboardInterrupt:
        stop_workers()
        yield events.Interrupted()
class SSHReaderThread(Thread):  # pylint: disable=too-many-instance-attributes
    """
    Thread that reads data from the ssh session socket and forwards it to a Queue.
    It is needed because the socket buffer gets overflowed if data is sent faster
    than watchers can process it, so we have to have a Queue as a buffer with
    'endless' memory, and a fast reader that reads data from the socket and
    forwards it to the Queue. As part of this process it splits data into lines,
    because watchers expect it to be organized that way.
    """

    def __init__(self, session: Session, channel: Channel, timeout: NullableTiming,
                 timeout_read_data: NullableTiming):
        self.stdout = Queue()
        self.stderr = Queue()
        self.timeout_reached = False
        self._session = session
        self._channel = channel
        self._timeout = timeout
        self._timeout_read_data = timeout_read_data
        self.raised = None
        self._can_run = Event()
        self._can_run.set()
        super().__init__(daemon=True)

    def run(self):
        try:
            self._read_output(self._session, self._channel, self._timeout,
                              self._timeout_read_data, self.stdout, self.stderr)
        except Exception as exc:  # pylint: disable=broad-except
            self.raised = exc

    def _read_output(  # pylint: disable=too-many-arguments,too-many-branches
            self, session: Session, channel: Channel, timeout: NullableTiming,
            timeout_read_data: NullableTiming, stdout_stream: Queue, stderr_stream: Queue):
        """Reads data from the ssh session, splits it into lines and forwards the
        lines into the stderr and stdout pipes.
        It is required for it to be fast, which is why there is code duplication
        and non-pythonic code.
        """
        # pylint: disable=too-many-locals
        stdout_remainder = stderr_remainder = b''
        if timeout is None:
            end_time = float_info.max
        else:
            end_time = perf_counter() + timeout
        eof_result = stdout_size = stderr_size = 1
        while eof_result == LIBSSH2_ERROR_EAGAIN or stdout_size == LIBSSH2_ERROR_EAGAIN or \
                stdout_size > 0 or stderr_size == LIBSSH2_ERROR_EAGAIN or stderr_size > 0:  # pylint: disable=consider-using-in
            if not self._can_run.is_set():
                break
            if perf_counter() > end_time:
                self.timeout_reached = True
                break
            with session.lock:
                if stdout_size == LIBSSH2_ERROR_EAGAIN and stderr_size == LIBSSH2_ERROR_EAGAIN:  # pylint: disable=consider-using-in
                    session.simple_select(timeout=timeout_read_data)
                stdout_size, stdout_chunk = channel.read()
                stderr_size, stderr_chunk = channel.read_stderr()
                eof_result = channel.eof()
            if stdout_chunk and stdout_stream is not None:
                data_splitted = stdout_chunk.split(LINESEP)
                if len(data_splitted) == 1:
                    stdout_remainder = stdout_remainder + data_splitted.pop()
                else:
                    if stdout_remainder:
                        stdout_stream.put(stdout_remainder + data_splitted.pop(0))
                    stdout_remainder = data_splitted.pop()
                for chunk in data_splitted:
                    stdout_stream.put(chunk)
            if stderr_chunk and stderr_stream is not None:
                data_splitted = stderr_chunk.split(LINESEP)
                if len(data_splitted) == 1:
                    stderr_remainder = stderr_remainder + data_splitted.pop()
                else:
                    if stderr_remainder:
                        stderr_stream.put(stderr_remainder + data_splitted.pop(0))
                    stderr_remainder = data_splitted.pop()
                for chunk in data_splitted:
                    stderr_stream.put(chunk)
        if stdout_remainder:
            stdout_stream.put(stdout_remainder)
        if stderr_remainder:
            stderr_stream.put(stderr_remainder)

    def stop(self, timeout: float = None):
        self._can_run.clear()
        self.join(timeout)
def __init__(self, session: Session, keepalive_timeout: NullableTiming):
    self._keep_running = Event()
    self._keep_running.set()
    self._session = session
    self._keepalive_timeout = keepalive_timeout
    super().__init__(daemon=True)
# app = Flask(__name__)
# app = Flask(__name__, static_folder='C:/Users/lnkngoc/Desktop/FACE/display/')
app = Flask(
    __name__,
    static_folder='C:\\Users\\Administrator\\Documents\\FACE\\web_face_image\\'
)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True

# turn the flask app into a socketio app
socketio = SocketIO(app, async_mode=None, logger=True, engineio_logger=True)

# random number Generator Thread
thread = Thread()
thread_stop_event = Event()

# app = Flask(__name__, static_folder='C:\\Users\\Administrator\\Documents\\FACE\\web_face\\')
path_check = "./templates/display_image/img/images/"
# path_check = "C:\\Users\\Administrator\\Documents\\FACE\\web_face\\templates\\display_image\\img\\images"
count = 0
socketio = SocketIO(app)

# ============= Face ===============
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, help="Run camera recognition", default="camera")
args = parser.parse_args(sys.argv[1:])
FRGraph = FaceRecGraph()
class IOProcess(object):
    _DEBUG_VALGRIND = False
    _TRACE_DEBUGGING = False

    _log = logging.getLogger("IOProcessClient")
    _sublog = logging.getLogger("IOProcess")
    _counter = itertools.count()

    def __init__(self, max_threads=0, timeout=60, max_queued_requests=-1,
                 name=None, wait_until_ready=2):
        self.timeout = timeout
        self._max_threads = max_threads
        self._max_queued_requests = max_queued_requests
        self._name = name or "ioprocess-%d" % next(self._counter)
        self._wait_until_ready = wait_until_ready
        self._commandQueue = Queue()
        self._eventFdReciever, self._eventFdSender = os.pipe()
        self._reqId = 0
        self._isRunning = True
        self._started = Event()
        self._lock = Lock()
        self._partialLogs = ""
        self._pid = None

        self._log.info("(%s) Starting client", self.name)
        self._run()

    @property
    def name(self):
        return self._name

    @property
    def pid(self):
        return self._pid

    def _run(self):
        self._log.debug("(%s) Starting ioprocess", self.name)
        myRead, hisWrite = os.pipe()
        hisRead, myWrite = os.pipe()

        for fd in (hisRead, hisWrite):
            # Python 3 creates fds with the close-on-exec flag set.
            clear_cloexec(fd)

        self._partialLogs = ""

        cmd = [
            config.TASKSET_PATH,
            '--cpu-list', _ANY_CPU,
            config.IOPROCESS_PATH,
            "--read-pipe-fd", str(hisRead),
            "--write-pipe-fd", str(hisWrite),
            "--max-threads", str(self._max_threads),
            "--max-queued-requests", str(self._max_queued_requests),
        ]

        if self._TRACE_DEBUGGING:
            cmd.append("--trace-enabled")

        if self._DEBUG_VALGRIND:
            cmd = ["valgrind", "--log-file=ioprocess.valgrind.log",
                   "--leak-check=full", "--tool=memcheck"] + cmd + \
                  ["--keep-fds"]

        p = subprocess.Popen(cmd, pass_fds=(hisRead, hisWrite),
                             stderr=subprocess.PIPE)
        self._pid = p.pid

        os.close(hisRead)
        os.close(hisWrite)

        setNonBlocking(myRead)
        setNonBlocking(myWrite)

        self._startCommunication(p, myRead, myWrite)

    def _pingPoller(self):
        try:
            os.write(self._eventFdSender, b'0')
        except OSError as e:
            if e.errno == errno.EAGAIN:
                return
            if not self._isRunning:
                raise Closed("Client %s was closed" % self.name)
            raise

    def _startCommunication(self, proc, readPipe, writePipe):
        self._log.debug("(%s) Starting communication thread", self.name)
        self._started.clear()
        args = (ref(self), proc, readPipe, writePipe)
        self._commthread = start_thread(
            _communicate,
            args,
            name="ioprocess/%d" % (proc.pid,),
        )
        if self._started.wait(self._wait_until_ready):
            self._log.debug("(%s) Communication thread started", self.name)
        else:
            self._log.warning("(%s) Timeout waiting for communication thread",
                              self.name)

    def _getRequestId(self):
        self._reqId += 1
        return self._reqId

    def _requestToBytes(self, cmd, reqId):
        methodName, args = cmd
        reqDict = {'id': reqId, 'methodName': methodName, 'args': args}
        reqStr = json.dumps(reqDict)
        res = Size.pack(len(reqStr))
        res += reqStr.encode('utf8')
        return res

    def _processLogs(self, data):
        if self._partialLogs:
            data = self._partialLogs + data
            self._partialLogs = b''

        lines = data.splitlines(True)
        for line in lines:
            if not line.endswith(b"\n"):
                self._partialLogs = line
                return

            # We must decode the line because python3 does not log bytes
            # properly (e.g. you get "b'text'" instead of "text").
            line = line.decode('utf8', 'replace')

            try:
                level, logDomain, message = line.strip().split("|", 2)
            except:
                self._log.warning("(%s) Invalid log message %r",
                                  self.name, line)
                continue

            if level == "ERROR":
                self._sublog.error("(%s) %s", self.name, message)
            elif level == "WARNING":
                self._sublog.warning("(%s) %s", self.name, message)
            elif level == "DEBUG":
                self._sublog.debug("(%s) %s", self.name, message)
            elif level == "INFO":
                self._sublog.info("(%s) %s", self.name, message)

    def _sendCommand(self, cmdName, args, timeout=None):
        res = CmdResult()
        self._commandQueue.put(((cmdName, args), res))
        self._pingPoller()
        res.event.wait(timeout)
        if not res.event.isSet():
            raise Timeout(os.strerror(errno.ETIMEDOUT))
        if res.result.get('errcode', 0) != 0:
            errcode = res.result['errcode']
            errstr = res.result.get('errstr', os.strerror(errcode))
            raise OSError(errcode, errstr)
        return res.result.get('result', None)

    def ping(self):
        return self._sendCommand("ping", {}, self.timeout)

    def echo(self, text, sleep=0):
        return self._sendCommand("echo",
                                 {'text': text, "sleep": sleep},
                                 self.timeout)

    def crash(self):
        try:
            self._sendCommand("crash", {}, self.timeout)
            return False
        except OSError as e:
            if e.errno == ERR_IOPROCESS_CRASH:
                return True
            return False

    def stat(self, path):
        resdict = self._sendCommand("stat", {"path": path}, self.timeout)
        return dict2namedtuple(resdict, StatResult)

    def lstat(self, path):
        resdict = self._sendCommand("lstat", {"path": path}, self.timeout)
        return dict2namedtuple(resdict, StatResult)

    def statvfs(self, path):
        resdict = self._sendCommand("statvfs", {"path": path}, self.timeout)
        return dict2namedtuple(resdict, StatvfsResult)

    def pathExists(self, filename, writable=False):
        check = os.R_OK
        if writable:
            check |= os.W_OK
        if self.access(filename, check):
            return True
        return self.access(filename, check)

    def lexists(self, path):
        return self._sendCommand("lexists", {"path": path}, self.timeout)

    def fsyncPath(self, path):
        self._sendCommand("fsyncPath", {"path": path}, self.timeout)

    def access(self, path, mode):
        try:
            return self._sendCommand("access",
                                     {"path": path, "mode": mode},
                                     self.timeout)
        except OSError:
            # This is how python implements access
            return False

    def mkdir(self, path, mode=DEFAULT_MKDIR_MODE):
        return self._sendCommand("mkdir",
                                 {"path": path, "mode": mode},
                                 self.timeout)

    def listdir(self, path):
        return self._sendCommand("listdir", {"path": path}, self.timeout)

    def unlink(self, path):
        return self._sendCommand("unlink", {"path": path}, self.timeout)

    def rmdir(self, path):
        return self._sendCommand("rmdir", {"path": path}, self.timeout)

    def rename(self, oldpath, newpath):
        return self._sendCommand("rename",
                                 {"oldpath": oldpath, "newpath": newpath},
                                 self.timeout)

    def link(self, oldpath, newpath):
        return self._sendCommand("link",
                                 {"oldpath": oldpath, "newpath": newpath},
                                 self.timeout)

    def symlink(self, oldpath, newpath):
        return self._sendCommand("symlink",
                                 {"oldpath": oldpath, "newpath": newpath},
                                 self.timeout)

    def chmod(self, path, mode):
        return self._sendCommand("chmod",
                                 {"path": path, "mode": mode},
                                 self.timeout)

    def readfile(self, path, direct=False):
        b64result = self._sendCommand("readfile",
                                      {"path": path, "direct": direct},
                                      self.timeout)
        return b64decode(b64result)

    def writefile(self, path, data, direct=False):
        self._sendCommand("writefile",
                          {"path": path,
                           "data": b64encode(data).decode('utf8'),
                           "direct": direct},
                          self.timeout)

    def readlines(self, path, direct=False):
        return self.readfile(path, direct).splitlines()

    def memstat(self):
        return self._sendCommand("memstat", {}, self.timeout)

    def glob(self, pattern):
        return self._sendCommand("glob", {"pattern": pattern}, self.timeout)

    def touch(self, path, flags, mode):
        return self._sendCommand("touch",
                                 {"path": path, "flags": flags, "mode": mode},
                                 self.timeout)

    def truncate(self, path, size, mode, excl):
        return self._sendCommand("truncate",
                                 {"path": path, "size": size,
                                  "mode": mode, "excl": excl},
                                 self.timeout)

    def close(self, sync=True):
        with self._lock:
            if not self._isRunning:
                return
            self._isRunning = False

            self._log.info("(%s) Closing client", self.name)
            self._pingPoller()
            os.close(self._eventFdReciever)
            os.close(self._eventFdSender)

            if sync:
                self._log.debug("(%s) Waiting for communication thread",
                                self.name)
                self._commthread.join()

    def __del__(self):
        self.close(False)
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    worker_processes = []
    worker_process_events = []
    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
        event = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen, event),
            daemon=False,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)
        worker_process_events.append(event)

    # Wait for all worker processes to come online before starting the
    # fork processes.  This is required to avoid race conditions like
    # in #297.
    for event in worker_process_events:
        if proc.is_alive():
            if not event.wait(timeout=30):
                break

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks, canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe(duplex=False)
    logger = setup_parent_logging(args, stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        if not hasattr(signal, "SIGHUP"):
            raise RuntimeError("Watching for source changes is not supported on %s." % sys.platform)
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher_stop_event = Event()
    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes], log_watcher_stop_event),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", signum.name, proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...", getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.  The
    # use of `waited' here avoids a race condition where the processes
    # could potentially exit before we even get a chance to wait on
    # them.
    waited = False
    while not waited or any(p.exitcode is None for p in worker_processes):
        waited = True
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical("Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                                proc.pid, proc.exitcode)
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break
            else:
                retcode = retcode or proc.exitcode

    # The log watcher can't be a daemon in case we log to a file so we
    # have to wait for it to complete on exit.
    log_watcher_stop_event.set()
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable, ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return RET_KILLED if retcode < 0 else retcode
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
    super(GPIOThread, self).__init__(group, target, name, args, kwargs)
    self.stopping = Event()
    self.daemon = True
def __init__(self, reachy):
    self.reachy = reachy
    self.running = Event()
def __init__(self):
    self.event = Event()
    self.result = None
def __init__(self, http, uid):
    Thread.__init__(self)
    self.stopped = Event()
    self.sender = WeiboSender(http, uid)
def run(self):
    while not self.stopEvent.is_set():
        rancilio_temperature_status_handler()
        Rancilio.update()
        rancilio_heater_status_handler()
        rancilio_ready_handler()
        time.sleep(1)
    # stop event is set
    Rancilio.setHeaterOutput(0)


configs = Configurator.instance()

# Threading stuff
stopEvent = Event()
error_in_method_event = Event()

dataLogger = DataLogger(stopEvent, error_in_method_event)
dataLogger.addTemperatureSensor('boiler', configs.boilerTempSensor1)
dataLogger.addTemperatureSensor('boiler', configs.boilerTempSensor2)
configs.dataLogger = dataLogger
temperatureAcquisitionProcess = Thread(target=dataLogger.acquireData)
temperatureAcquisitionProcess.start()

Rancilio = RancilioSilvia()
configs.Rancilio = Rancilio

rancilioError = RancilioError.instance()
rancilioError.blynkShutDownFcn = blynk_shut_down
rancilioError.blynkAliveFcn = blynk_check_live_connection
class DatabaseThread(Thread):
    subs = []

    DB_FILENAME = "pyload.db"
    VERSION_FILENAME = "db.version"

    def __init__(self, core):
        super().__init__()
        self.daemon = True
        self.pyload = core
        self._ = core._

        datadir = os.path.join(self.pyload.userdir, "data")
        os.makedirs(datadir, exist_ok=True)

        self.db_path = os.path.join(datadir, self.DB_FILENAME)
        self.version_path = os.path.join(datadir, self.VERSION_FILENAME)

        self.jobs = Queue()

        self.setuplock = Event()

        style.set_db(self)

    def setup(self):
        self.start()
        self.setuplock.wait()

    def run(self):
        """
        main loop, which executes commands.
        """
        convert = self._check_version()  #: returns None or current version

        self.conn = sqlite3.connect(self.db_path, isolation_level=None)
        os.chmod(self.db_path, 0o600)

        self.c = self.conn.cursor()  #: compatibility

        if convert is not None:
            self._convert_db(convert)

        self._create_tables()
        self._migrate_user()

        self.conn.commit()

        self.setuplock.set()

        while True:
            j = self.jobs.get()
            if j == "quit":
                self.c.close()
                self.conn.close()
                break
            j.process_job()

    @style.queue
    def shutdown(self):
        self.conn.commit()
        self.jobs.put("quit")

    def _check_version(self):
        """
        check db version and delete it if needed.
        """
        if not os.path.exists(self.version_path):
            with open(self.version_path, mode="w") as fp:
                fp.write(str(__version__))
            return

        with open(self.version_path) as fp:
            v = int(fp.read().strip())

        if v < __version__:
            if v < 2:
                self.pyload.log.warning(
                    self._("Filedatabase was deleted due to incompatible version."))
                os.remove(self.version_path)
                shutil.move(self.db_path, "files.backup.db")
            with open(self.version_path, mode="w") as fp:
                fp.write(str(__version__))
            return v

    def _convert_db(self, v):
        try:
            getattr(self, f"_convertV{v}")()
        except Exception:
            self.pyload.log.error(self._("Filedatabase could NOT be converted."))

    # --convert scripts start

    def _convertV2(self):
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "storage" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "identifier" TEXT NOT NULL, "key" TEXT NOT NULL, "value" TEXT DEFAULT "")'
        )
        self.pyload.log.info(self._("Database was converted from v2 to v3."))
        self._convertV3()

    def _convertV3(self):
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "users" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT NOT NULL, "email" TEXT DEFAULT "" NOT NULL, "password" TEXT NOT NULL, "role" INTEGER DEFAULT 0 NOT NULL, "permission" INTEGER DEFAULT 0 NOT NULL, "template" TEXT DEFAULT "default" NOT NULL)'
        )
        self.pyload.log.info(self._("Database was converted from v3 to v4."))

    # --convert scripts end

    def _create_tables(self):
        """
        create tables for database.
        """
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "packages" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT NOT NULL, "folder" TEXT, "password" TEXT DEFAULT "", "site" TEXT DEFAULT "", "queue" INTEGER DEFAULT 0 NOT NULL, "packageorder" INTEGER DEFAULT 0 NOT NULL)'
        )
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "links" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "url" TEXT NOT NULL, "name" TEXT, "size" INTEGER DEFAULT 0 NOT NULL, "status" INTEGER DEFAULT 3 NOT NULL, "plugin" TEXT DEFAULT "DefaultPlugin" NOT NULL, "error" TEXT DEFAULT "", "linkorder" INTEGER DEFAULT 0 NOT NULL, "package" INTEGER DEFAULT 0 NOT NULL, FOREIGN KEY(package) REFERENCES packages(id))'
        )
        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "p_id_index" ON links(package)')
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "storage" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "identifier" TEXT NOT NULL, "key" TEXT NOT NULL, "value" TEXT DEFAULT "")'
        )
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "users" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT NOT NULL, "email" TEXT DEFAULT "" NOT NULL, "password" TEXT NOT NULL, "role" INTEGER DEFAULT 0 NOT NULL, "permission" INTEGER DEFAULT 0 NOT NULL, "template" TEXT DEFAULT "default" NOT NULL)'
        )
        self.c.execute('CREATE VIEW IF NOT EXISTS "pstats" AS \
            SELECT p.id AS id, SUM(l.size) AS sizetotal, COUNT(l.id) AS linkstotal, linksdone, sizedone \
            FROM packages p JOIN links l ON p.id = l.package LEFT OUTER JOIN \
            (SELECT p.id AS id, COUNT(*) AS linksdone, SUM(l.size) AS sizedone \
            FROM packages p JOIN links l ON p.id = l.package AND l.status in (0,4,13) GROUP BY p.id) s ON s.id = p.id \
            GROUP BY p.id')

        # try to lower ids
        self.c.execute("SELECT max(id) FROM LINKS")
        fid = self.c.fetchone()[0]
        if fid:
            fid = int(fid)
        else:
            fid = 0
        self.c.execute("UPDATE SQLITE_SEQUENCE SET seq=? WHERE name=?",
                       (fid, "links"))

        self.c.execute("SELECT max(id) FROM packages")
        pid = self.c.fetchone()[0]
        if pid:
            pid = int(pid)
        else:
            pid = 0
        self.c.execute("UPDATE SQLITE_SEQUENCE SET seq=? WHERE name=?",
                       (pid, "packages"))

        self.c.execute("VACUUM")

    def _migrate_user(self):
        if os.path.exists("pyload.db"):
            self.pyload.log.info(self._("Converting old Django DB"))

            with sqlite3.connect("pyload.db", isolation_level=None) as conn:
                with closing(conn.cursor()) as c:
                    c.execute(
                        "SELECT username, password, email from auth_user WHERE is_superuser")
                    users = []
                    for r in c:
                        pw = r[1].split("$")
                        users.append((r[0], pw[1] + pw[2], r[2]))

            self.c.executemany(
                "INSERT INTO users(name, password, email) VALUES (?, ?, ?)",
                users)

            shutil.move("pyload.db", "pyload.old.db")

    def create_cursor(self):
        return self.conn.cursor()

    @style.async_
    def commit(self):
        self.conn.commit()

    @style.queue
    def sync_save(self):
        self.conn.commit()

    @style.async_
    def rollback(self):
        self.conn.rollback()

    def async_(self, f, *args, **kwargs):
        args = (self, ) + args
        job = DatabaseJob(f, *args, **kwargs)
        self.jobs.put(job)

    def queue(self, f, *args, **kwargs):
        args = (self, ) + args
        job = DatabaseJob(f, *args, **kwargs)
        self.jobs.put(job)
        job.wait()
        return job.result

    @classmethod
    def register_sub(cls, klass):
        cls.subs.append(klass)

    @classmethod
    def unregister_sub(cls, klass):
        cls.subs.remove(klass)

    def __getattr__(self, attr):
        for sub in DatabaseThread.subs:
            if hasattr(sub, attr):
                return getattr(sub, attr)
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{attr}'")
def show_thread(e: threading.Event):
    while not e.wait(3):
        logging.info(threading.enumerate())
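# A minimal sketch of driving show_thread: a daemon thread logs the live
# threads every 3 seconds until the Event is set from the main thread.
import logging
import threading

logging.basicConfig(level=logging.INFO)
stop = threading.Event()
t = threading.Thread(target=show_thread, args=(stop,), daemon=True)
t.start()
# ... do real work here ...
stop.set()  # e.wait() now returns True, ending the loop
t.join()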
class Robot():
    def __init__(self):
        """Creates connection to robot."""
        self.next_msg_id = 1
        self.futures = {}
        self.out_lock = Lock()
        self.connected_event = Event()
        self.error_connecting = False
        #print ("In robot constructor. Fetching IP.")
        robotName, host = getIdleRobotIP()
        if (host is None):
            print("Sorry, no IDLE corobot available.")
            raise CorobotException
        print("Got IP as : %s" % str(host))
        print("Robot assigned : %s" % robotName)
        port = 15001
        self.client = CorobotClient(host, port, self)
        self.io_thread = Thread(target=self._io_loop)
        self.io_thread.start()
        self.connected_event.wait()
        if self.error_connecting:
            raise CorobotException("Couldn't connect to robot at %s:%d" % (host, port))
        else:
            print("Connected to corobot\n")

    def _io_loop(self):
        asyncore.loop(0.1)

    def _robot_response(self, msg):
        tokens = msg.split(" ")
        msg_id = int(tokens[0])
        key = tokens[1]
        data = tokens[2:]
        future = self.futures.pop(msg_id)
        if key == "POS":
            data = tuple(map(float, data))
        elif key == "CONFIRM":
            data = bool(data)
        elif key == "LOG":
            print(data)
            return
        else:
            data = None
        if key != "ERROR":
            future._fulfilled(data)
        else:
            future._error_occured(" ".join(data))

    def _write_message(self, msg):
        with self.out_lock:
            msg_id = self.next_msg_id
            self.next_msg_id += 1
            self.client.write_line("%d %s" % (msg_id, msg))
            future = Future()
            self.futures[msg_id] = future
            return future

    def nav_to(self, location):
        """Drives the robot to the given location with path planning."""
        print("Navigating to " + location + "\n")
        return self._write_message("NAVTOLOC " + location.upper())

    def nav_to_xy(self, x, y):
        """Drives the robot to the given location with path planning."""
        print("Navigating to position (%f, %f)\n" % (x, y))
        return self._write_message("NAVTOXY %f %f" % (x, y))

    def go_to(self, location):
        """Drives the robot in a straight line to the given location."""
        print("Going to " + location + "\n")
        return self._write_message("GOTOLOC " + location.upper())

    def go_to_xy(self, x, y):
        """Drives the robot in a straight line to the given coordinates."""
        print("Going to position (%f, %f)\n" % (x, y))
        return self._write_message("GOTOXY %f %f" % (x, y))

    def get_pos(self):
        """Returns the robot's position as an (x, y, theta) tuple."""
        print("Getting position\n")
        return self._write_message("GETPOS")

    def display_message(self, msg, timeout=120):
        """Requests the robot to display a message on its monitor."""
        print("Displaying message\n")
        return self._write_message("SHOW_MSG %d %s" % (timeout, msg))

    def request_confirm(self, msg, timeout=120):
        """Requests the robot to wait for confirmation from a local human."""
        print("Displaying confirmation\n")
        return self._write_message("SHOW_MSG_CONFIRM %d %s" % (timeout, msg))

    def get_closest_loc(self):
        """Returns the closest node to the current robot location."""
        raise NotImplementedError()

    def close(self):
        print("Closing connection\n")
        self.client.close_when_done()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
def stream_worker(
    source: str,
    options: dict[str, str],
    segment_buffer: SegmentBuffer,
    quit_event: Event,
) -> None:
    """Handle consuming streams."""
    try:
        container = av.open(source, options=options, timeout=SOURCE_TIMEOUT)
    except av.AVError:
        _LOGGER.error("Error opening stream %s", redact_credentials(str(source)))
        return
    try:
        video_stream = container.streams.video[0]
    except (KeyError, IndexError):
        _LOGGER.error("Stream has no video")
        container.close()
        return
    try:
        audio_stream = container.streams.audio[0]
    except (KeyError, IndexError):
        audio_stream = None
    # These formats need aac_adtstoasc bitstream filter, but auto_bsf not
    # compatible with empty_moov and manual bitstream filters not in PyAV
    if container.format.name in {"hls", "mpegts"}:
        audio_stream = None
    # Some audio streams do not have a profile and throw errors when remuxing
    if audio_stream and audio_stream.profile is None:
        audio_stream = None

    dts_validator = TimestampValidator()
    container_packets = PeekIterator(
        filter(dts_validator.is_valid, container.demux((video_stream, audio_stream)))
    )

    def is_video(packet: av.Packet) -> Any:
        """Return true if the packet is for the video stream."""
        return packet.stream == video_stream

    # Have to work around two problems with RTSP feeds in ffmpeg
    # 1 - first frame has bad pts/dts https://trac.ffmpeg.org/ticket/5018
    # 2 - seeking can be problematic https://trac.ffmpeg.org/ticket/7815
    #
    # Use a peeking iterator to peek into the start of the stream, ensuring
    # everything looks good, then go back to the start when muxing below.
    try:
        if audio_stream and unsupported_audio(container_packets.peek(), audio_stream):
            audio_stream = None
            container_packets.replace_underlying_iterator(
                filter(dts_validator.is_valid, container.demux(video_stream))
            )

        # Advance to the first keyframe for muxing, then rewind so the muxing
        # loop below can consume.
        first_keyframe = next(
            filter(lambda pkt: is_keyframe(pkt) and is_video(pkt), container_packets)
        )
        # Deal with problem #1 above (bad first packet pts/dts) by recalculating
        # using pts/dts from second packet. Use the peek iterator to advance
        # without consuming from container_packets. Skip over the first keyframe
        # then use the duration from the second video packet to adjust dts.
        next_video_packet = next(filter(is_video, container_packets.peek()))
        # Since the is_valid filter has already been applied before the following
        # adjustment, it does not filter out the case where the duration below is
        # 0 and both the first_keyframe and next_video_packet end up with the same
        # dts. Use "or 1" to deal with this.
        start_dts = next_video_packet.dts - (next_video_packet.duration or 1)
        first_keyframe.dts = first_keyframe.pts = start_dts
    except (av.AVError, StopIteration) as ex:
        _LOGGER.error("Error demuxing stream while finding first packet: %s", str(ex))
        container.close()
        return

    segment_buffer.set_streams(video_stream, audio_stream)
    segment_buffer.reset(start_dts)

    # Mux the first keyframe, then proceed through the rest of the packets
    segment_buffer.mux_packet(first_keyframe)

    while not quit_event.is_set():
        try:
            packet = next(container_packets)
        except (av.AVError, StopIteration) as ex:
            _LOGGER.error("Error demuxing stream: %s", str(ex))
            break
        segment_buffer.mux_packet(packet)

    # Close stream
    segment_buffer.close()
    container.close()
class DispersyInstance(object):
    '''
    Instance of Dispersy
    '''

    def __init__(self, dest_dir, swift_binpath, dispersy_work_dir=u".",
                 sqlite_database=":memory:", swift_work_dir=None,
                 swift_zerostatedir=None, listen=[], peers=[],
                 file_directories=[], files=[], file_timestamp_min=None,
                 run_time=-1, bloomfilter_update=-1, walker=False,
                 gateways={}, mtu=MAX_MTU, callback=None):
        """
        @param dest_dir: Directory in which downloads will be placed as well as logs
        @param swift_binpath: Path to the swift executable
        @param dispersy_work_dir: Working directory for Dispersy
        @param sqlite_database: Location of the sqlite_database, :memory: is in memory
        @param swift_work_dir: Working directory for Swift
        @param swift_zerostatedir: Zero state directory for Swift
        @param listen: Addresses of local sockets to bind to
        @param peers: Addresses of peers to communicate with
        @param file_directories: Directories to monitor for files that should be disseminated
        @param files: Files that should be disseminated
        @param file_timestamp_min: Minimum file modification time
        @param run_time: Total run time in seconds
        @param bloomfilter_update: Period in seconds after which another introduction request is sent
        @param walker: If enabled, the Dispersy walker will be used
        @param gateways: Ip addresses of the gateways for specific interfaces (eth0=192.168.0.1)
        @param mtu: Maximum transmission unit directs the maximum size of messages
        @param callback: Callback function that will be called on certain events
        """
        self._dest_dir = dest_dir
        self._swift_binpath = swift_binpath
        self._dispersy_work_dir = unicode(dispersy_work_dir)
        self._sqlite_database = unicode(sqlite_database)  # :memory: is in memory
        self._swift_work_dir = swift_work_dir if swift_work_dir is not None else dest_dir
        self._swift_zerostatedir = swift_zerostatedir
        self._listen = [Address.unknown(l) for l in listen]  # Local socket addresses
        self._peers = [Address.unknown(p) for p in peers]  # Peer addresses
        self._file_directories = file_directories  # Directories to monitor for new files (or changes in files)
        self._files = files  # Files to monitor
        self._file_timestamp_min = file_timestamp_min  # Minimum file modification time
        self._run_time = int(run_time)  # Time after which this process stops, -1 is infinite
        self._bloomfilter_update = float(bloomfilter_update)  # Update the bloomfilter to peers every # seconds, -1 for never
        self._walker = walker  # Turn walker on
        self._api_callback = callback  # Subscription to various callbacks
        self._gateways = {}
        for g in gateways:
            a = g.split("=")
            if len(a) == 2:
                self._gateways[a[0]] = a[1]
        self._mtu = mtu
        self._filepusher = FilePusher(self._register_some_message, self._swift_binpath,
                                      directories=self._file_directories, files=self._files,
                                      file_size=self._mtu - DISPERSY_MESSAGE_MINIMUM,
                                      min_timestamp=self._file_timestamp_min)
        self._loop_event = Event()  # Loop
        # Redirect swift output:
        sys.stderr = open(self._dest_dir + "/" + str(os.getpid()) + ".err", "w")
        # Redirect standard output:
        sys.stdout = open(self._dest_dir + "/" + str(os.getpid()) + ".out", "w")
        self._state = STATE_INITIALIZED

    def start(self):
        try:
            self.run()
        except Exception:
            logger.exception("Dispersy instance failed to run properly")
        return self._stop()

    def run(self):
        # Create Dispersy object
        self._callback = Callback("Dispersy-Callback-" + str(random.randint(0, 1000000)))
        self._swift = self.create_swift_instance(self._listen)
        self.do_callback(MESSAGE_KEY_SWIFT_STATE, STATE_INITIALIZED)
        self._endpoint = MultiEndpoint(self._swift, self._api_callback)
        self._dispersy = MyDispersy(self._callback, self._endpoint,
                                    self._dispersy_work_dir, self._sqlite_database)
        # Timeout determines how long the bootstrappers should try before continuing
        # (at the moment). With timeout 0.0 the bootstrap is registered instead of
        # being a blocking call.
        timeout = BOOTSTRAPPERS_RESOLVE_TIME if self._walker else 0.0
        self._dispersy.start(timeout=timeout)
        print "Dispersy is listening on port %d" % self._dispersy.lan_address[1]
        self._community = self._callback.call(self.create_mycommunity)
        self._community.dest_dir = self.dest_dir  # Will be used to put swift downloads
        self._community.update_bloomfilter = self._bloomfilter_update
        # Remove all duplicate ip addresses, regardless of their ports.
        # We assume that same ip means same dispersy instance for now.
        addrs = self.remove_duplicate_ip(self._peers)
        for a in addrs:
            self.send_introduction_request(a.addr())
        self._filepusher.start()
        self.state = STATE_RUNNING
        self._loop()

    def _loop(self):
        logger.debug("Start loop")
        if self._run_time < 0:
            # Run forever; wake up periodically so keyboard signals can be handled
            while not self._loop_event.is_set():
                self._loop_event.wait(10.0)
        else:
            for _ in range(int(self._run_time / SLEEP_TIME)):
                if not self._loop_event.is_set():
                    self._loop_event.wait(SLEEP_TIME)

    def stop(self):
        self.state = STATE_STOPPED
        self._loop_event.set()

    def _stop(self):
        logger.debug("Stop instance")
        try:
            self._filepusher.stop()
            return self._dispersy.stop(timeout=0.0)
        except AttributeError:
            logger.error("Could not stop Dispersy")
        finally:
            # Now we should be totally done; if anything is still running
            # for some reason, the user should forcibly kill it
            self.state = STATE_DONE

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, state):
        self._state = state
        logger.info("STATECHANGE %d", state)
        self.do_callback(MESSAGE_KEY_STATE, state)

    @property
    def dest_dir(self):
        return self._dest_dir

    def create_mycommunity(self):
        master_member = self._dispersy.get_member(MASTER_MEMBER_PUBLIC_KEY)
        my_member = self._dispersy.get_new_member(SECURITY)
        args = ()
        kwargs = {"enable": self._walker, "api_callback": self._api_callback}
        return MyCommunity.join_community(self._dispersy, master_member, my_member, *args, **kwargs)

    def _register_some_message(self, message, count=DEFAULT_MESSAGE_COUNT, delay=DEFAULT_MESSAGE_DELAY):
        """
        Send certain messages
        Update is False, otherwise the message will be handled locally as well

        @type message: SmallFileCarrier, FileHashCarrier, APIMessageCarrier
        """
        logger.info("Registered %d messages: %s with delay %f", count, message, delay)
        if isinstance(message, SmallFileCarrier):
            self._callback.register(self._community.create_small_file_messages, (count, message),
                                    kargs={"update": False}, delay=delay)
        elif isinstance(message, FileHashCarrier):
            self._community.create_file_hash_messages(count, message, delay, update=False)
        elif isinstance(message, APIMessageCarrier):
            self._callback.register(self._community.create_api_messages, (count, message),
                                    kargs={"update": False}, delay=delay)
        else:
            logger.debug("Don't know what this is: %s", message)

    def create_swift_instance(self, addrs):
        addrs = verify_addresses_are_free(addrs)
        httpgwport = None
        cmdgwport = None
        spmgr = None
        return MySwiftProcess(self._swift_binpath, self._swift_work_dir, self._swift_zerostatedir,
                              addrs, httpgwport, cmdgwport, spmgr, gateways=self._gateways)

    def remove_duplicate_ip(self, addrs):
        faddrs = []
        for a in addrs:
            if all([a.ip != f.ip for f in faddrs]):
                faddrs.append(a)
        return faddrs

    def send_introduction_request(self, address):
        # Each new candidate will be sent an introduction request once.
        # If update_bloomfilter > 0, an introduction request is sent every
        # so many seconds. The introduction request contains the Dispersy address.
        self._community.create_candidate(address, True, address, address, u"unknown")

    def do_callback(self, key, *args, **kwargs):
        if self._api_callback is not None:
            self._api_callback(key, *args, **kwargs)
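A minimal sketch of driving a DispersyInstance; the paths and listen address are placeholders. stop() sets the internal loop Event, which lets _loop() return and start() fall through to _stop().

import signal

# Placeholder paths/addresses; adjust to the real deployment.
instance = DispersyInstance("/tmp/dispersy_dest", "/usr/bin/swift",
                            listen=["0.0.0.0:12345"], run_time=-1)

def shutdown(signum, frame):
    instance.stop()  # sets _loop_event so _loop() terminates

signal.signal(signal.SIGTERM, shutdown)
instance.start()     # blocks in _loop() until stop() is called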
""" event 线程互斥方法演示 """ from threading import Thread,Event s = None e = Event() def 杨子荣(): print("杨子荣前来拜山头") global s s = "天王盖地虎" e.set() t = Thread(target = 杨子荣) t.start() print("说对口令就是自己人") e.wait() # 等待e被set if s == '天王盖地虎': print("宝塔镇河妖") print("确认过眼神,你是对的人") else: print("打死他!") t.join()
class WorkerQuests(MITMBase):

    def _valid_modes(self):
        return ["pokestops"]

    def __init__(self, args, id, last_known_state, websocket_handler, route_manager_daytime,
                 route_manager_nighttime, mitm_mapper, devicesettings, db_wrapper, timer,
                 pogoWindowManager):
        MITMBase.__init__(self, args, id, last_known_state, websocket_handler,
                          route_manager_daytime, route_manager_nighttime,
                          devicesettings, db_wrapper=db_wrapper, NoOcr=False,
                          timer=timer, mitm_mapper=mitm_mapper,
                          pogoWindowManager=pogoWindowManager)
        self.first_round = True
        self.clear_thread = None
        # 0 => None
        # 1 => clear box
        # 2 => clear quest
        self.clear_thread_task = 0
        self._start_inventory_clear = Event()
        self._delay_add = int(self._devicesettings.get("vps_delay", 0))
        self._stop_process_time = 0

    def _pre_work_loop(self):
        if self.clear_thread is not None:
            return
        self.clear_thread = Thread(name="clear_thread_%s" % str(self._id), target=self._clear_thread)
        self.clear_thread.daemon = False
        self.clear_thread.start()
        self._get_screen_size()
        reached_main_menu = self._check_pogo_main_screen(5, True)
        if not reached_main_menu:
            if not self._restart_pogo():
                # TODO: put in loop, count up for a reboot ;)
                raise InternalStopWorkerException

    def _health_check(self):
        """
        Not gonna check for main screen here since we will do health checks
        in post_move_location_routine
        :return:
        """
        pass

    def _pre_location_update(self):
        self._start_inventory_clear.set()
        self._update_injection_settings()

    def _move_to_location(self):
        routemanager = self._get_currently_valid_routemanager()
        if routemanager is None:
            raise InternalStopWorkerException
        if self._db_wrapper.check_stop_quest(self.current_location.lat, self.current_location.lng):
            return False, False
        distance = get_distance_of_two_points_in_meters(float(self.last_processed_location.lat),
                                                        float(self.last_processed_location.lng),
                                                        float(self.current_location.lat),
                                                        float(self.current_location.lng))
        log.info('main: Moving %s meters to the next position' % distance)
        delay_used = 0
        log.debug("Getting time")
        speed = routemanager.settings.get("speed", 0)
        max_distance = routemanager.settings.get("max_distance", None)
        if (speed == 0 or (max_distance and 0 < max_distance < distance)
                or (self.last_location.lat == 0.0 and self.last_location.lng == 0.0)):
            log.info("main: Teleporting...")
            self._communicator.setLocation(self.current_location.lat, self.current_location.lng, 0)
            # The time we will take as a starting point to wait for data...
            cur_time = math.floor(time.time())
            delay_used = self._devicesettings.get('post_teleport_delay', 7)
            # Test for cooldown / teleported distance TODO: check this block...
            if self.first_round:
                delay_used = 3
                self.first_round = False
            else:
                # Thresholds ordered from smallest to largest so every branch is
                # reachable (checking `distance > 1000` first would shadow the
                # larger-distance branches and cap the cooldown at 100).
                if distance < 200:
                    delay_used = 5
                elif distance < 500:
                    delay_used = 15
                elif distance < 1000:
                    delay_used = 30
                elif distance < 5000:
                    delay_used = 100
                elif distance < 10000:
                    delay_used = 200
                elif distance < 20000:
                    delay_used = 400
                else:
                    delay_used = 800
                log.info("Need more sleep after Teleport: %s seconds!" % str(delay_used))
        else:
            log.info("main: Walking...")
            self._communicator.walkFromTo(self.last_location.lat, self.last_location.lng,
                                          self.current_location.lat, self.current_location.lng,
                                          speed)
            # The time we will take as a starting point to wait for data...
            cur_time = math.floor(time.time())
            delay_used = self._devicesettings.get('post_walk_delay', 7)
        walk_distance_post_teleport = self._devicesettings.get('walk_after_teleport_distance', 0)
        if 0 < walk_distance_post_teleport < distance:
            # TODO: actually use to_walk for distance
            lat_offset, lng_offset = get_lat_lng_offsets_by_distance(walk_distance_post_teleport)
            to_walk = get_distance_of_two_points_in_meters(float(self.current_location.lat),
                                                           float(self.current_location.lng),
                                                           float(self.current_location.lat) + lat_offset,
                                                           float(self.current_location.lng) + lng_offset)
            log.info("Walking roughly: %s" % str(to_walk))
            time.sleep(0.3)
            self._communicator.walkFromTo(self.current_location.lat, self.current_location.lng,
                                          self.current_location.lat + lat_offset,
                                          self.current_location.lng + lng_offset,
                                          11)
            log.debug("Walking back")
            time.sleep(0.3)
            self._communicator.walkFromTo(self.current_location.lat + lat_offset,
                                          self.current_location.lng + lng_offset,
                                          self.current_location.lat, self.current_location.lng,
                                          11)
            log.debug("Done walking")
            time.sleep(1)
        log.info("Sleeping %s" % str(delay_used))
        time.sleep(float(delay_used))
        self.last_processed_location = self.current_location
        return cur_time, True

    def _post_move_location_routine(self, timestamp):
        if self._stop_worker_event.is_set():
            raise InternalStopWorkerException
        self._work_mutex.acquire()
        log.info("Processing Stop / Quest...")
        data_received = '-'
        reached_main_menu = self._check_pogo_main_screen(5, True)
        if not reached_main_menu:
            self._restart_pogo()
        log.info('Open Stop')
        data_received = self._open_pokestop()
        if data_received == 'Stop':
            self._handle_stop(data_received)
        log.debug("Releasing lock")
        self._work_mutex.release()

    def _start_pogo(self):
        pogo_topmost = self._communicator.isPogoTopmost()
        if pogo_topmost:
            return True
        if not self._communicator.isScreenOn():
            # TODO
            self._communicator.startApp("de.grennith.rgc.remotegpscontroller")
            log.warning("Turning screen on")
            self._communicator.turnScreenOn()
            time.sleep(self._devicesettings.get("post_turn_screen_on_delay", 7))
        cur_time = time.time()
        start_result = False
        while not pogo_topmost:
            start_result = self._communicator.startApp("com.nianticlabs.pokemongo")
            time.sleep(1)
            pogo_topmost = self._communicator.isPogoTopmost()
        reached_raidtab = False
        if start_result:
            log.warning("startPogo: Starting pogo...")
            time.sleep(self._devicesettings.get("post_pogo_start_delay", 60))
            self._last_known_state["lastPogoRestart"] = cur_time
            self._check_pogo_main_screen(15, True)
            reached_raidtab = True
        return reached_raidtab

    def _cleanup(self):
        if self.clear_thread is not None:
            self.clear_thread.join()

    def _clear_thread(self):
        log.info('Starting clear Quest Thread')
        while not self._stop_worker_event.is_set():
            # wait for event signal
            while not self._start_inventory_clear.is_set():
                if self._stop_worker_event.is_set():
                    return
                time.sleep(0.5)
            if self.clear_thread_task > 0:
                self._work_mutex.acquire()
                try:
                    # TODO: less magic numbers?
time.sleep(1) if self.clear_thread_task == 1: log.info("Clearing box") self.clear_box(self._delay_add) self.clear_thread_task = 0 elif self.clear_thread_task == 2: log.info("Clearing quest") self._clear_quests(self._delay_add) self.clear_thread_task = 0 time.sleep(1) self._start_inventory_clear.clear() except WebsocketWorkerRemovedException as e: log.error("Worker removed while clearing quest/box") self._stop_worker_event.set() return self._work_mutex.release() def clear_box(self, delayadd): log.info('Cleanup Box') not_allow = ('Gift', 'Raid Pass', 'Camera', 'Lucky Egg', 'Geschenk', 'Raidpass', 'Kamera', 'Glücks-Ei', 'Cadeau', 'Passe de Raid', 'Appareil photo') x, y = self._resocalc.get_close_main_button_coords(self)[0], self._resocalc.get_close_main_button_coords(self)[ 1] self._communicator.click(int(x), int(y)) time.sleep(1 + int(delayadd)) x, y = self._resocalc.get_item_menu_coords(self)[0], self._resocalc.get_item_menu_coords(self)[1] self._communicator.click(int(x), int(y)) time.sleep(1 + int(delayadd)) data_received = '-' _data_err_counter = 0 text_x1, text_x2, text_y1, text_y2 = self._resocalc.get_delete_item_text(self) x, y = self._resocalc.get_delete_item_coords(self)[0], self._resocalc.get_delete_item_coords(self)[1] click_x1, click_x2, click_y = self._resocalc.get_swipe_item_amount(self)[0], \ self._resocalc.get_swipe_item_amount(self)[1], \ self._resocalc.get_swipe_item_amount(self)[2] to = 0 while int(to) <= 7 and int(y) <= int(self._screen_y): self._takeScreenshot() # filename, hash, x1, x2, y1, y2 item_text = self._pogoWindowManager.get_inventory_text(os.path.join(self._applicationArgs.temp_path, 'screenshot%s.png' % str(self._id)), self._id, text_x1, text_x2, text_y1, text_y2) log.info('Found item text: %s' % str(item_text)) if item_text in not_allow: log.info('Dont delete that!!!') y += self._resocalc.get_next_item_coord(self) text_y1 += self._resocalc.get_next_item_coord(self) text_y2 += self._resocalc.get_next_item_coord(self) else: self._communicator.click(int(x), int(y)) time.sleep(1 + int(delayadd)) self._communicator.touchandhold(click_x1, click_y, click_x2, click_y) time.sleep(.5) delx, dely = self._resocalc.get_confirm_delete_item_coords(self)[0], \ self._resocalc.get_confirm_delete_item_coords(self)[1] curTime = time.time() self._communicator.click(int(delx), int(dely)) data_received = self._wait_for_data(timestamp=curTime, proto_to_wait_for=4, timeout=15) if data_received is not None: if 'Clear' in data_received: to += 1 else: self._communicator.backButton() data_received = '-' y += self._resocalc.get_next_item_coord(self) text_y1 += self._resocalc.get_next_item_coord(self) text_y2 += self._resocalc.get_next_item_coord(self) else: log.info('Click Gift / Raidpass') if not self._checkPogoButton(): self._checkPogoClose() data_received = '-' y += self._resocalc.get_next_item_coord(self) text_y1 += self._resocalc.get_next_item_coord(self) text_y2 += self._resocalc.get_next_item_coord(self) x, y = self._resocalc.get_close_main_button_coords(self)[0], self._resocalc.get_close_main_button_coords(self)[ 1] self._communicator.click(int(x), int(y)) time.sleep(1 + int(delayadd)) return True def _update_injection_settings(self): # we don't wanna do anything other than questscans, set ids_iv to null ;) self._mitm_mapper.update_latest(origin=self._id, timestamp=int(time.time()), key="ids_iv", values_dict=None) injected_settings = {} scanmode = "quests" injected_settings["scanmode"] = scanmode self._mitm_mapper.update_latest(origin=self._id, timestamp=int(time.time()), 
key="injected_settings", values_dict=injected_settings) def _open_pokestop(self): to = 0 data_received = '-' while 'Stop' not in data_received and int(to) < 3: self._stop_process_time = time.time() self._open_gym(self._delay_add) data_received = self._wait_for_data(timestamp=self._stop_process_time, proto_to_wait_for=104, timeout=25) if data_received is not None: if 'Gym' in data_received: log.info('Clicking GYM') time.sleep(1) x, y = self._resocalc.get_close_main_button_coords(self)[0], \ self._resocalc.get_close_main_button_coords(self)[1] self._communicator.click(int(x), int(y)) time.sleep(1) if not self._checkPogoButton(): self._checkPogoClose() self._turn_map(self._delay_add) if 'Mon' in data_received: time.sleep(1) log.info('Clicking MON') x, y = self._resocalc.get_leave_mon_coords(self)[0], self._resocalc.get_leave_mon_coords(self)[1] self._communicator.click(int(x), int(y)) time.sleep(.5) if not self._checkPogoButton(): self._checkPogoClose() self._turn_map(self._delay_add) if data_received is None: data_received = '-' to += 1 return data_received def _handle_stop(self, data_received): to = 0 while not 'Quest' in data_received and int(to) < 3: log.info('Spin Stop') data_received = self._wait_for_data(timestamp=self._stop_process_time, proto_to_wait_for=101, timeout=20) if data_received is not None: if 'Box' in data_received: log.error('Box is full ... Next round!') self.clear_thread_task = 1 break if 'Quest' in data_received: log.info('Getting new Quest') self.clear_thread_task = 2 break if 'SB' in data_received or 'Time' in data_received: log.error('Softban - waiting...') time.sleep(10) self._open_pokestop() else: log.error('Other Return: %s' % str(data_received)) to += 1 else: data_received = '-' log.info('Did not get any data ... Maybe already spinned or softban.') self._close_gym(self._delay_add) time.sleep(5) self._open_pokestop() to += 1 def _wait_data_worker(self, latest, proto_to_wait_for, timestamp): data_requested = None if latest is None: log.debug("Nothing received since MAD started") time.sleep(0.5) elif proto_to_wait_for not in latest: log.debug("No data linked to the requested proto since MAD started.") time.sleep(0.5) elif 156 in latest and latest[156].get('timestamp', 0) >= timestamp: return 'Gym' elif 102 in latest and latest[102].get('timestamp', 0) >= timestamp: return 'Mon' else: # proto has previously been received, let's check the timestamp... # TODO: int vs str-key? latest_proto = latest.get(proto_to_wait_for, None) try: current_routemanager = self._get_currently_valid_routemanager() except InternalStopWorkerException as e: log.info("Worker %s is to be stopped due to invalid routemanager/mode switch" % str(self._id)) raise InternalStopWorkerException if current_routemanager is None: # we should be sleeping... 
log.warning("%s should be sleeping ;)" % str(self._id)) return None latest_timestamp = latest_proto.get("timestamp", 0) if latest_timestamp >= timestamp: # TODO: consider reseting timestamp here since we clearly received SOMETHING latest_data = latest_proto.get("values", None) if latest_data is None: time.sleep(0.5) return None elif proto_to_wait_for == 101: if latest_data['payload']['result'] == 1 and len(latest_data['payload']['items_awarded']) > 0: return 'Quest' elif (latest_data['payload']['result'] == 1 and len(latest_data['payload']['items_awarded']) == 0): return 'Time' elif latest_data['payload']['result'] == 2: return 'SB' elif latest_data['payload']['result'] == 4: return 'Box' elif proto_to_wait_for == 104 and latest_data['payload']['type'] == 1: return 'Stop' if proto_to_wait_for == 4 and len(latest_data['payload']['inventory_delta']['inventory_items']) > 0: return 'Clear' else: log.debug("latest timestamp of proto %s (%s) is older than %s" % (str(proto_to_wait_for), str(latest_timestamp), str(timestamp))) # TODO: timeout error instead of data_error_counter? Differentiate timeout vs missing data (the # TODO: latter indicates too high speeds for example time.sleep(0.5) return data_requested
class Module(MgrModule): MODULE_OPTIONS = [ { 'name': 'sleep_interval', 'default': str(600), }, { 'name': 'predict_interval', 'default': str(86400), }, { 'name': 'predictor_model', 'default': 'prophetstor', }, ] COMMANDS = [] def __init__(self, *args, **kwargs): super(Module, self).__init__(*args, **kwargs) # options for opt in self.MODULE_OPTIONS: setattr(self, opt['name'], opt['default']) # other self._run = True self._event = Event() def config_notify(self): for opt in self.MODULE_OPTIONS: setattr(self, opt['name'], self.get_module_option(opt['name'])) self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name'])) if self.get_ceph_option('device_failure_prediction_mode') == 'local': self._event.set() def refresh_config(self): for opt in self.MODULE_OPTIONS: setattr(self, opt['name'], self.get_module_option(opt['name']) or opt['default']) self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name'])) def handle_command(self, _, cmd): self.log.debug('handle_command cmd: %s', cmd) raise NotImplementedError(cmd['prefix']) def self_test(self): self.log.debug('self_test enter') ret, out, err = self.predict_all_devices() assert ret == 0 return 0, 'self test succeed', '' def serve(self): self.log.info('Starting diskprediction local module') self.config_notify() last_predicted = None ls = self.get_store('last_predicted') if ls: try: last_predicted = datetime.datetime.strptime(ls, TIME_FORMAT) except ValueError: pass self.log.debug('Last predicted %s', last_predicted) while self._run: self.refresh_config() mode = self.get_ceph_option('device_failure_prediction_mode') if mode == 'local': now = datetime.datetime.utcnow() if not last_predicted: next_predicted = now else: predicted_frequency = int(self.predict_interval) or 86400 seconds = ( last_predicted - datetime.datetime.utcfromtimestamp(0)).total_seconds() seconds -= seconds % predicted_frequency seconds += predicted_frequency next_predicted = datetime.datetime.utcfromtimestamp( seconds) if last_predicted: self.log.debug('Last scrape %s, next scrape due %s', last_predicted.strftime(TIME_FORMAT), next_predicted.strftime(TIME_FORMAT)) else: self.log.debug('Last scrape never, next scrape due %s', next_predicted.strftime(TIME_FORMAT)) if now >= next_predicted: self.predict_all_devices() last_predicted = now self.set_store('last_predicted', last_predicted.strftime(TIME_FORMAT)) sleep_interval = int(self.sleep_interval) or 60 self.log.debug('Sleeping for %d seconds', sleep_interval) self._event.wait(sleep_interval) self._event.clear() def shutdown(self): self.log.info('Stopping') self._run = False self._event.set() @staticmethod def _convert_timestamp(predicted_timestamp, life_expectancy_day): """ :param predicted_timestamp: unit is nanoseconds :param life_expectancy_day: unit is seconds :return: date format '%Y-%m-%d' ex. 
2018-01-01 """ return datetime.datetime.fromtimestamp( predicted_timestamp / (1000**3) + life_expectancy_day).strftime('%Y-%m-%d') def _predict_life_expentancy(self, devid): predicted_result = '' health_data = {} predict_datas = [] try: r, outb, outs = self.remote('devicehealth', 'show_device_metrics', devid=devid, sample='') if r != 0: self.log.error('failed to get device %s health', devid) health_data = {} else: health_data = json.loads(outb) except Exception as e: self.log.error('failed to get device %s health data due to %s', devid, str(e)) # initialize appropriate disk failure predictor model from .predictor import get_diskfailurepredictor_path if self.predictor_model == 'prophetstor': from .predictor import PSDiskFailurePredictor obj_predictor = PSDiskFailurePredictor() ret = obj_predictor.initialize("{}/models/{}".format( get_diskfailurepredictor_path(), self.predictor_model)) if ret is not None: self.log.error('Error initializing predictor') return predicted_result elif self.predictor_model == 'redhat': from .predictor import RHDiskFailurePredictor obj_predictor = RHDiskFailurePredictor() ret = obj_predictor.initialize("{}/models/{}".format( get_diskfailurepredictor_path(), self.predictor_model)) if ret is not None: self.log.error('Error initializing predictor') return predicted_result else: self.log.error( 'invalid value received for MODULE_OPTIONS.predictor_model') return predicted_result if len(health_data) >= 6: o_keys = sorted(health_data.keys(), reverse=True) for o_key in o_keys: # get values for current day (?) dev_smart = {} s_val = health_data[o_key] # add all smart attributes ata_smart = s_val.get('ata_smart_attributes', {}) for attr in ata_smart.get('table', []): # get raw smart values if attr.get('raw', {}).get('string') is not None: if str(attr.get('raw', {}).get('string', '0')).isdigit(): dev_smart['smart_%s_raw' % attr.get('id')] = \ int(attr.get('raw', {}).get('string', '0')) else: if str(attr.get('raw', {}).get( 'string', '0')).split(' ')[0].isdigit(): dev_smart['smart_%s_raw' % attr.get('id')] = \ int(attr.get('raw', {}).get('string', '0').split(' ')[0]) else: dev_smart['smart_%s_raw' % attr.get('id')] = \ attr.get('raw', {}).get('value', 0) # get normalized smart values if attr.get('value') is not None: dev_smart['smart_%s_normalized' % attr.get('id')] = \ attr.get('value') # add power on hours manually if not available in smart attributes if s_val.get('power_on_time', {}).get('hours') is not None: dev_smart['smart_9_raw'] = int( s_val['power_on_time']['hours']) # add device capacity if s_val.get('user_capacity') is not None: if s_val.get('user_capacity').get('bytes') is not None: dev_smart['user_capacity'] = s_val.get( 'user_capacity').get('bytes') else: self.log.debug( 'user_capacity not found in smart attributes list') # add device model if s_val.get('model_name') is not None: dev_smart['model_name'] = s_val.get('model_name') # add vendor if s_val.get('vendor') is not None: dev_smart['vendor'] = s_val.get('vendor') # if smart data was found, then add that to list if dev_smart: predict_datas.append(dev_smart) if len(predict_datas) >= 12: break else: self.log.error( 'unable to predict device due to health data records less than 6 days' ) if predict_datas: predicted_result = obj_predictor.predict(predict_datas) return predicted_result def predict_life_expectancy(self, devid): result = self._predict_life_expentancy(devid) if result.lower() == 'good': return 0, '>6w', '' elif result.lower() == 'warning': return 0, '>=2w and <=6w', '' elif result.lower() == 'bad': 
                return 0, '<2w', ''
        else:
            return 0, 'unknown', ''

    def _reset_device_life_expectancy(self, device_id):
        result = CommandResult('')
        self.send_command(result, 'mon', '', json.dumps({
            'prefix': 'device rm-life-expectancy',
            'devid': device_id
        }), '')
        ret, _, outs = result.wait()
        if ret != 0:
            self.log.error('failed to reset device life expectancy, %s' % outs)
        return ret

    def _set_device_life_expectancy(self, device_id, from_date, to_date=None):
        result = CommandResult('')
        if to_date is None:
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'device set-life-expectancy',
                'devid': device_id,
                'from': from_date
            }), '')
        else:
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'device set-life-expectancy',
                'devid': device_id,
                'from': from_date,
                'to': to_date
            }), '')
        ret, _, outs = result.wait()
        if ret != 0:
            self.log.error('failed to set device life expectancy, %s' % outs)
        return ret

    def predict_all_devices(self):
        self.log.debug('predict_all_devices')
        devices = self.get('devices').get('devices', [])
        for devInfo in devices:
            if not devInfo.get('daemons'):
                continue
            if not devInfo.get('devid'):
                continue
            self.log.debug('%s' % devInfo)
            result = self._predict_life_expentancy(devInfo['devid'])
            if result == 'unknown':
                self._reset_device_life_expectancy(devInfo['devid'])
                continue
            predicted = int(time.time() * (1000**3))
            if result.lower() == 'good':
                life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS
                life_expectancy_day_max = None
            elif result.lower() == 'warning':
                life_expectancy_day_min = (TIME_WEEK * 2)
                life_expectancy_day_max = (TIME_WEEK * 6)
            elif result.lower() == 'bad':
                life_expectancy_day_min = 0
                life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS
            else:
                predicted = None
                life_expectancy_day_min = None
                life_expectancy_day_max = None
            # `is not None` so the 'bad' case (min == 0) is recorded instead of
            # falling through to the reset branch.
            if predicted and devInfo['devid'] and life_expectancy_day_min is not None:
                from_date = None
                to_date = None
                try:
                    if life_expectancy_day_min:
                        from_date = self._convert_timestamp(predicted, life_expectancy_day_min)
                    if life_expectancy_day_max:
                        to_date = self._convert_timestamp(predicted, life_expectancy_day_max)
                    self._set_device_life_expectancy(devInfo['devid'], from_date, to_date)
                    # MgrModule exposes its logger as self.log; self._logger
                    # would raise AttributeError here.
                    self.log.info('set device {} life expectancy from: {}, to: {}'
                                  .format(devInfo['devid'], from_date, to_date))
                except Exception as e:
                    self.log.error('failed to set device {} life expectancy from: {}, to: {}, {}'
                                   .format(devInfo['devid'], from_date, to_date, str(e)))
            else:
                self._reset_device_life_expectancy(devInfo['devid'])
        return 0, 'successfully predicted all devices', ''
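The serve() loop above computes the next prediction time by rounding the previous one down to a predict_interval boundary and adding one interval; the helper below mirrors that arithmetic in isolation.

import datetime

def next_prediction_due(last_predicted, predict_interval=86400):
    """Mirror of the scheduling arithmetic in serve() above."""
    epoch = datetime.datetime.utcfromtimestamp(0)
    seconds = (last_predicted - epoch).total_seconds()
    seconds -= seconds % predict_interval   # round down to an interval boundary
    seconds += predict_interval             # ...then schedule one interval later
    return datetime.datetime.utcfromtimestamp(seconds)

# A prediction made at 03:00 UTC with a one-day interval is due again at the
# next midnight UTC:
print(next_prediction_due(datetime.datetime(2018, 1, 1, 3, 0)))
# -> 2018-01-02 00:00:00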
##
## EPITECH PROJECT, 2020
## Astropi
## File description:
## astropi
##

from sys import argv, stderr, exit, stdin
from datetime import datetime
from threading import Thread, Event
import cv2
import os
import numpy as np

event = Event()

def generate_mask(image, seuil):
    """Generate a mask composed of black or white pixels.

    Pixels whose absolute intensity exceeds the threshold (`seuil`)
    become white (255); everything else stays black (0).
    """
    image = image.astype(np.int32)
    # Vectorized threshold: equivalent to the per-pixel
    # abs(0 - image[y][x]) > seuil test, without the Python loops.
    mask = (np.abs(image) > seuil).astype(np.uint8) * 255
    # Erode once then dilate three times to drop speckle noise and
    # close small holes in the detected regions.
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.erode(mask, kernel, iterations=1)
    mask = cv2.dilate(mask, kernel, iterations=3)
    return mask
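Example use of generate_mask on a grayscale image; the input and output file names are placeholders.

import cv2

image = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)  # placeholder input file
if image is None:
    raise SystemExit("cannot read frame.png")
mask = generate_mask(image, seuil=30)  # keep pixels with intensity above 30
cv2.imwrite("mask.png", mask)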
def __init__(self, info):
    super().__init__()
    self.daemon = True  # setDaemon() is deprecated in favour of the attribute
    self.event_started = Event()
    self.thread_info = info
    self.queue = Queue()
class Camera(object):
    """Camera class.

    Attributes:
        cap (VideoCapture): OpenCV VideoCapture element.
        cam_id (string): Camera or V4L id (ex: /dev/video0 /dev/v4l_by_id/...).
        cam_path (string): Camera path.
        height (int): Camera frame height in pixels.
        width (int): Camera frame width in pixels.
        settings (list): List of OpenCV VideoCapture (v4l) settings.
        thread_ready (Event): Thread is ready Event.
        thread (threading.Thread): VideoCapture reading thread.
        t0 (time.time): Time counter buffer.
    """

    def __init__(self, cam_id, vertical_flip=None, settings=None):
        """Initialize the Camera object variables.

        Args:
            cam_id (string): Camera or V4L id.
            vertical_flip (bool): Trigger vertical frame flipping.
            settings (list): list of tuple with specific camera settings.
        """
        if not Path(cam_id).exists():
            raise ValueError('Camera path does not exist.'
                             ' Check hardware links: %s' % cam_id)
        # Resolve cam_id v4l path
        if platform == "linux" or platform == "linux2":
            if isinstance(cam_id, int):
                raise ValueError('Camera id must be a valid path, not <int>'
                                 ' (ex: /dev/video0).')
            self.cam_path = str(cam_id)
            if 'v4l' in cam_id:
                cam_path = str(Path(cam_id).resolve())
                self.cam_id = int(cam_path.replace('/dev/video', ''))
                print(' Found a v4l camera path, resolved to: %s'
                      ', cam_id: %d' % (cam_path, self.cam_id))
        else:
            if isinstance(cam_id, str):
                self.cam_id = int(cam_id)
                self.cam_path = cam_id
            else:
                self.cam_id = cam_id
                self.cam_path = str(cam_id)
        if vertical_flip is True:
            print('Set vertical flip.')
            self.vertical_flip = True
        else:
            self.vertical_flip = False
        self.settings = settings
        self.t0 = time.time()
        # VideoCapture reading Thread
        self.thread_ready = Event()
        self.thread = Thread(target=self._update_frame, args=())

    def initialize(self):
        """Initialize the camera Thread."""
        self._setup()
        # Start the VideoCapture read() thread
        self.stop = False
        self.start_camera_thread()
        self.thread_ready.wait()
        # Quick test
        self.test_camera()
        print('Camera %s initialization done!\n' % self.cam_id)

    def draw_fps(self, frame):
        """Compute and draw fps on frame.

        Return:
            frame (OpenCV Mat): frame with the current fps drawn on it.
        """
        # Guard against a zero time delta on very fast consecutive calls.
        fps = 1.0 / max(time.time() - self.t0, 1e-6)
        frame = self.draw_text(frame, '%d fps' % fps,
                               x=self.width / 35,
                               y=self.height - self.height / 20)
        self.t0 = time.time()
        return frame

    def draw_text(self, frame, text, x=None, y=None, color=(0, 255, 0),
                  thickness=1, size=0.75):
        """Draw text on frame.

        Arguments:
            frame (OpenCV Mat): A frame read from the VideoCapture method.
            text (string): The string to be written.
            x (int): Written text horizontal coordinate.
            y (int): Written text vertical coordinate.
            color (int tuple): RGB text color.
            thickness (int): Lines thickness.
            size (float): Font scale.

        Return:
            frame (OpenCV Mat): new frame with the text written.
        """
        # Hint: https://stackoverflow.com/a/42694604
        if x is None:
            x = self.width / 35
        if y is None:
            y = self.height / 20
        return cv2.putText(frame, text, (int(x), int(y)),
                           cv2.FONT_HERSHEY_SIMPLEX, size, color,
                           int(thickness), lineType=cv2.LINE_AA)

    def read(self):
        """Read the current camera frame.

        Return:
            frame (OpenCV Mat): A frame read from the VideoCapture method.
""" return self.current_frame def release(self): """Release the VideoCapture object.""" self.stop = True time.sleep(0.1) # 0.05 self.cap.release() def set_camera_settings(self): """Set all the camera settings.""" if self.settings: print('Camera settings:') for setting in self.settings: self.cap.set(setting[0], setting[1]) for setting in self.settings: print(' %s: %d' % (CV_CAP_PROP(setting[0]).name, self.cap.get(setting[0]))) def _setup(self): """Set up the camera.""" if platform == "linux" or platform == "linux2": self.cap = cv2.VideoCapture(self.cam_path, cv2.CAP_V4L2) else: self.cap = cv2.VideoCapture(self.cam_id) if not self.cap.isOpened(): raise ValueError('Camera', self.cam_id, 'not found!') self.set_camera_settings() # Current camera recording frame size self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # create thread for capturing images def start_camera_thread(self): """Start the Camera frame update Thread.""" self.thread.start() self.thread_ready.wait() # block until thread created a current_frame def test_camera(self): """Basic camera test.""" # Testing camera setup test_frame = self.read() # Simple self-test if test_frame.shape[0] != self.height: print('WARNING: Camera height is different from the setted one!\n' 'Check the defaultConfig.xml camera_resolution.') if test_frame.shape[1] != self.width: raise ValueError('Camera width is different from the setted one!\n' 'Check the defaultConfig.xml camera_resolution.') def _update_frame(self): """Read VideoCapture to update Camera current frame.""" while (True): if self.stop: break ret, frame = self.cap.read() if ret is False: print('Cam %s | Error reading frame!' % self.cam_id) if self.vertical_flip: frame = cv2.flip(frame, -1) self.current_frame = frame self.thread_ready.set()
class Client:
    """Simple client class to schedule jobs to remote workers. Currently
    supports a synchronous way of calling tasks that awaits results, and an
    asynchronous one which collects results in a dedicated dictionary.

    Attributes
    ----------
    :type connection: tasq backend connection
    :param connection: The connection reference needed to communicate with
                       remote runners, can be either a `ZMQBackendConnection`
                       or a generic `BackendConnection` for backends other
                       than a ZMQ socket.
    """

    def __init__(self, connection):
        # Client backend dependency, can be a ZMQBackendConnection or a
        # generic BackendConnection for backends other than ZMQ
        self._connection = connection
        # Connection flag
        self._is_connected = False
        # Results dictionary, mapping task_name -> result
        self._results = {}
        # Pending requests while not connected
        self._pending = deque()
        # Gathering results, making the client unblocking
        self._gatherer = None
        # threading.Event to run and control the gatherer loop
        self._gather_loop = Event()
        # Logging settings
        self._log = get_logger(__name__)

    def __repr__(self):
        return f"Client({self._connection})"

    def __enter__(self):
        if not self.is_connected():
            self.connect()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        while self.pending_results():
            pass
        self.close()

    def _gather_results(self):
        """Gathering subroutine, must be run in another thread to concurrently
        listen for results and store them into a dedicated dictionary
        """
        while not self._gather_loop.is_set():
            try:
                job_result = self._connection.recv_result()
            except BackendCommunicationErrorException as e:
                self._log.warning(
                    "Backend error while receiving results back: %s", str(e))
            else:
                if not job_result:
                    continue
                self._log.debug("Gathered result: %s", job_result)
                try:
                    self._results[job_result.name].set_result(job_result)
                except KeyError:
                    self._log.error(
                        "Can't update result: key %s not found",
                        job_result.name,
                    )
                except asyncio.InvalidStateError:
                    self._log.warning("Result already gathered, discarding it")

    def is_connected(self):
        return self._is_connected

    def pending_jobs(self):
        """Returns the pending jobs"""
        return self._pending

    def pending_results(self):
        """Retrieve pending jobs from the results dictionary"""
        return {k: v for k, v in self._results.items() if v.done() is False}

    def connect(self):
        """Connect to the remote workers, setting up PUSH and PULL channels,
        respectively used to send tasks and to retrieve results back
        """
        if self.is_connected():
            return
        # Gathering results, making the client unblocking
        if not self._gatherer:
            self._gatherer = Thread(target=self._gather_results, daemon=True)
            # Start gathering thread
            self._gatherer.start()
        elif not self._gatherer.is_alive():
            self._gather_loop.clear()
            # A finished Thread cannot be restarted; create a fresh one
            self._gatherer = Thread(target=self._gather_results, daemon=True)
            self._gatherer.start()
        self._connection.connect()
        self._is_connected = True
        # Check if there are pending requests and in case, empty the queue
        while self._pending:
            job = self._pending.pop()
            self.schedule(job.func, *job.args, name=job.job_id, **job.kwargs)

    def disconnect(self):
        """Disconnect PUSH and PULL sockets"""
        if self.is_connected():
            self._connection.disconnect()
            self._gather_loop.set()
            self._gatherer.join()
            self._is_connected = False

    def schedule(self, func, *args, **kwargs):
        """Schedule a job to a remote worker, without blocking. Requires a
        func task, and arguments to be passed with; cloudpickle will handle
        dependencies shipping.
        Optionally it is possible to give a name to the job, otherwise a UUID
        will be defined

        Args:
        -----
        :type func: func
        :param func: A function to be executed on a worker by enqueueing it

        :rtype: tasq.remote.client.TasqFuture
        :return: A future eventually containing the result of the func
                 execution
        """
        job = Job(kwargs.pop("name", ""), func, *args, **kwargs)
        name = job.job_id
        # If not connected, enqueue for execution at the first connection
        if not self.is_connected():
            self._log.debug(
                "Client not connected, appending job to pending queue.")
            self._pending.appendleft(job)
            return None
        # Create a Future and return it; the _gatherer thread will set the
        # result once received
        future = TasqFuture()
        if name in self._results:
            self._results.pop(name)
        self._results[name] = future
        # Send job to worker
        self._connection.send(job)
        return future

    def schedule_blocking(self, func, *args, **kwargs):
        """Schedule a job to a remote worker, waiting for the result to be
        ready. Like `schedule` it requires a func task, and arguments to be
        passed with; cloudpickle will handle dependencies shipping. Optionally
        it is possible to give a name to the job, otherwise a UUID will be
        defined

        Args:
        -----
        :type func: func
        :param func: A function to be executed on a worker by enqueueing it

        :rtype: tasq.remote.client.TasqFuture
        :return: The result of the func execution

        :raise: tasq.exception.ClientNotConnectedException, in case of a not
                connected client
        """
        if not self.is_connected():
            raise ClientNotConnectedException(
                "Client not connected to any worker")
        timeout = kwargs.pop("timeout", None)
        future = self.schedule(func, *args, **kwargs)
        result = future.result(timeout)
        return result
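A sketch of scheduling work through the Client; the `connection` object is assumed to be built elsewhere with tasq's connection helpers, and the task must be picklable by cloudpickle.

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

with Client(connection) as client:              # `connection` built elsewhere
    future = client.schedule(fib, 30, name="fib-30")
    print(future.result(10))                    # TasqFuture; may block up to 10s
    print(client.schedule_blocking(fib, 20))    # synchronous variant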
class MainLoop(): def __init__(self, controller, network): self.controller = controller self.releaseEvent = Event() self.c_thread = ControlThread(self.controller) self.t_thread = TimerThread(self.controller, self.releaseEvent) self.g_thread = GraphicsThread(self.controller) self.r_thread = ReadThread(self.controller, network) self.q_state = 0 self.e_state = 0 def parse_events(self): for event in pygame.event.get(): if event.type == pygame.QUIT: pass elif event.type == pygame.KEYDOWN: if event.key == K_w: if self.controller.isDriving == 0: self.t_thread.pressed.add('w') self.controller.isDriving = 1 else: self.t_thread.pressed.remove('w') self.controller.isDriving = 0 elif event.key == K_s: pass elif event.key == K_a: if self.controller.isSteering == 0: self.t_thread.pressed.add('a') self.controller.isSteering = 1 else: if 'd' in self.t_thread.pressed: self.t_thread.pressed.remove('d') self.t_thread.pressed.add('a') else: self.t_thread.pressed.remove('a') self.controller.isSteering = 0 elif event.key == K_d: if self.controller.isSteering == 0: self.t_thread.pressed.add('d') self.controller.isSteering = 1 else: if 'a' in self.t_thread.pressed: self.t_thread.pressed.remove('a') self.t_thread.pressed.add('d') else: self.t_thread.pressed.remove('d') self.controller.isSteering = 0 elif event.key == K_q: #self.t_thread.pressed.add('q') if self.controller.turnSig_state == 1: self.g_thread.gui.turnSig_state(0) self.controller.turnSig_state = 0 elif self.controller.turnSig_state == 0: self.g_thread.gui.turnSig_state(1) self.controller.turnSig_state = 1 else: self.g_thread.gui.turnSig_state(1) self.controller.turnSig_state = 1 elif event.key == K_e: #self.t_thread.pressed.add('e') if self.controller.turnSig_state == 1: self.g_thread.gui.turnSig_state(2) self.controller.turnSig_state = 2 elif self.controller.turnSig_state == 0: self.g_thread.gui.turnSig_state(2) self.controller.turnSig_state = 2 else: self.g_thread.gui.turnSig_state(0) self.controller.turnSig_state = 0 self.releaseEvent.set() elif event.type == pygame.KEYUP: if event.key == K_w: pass elif event.key == K_s: pass elif event.key == K_a: pass elif event.key == K_d: pass elif event.key == K_q: pass elif event.key == K_e: pass def mainLoop(self): # Collect events until released os.system("stty -echo") print( "\nControls: W (accelerate), S (brake), A (steer left), D (steer right), Q (left turn signal), " "E (right turn signal)\n") self.c_thread.start() self.t_thread.start() self.g_thread.start() self.r_thread.start() while True: self.parse_events()
def __init__(self, serial_port: str, baud_rate: Optional[int] = None) -> None: super().__init__(serial_port, connection_type=ConnectionType.UsbConnection) self.setName(catalog.i18nc("@item:inmenu", "USB printing")) self.setShortDescription( catalog.i18nc("@action:button Preceded by 'Ready to'.", "Print via USB")) self.setDescription(catalog.i18nc("@info:tooltip", "Print via USB")) self.setIconName("print") self._serial = None # type: Optional[Serial] self._serial_port = serial_port self._address = serial_port self._timeout = 3 # List of gcode lines to be printed self._gcode = [] # type: List[str] self._gcode_position = 0 self._use_auto_detect = True self._baud_rate = baud_rate self._all_baud_rates = [ 115200, 250000, 500000, 230400, 76800, 57600, 38400, 19200, 9600 ] # Instead of using a timer, we really need the update to be as a thread, as reading from serial can block. self._update_thread = Thread(target=self._update, daemon=True, name="USBPrinterUpdate") self._last_temperature_request = None # type: Optional[int] self._firmware_idle_count = 0 self._is_printing = False # A print is being sent. ## Set when print is started in order to check running time. self._print_start_time = None # type: Optional[float] self._print_estimated_time = None # type: Optional[int] self._accepts_commands = True self._paused = False self._printer_busy = False # When printer is preheating and waiting (M190/M109), or when waiting for action on the printer self.setConnectionText( catalog.i18nc("@info:status", "Connected via USB")) # Queue for commands that need to be sent. self._command_queue = Queue() # type: Queue # Event to indicate that an "ok" was received from the printer after sending a command. self._command_received = Event() self._command_received.set() self._firmware_name_requested = False self._firmware_updater = AvrFirmwareUpdater(self) plugin_path = PluginRegistry.getInstance().getPluginPath("USBPrinting") if plugin_path: self._monitor_view_qml_path = os.path.join(plugin_path, "MonitorItem.qml") else: Logger.log( "e", "Cannot create Monitor QML view: cannot find plugin path for plugin [USBPrinting]" ) self._monitor_view_qml_path = "" CuraApplication.getInstance().getOnExitCallbackManager().addCallback( self._checkActivePrintingUponAppExit)
# coding: utf-8
from threading import Thread, Event
import time

def test_event(e):
    while True:
        print("run...")
        e.wait()  # blocks while the flag is False
        time.sleep(1)

if __name__ == '__main__':
    e = Event()
    t = Thread(target=test_event, args=(e,))
    t.start()
    e.set()    # the initial set() makes the flag True, so e.wait() in the thread does not block
    time.sleep(2)
    e.clear()  # clear() makes the flag False, so e.wait() in the thread now blocks
    time.sleep(5)
    e.set()
    print('over')
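Event.wait also accepts a timeout and returns the flag state, which avoids blocking forever; a small variant of the demo above:

# wait(timeout) returns True if the flag was set, False if the timeout
# elapsed first (standard threading.Event behaviour).
from threading import Event

e = Event()
if e.wait(timeout=2):
    print("flag was set")
else:
    print("timed out after 2s, flag still False")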