def test_new_thread_schedule_action_cancel(self):
    """Disposing a relative-scheduled action before its due time prevents it from running."""
    scheduler = NewThreadScheduler()
    invoked = [False]

    def action(scheduler, state):
        invoked[0] = True

    disposable = scheduler.schedule_relative(timedelta(milliseconds=1), action)
    disposable.dispose()
    # Give the worker thread ample time; the action must still never fire.
    sleep(0.1)
    assert not invoked[0]
def test_new_thread_schedule_action(self):
    """An action scheduled on a NewThreadScheduler eventually runs on its worker thread."""
    scheduler = NewThreadScheduler()
    ran = [False]

    def action(scheduler, state):
        ran[0] = True

    scheduler.schedule(action)
    # Grace period for the background thread to execute the action.
    sleep(0.1)
    # Fixed anti-idiom: assert truthiness directly instead of `== True` (PEP 8 / E712).
    assert ran[0]
def test_new_thread_schedule_action(self):
    """Scheduling an action runs it asynchronously on the scheduler's own thread."""
    scheduler = NewThreadScheduler()
    did_run = False

    def action(scheduler, state):
        nonlocal did_run
        did_run = True

    scheduler.schedule(action)
    # Allow the worker thread time to execute before checking.
    sleep(0.1)
    assert did_run is True
def test_new_thread_schedule_action_due(self):
    """A relative-scheduled action fires no earlier than roughly its due time."""
    scheduler = NewThreadScheduler()
    starttime = datetime.utcnow()
    endtime = [None]

    def action(scheduler, state):
        endtime[0] = datetime.utcnow()

    scheduler.schedule_relative(timedelta(milliseconds=200), action)
    sleep(0.3)
    # Robustness fix (matches the nonlocal variant of this test): fail with a
    # clear assertion if the action never ran, instead of raising TypeError
    # on `None - datetime` and masking the real failure.
    assert endtime[0] is not None
    diff = endtime[0] - starttime
    # 180ms lower bound leaves slack for timer granularity.
    assert diff > timedelta(milliseconds=180)
def test_new_thread_schedule_periodic_cancel(self):
    """Cancelling a periodic schedule part-way stops the countdown before it completes."""
    scheduler = NewThreadScheduler()
    remaining = 3

    def tick(state):
        nonlocal remaining
        if state:
            remaining -= 1
            return state - 1

    subscription = scheduler.schedule_periodic(0.05, tick, remaining)
    # Sleep long enough for at least one tick, short enough that three cannot fit.
    sleep(0.10)
    subscription.dispose()
    assert 0 < remaining < 3
def test_new_thread_schedule_action_due(self):
    """The scheduler honours a relative due time of ~200ms before running the action."""
    scheduler = NewThreadScheduler()
    started = default_now()
    finished = None

    def action(scheduler, state):
        nonlocal finished
        finished = default_now()

    scheduler.schedule_relative(timedelta(milliseconds=200), action)
    sleep(0.3)
    # The action must have fired, and no earlier than ~180ms (slack for timers).
    assert finished is not None
    assert finished - started > timedelta(milliseconds=180)
def test_new_thread_schedule_periodic(self):
    """Periodic scheduling counts the state down to zero, then signals completion."""
    scheduler = NewThreadScheduler()
    gate = threading.Semaphore(0)
    counter = 3

    def action(state):
        nonlocal counter
        if state:
            counter -= 1
            return state - 1
        # state has reached 0: release the gate once the countdown is done.
        if counter == 0:
            gate.release()

    scheduler.schedule_periodic(0.05, action, counter)
    # Block until the periodic action reports completion.
    gate.acquire()
    assert counter == 0
def startCalibration(self, sensorName, port):
    """
    Starts calibration and instantiates the EMT system.

    Acts as a toggle on self.mode: from MODE_IDLE it starts calibration,
    from MODE_CALIBRATING it stops it, and in any other mode (tracking)
    it refuses and logs a message.

    :param sensorName: the uncalibrated sensor
    :param port: the port the uncalibrated sensor is connected to
    """
    if self.mode == MODE_IDLE:
        try:
            # Resolve sensor + channel from the default configuration.
            config = guiutils.import_default_config_settings()
            primaryChannels = config['system']['primary_channels']
            sensor = utils.get_sensor(sensorName)
            sensor.channel = utils.get_active_channel(sensor.dof, port, primaryChannels)
            calibration = EMCalibration(sensor, config)

            # Rx wiring: system self-tests run on a background thread
            # (sample interval 500 — presumably milliseconds, TODO confirm),
            # while status notifications are delivered via the Qt scheduler
            # so they can safely touch the GUI.
            qtScheduler = QtScheduler(QtCore)
            newScheduler = NewThreadScheduler()
            self.systemMonitor = Monitor(calibration.anser)
            self.subscriptions.append(
                calibration.anser.sampleNotifications.sample(500, scheduler=newScheduler)
                .subscribe(self.systemMonitor.run_system_test))
            self.subscriptions.append(
                self.systemMonitor.systemNotifications.subscribe_on(scheduler=qtScheduler)
                .subscribe(self.SYS_EVENT_SYSTEM_STATUS_NOTIFICATION.emit))

            # TODO: remove this priming loop (original author marked it "remove this").
            for i in range(3):
                calibration.anser.sample_update()

            # Run the actual calibration procedure on its own thread and
            # announce the mode change to the GUI via Qt signals.
            self.calibrationThread = CalibrationThread(calibration,
                                                       self.SYS_EVENT_POINT_CAPTURED,
                                                       self.SYS_EVENT_READY_TO_CALIBRATE,
                                                       self.SYS_EVENT_CALIBRATION_COMPLETED)
            self.calibrationThread.start()
            self.systemStatus = True
            self.SYS_EVENT_SYSTEM_STATUS.emit(self.systemStatus)
            self.SYS_EVENT_MODE_CALIBRATION.emit()
            self.SYS_EVENT_POINT_CAPTURED.emit(1)
            self.mode = MODE_CALIBRATING
            self.SYS_EVENT_MODE_CHANGED.emit(self.mode)
            logging.info('Started Calibration')
        except Exception as e:
            # Broad catch is deliberate: any hardware/driver failure is reported
            # to the user as guidance rather than crashing the GUI.
            logging.info('Device cannot be accessed. Possible causes: '
                         '\n - Computer is not connected to DAQ port '
                         '\n - After plugging device into the USB Port, '
                         'wait a few moments to let the driver install'
                         '\n - Ensure device specified is correct. '
                         '\n (Go to -> Developer Tab, in the configuration file under \'system\' change the \'device_name\' to your DevX indentifier) \n')
            print(str(e))
    elif self.mode == MODE_CALIBRATING:
        # Second press while calibrating acts as a stop toggle.
        self.stopCalibration()
    else:
        logging.info('System is currently in use. Please stop tracking to continue')
def __init__(self, serial: Serial, scanner: BusScanner) -> None:
    """Set up device bookkeeping and start scanning the bus on a background thread."""
    super().__init__()
    self.__serial = serial
    self.__scanner = scanner
    self.__added_devices: Subject[Device[T]] = Subject()
    self.__device_list: List[Device[T]] = []
    # Everything registered here is torn down together on dispose.
    self.__disposable = CompositeDisposable()
    self.__disposable.add(self.__added_devices)
    scan_subscription = self.__scanner.scan().subscribe(
        on_next=self.__add_device,
        on_error=self.close,
        scheduler=NewThreadScheduler(),
    )
    self.__disposable.add(scan_subscription)
def start(c: Config):
    """Connect to the Rancher websocket and route resource-change events.

    Service-upgrade events are forwarded to Slack and host-reconnect events
    to Jenkins, each observed on a background thread so notification I/O
    never blocks the websocket callback. Blocks in run_forever() until the
    connection closes, then disposes all subscriptions.

    :param c: application configuration (websocket URL/auth, notifier settings)
    """
    from websocket import WebSocketApp
    from simplejson import loads, dumps
    from rx import Observable
    from rx.subjects import Subject
    from rx.concurrency import NewThreadScheduler
    from .notifier import Slack, Jenkins

    slack = Slack(config=c, log=log)
    jenkins = Jenkins(config=c, log=log)

    # create streams: parse JSON payloads and keep only supported resource changes
    stream = Subject()
    events = (stream
              .map(loads)
              .filter(lambda p: p.get('name') == 'resource.change')
              .map(lambda payload: payload.get('data', {}))
              .filter(lambda data: isinstance(data, dict))
              .map(lambda data: data.get('resource', {}))
              .filter(lambda res: isinstance(res, dict))
              .filter(lambda res: res.get('type') in SUPPORTED_TYPES)
              .map(from_resource)
              .distinct_until_changed())
    slack_events = (events
                    .filter(lambda event: event.type == 'service' and event.state == 'upgraded'))
    jenkins_events = (events
                      .filter(lambda event: event.type == 'host' and event.state == 'reconnecting'))

    # subscribe
    # BUG FIX: the scheduler was previously bound to a variable named `async`,
    # which is a reserved keyword since Python 3.7 and a SyntaxError; renamed.
    background = NewThreadScheduler()
    subs = []
    subs += [events.subscribe(log.debug)]
    subs += [slack_events.observe_on(background).subscribe(slack.notify)]
    subs += [jenkins_events.observe_on(background).subscribe(jenkins.notify)]

    # connect to websocket; every raw message is pushed into the subject
    ws = WebSocketApp(c.RANCHER_WS_URL,
                      on_message=lambda ws, msg: stream.on_next(msg),
                      on_error=lambda ws, err: log.error(err),
                      on_close=lambda ws: log.info('Websocket connection closed'),
                      on_open=lambda ws: log.info('Websocket connection opened'),
                      header={"Authorization": c.RANCHER_WS_AUTH})
    ws.run_forever()

    # clean subscriptions
    for sub in subs:
        sub.dispose()
def startTracking(self, sensorNames, ports, sliderPos):
    """
    Starts EMT tracking and instantiates the EMT system.

    Acts as a toggle on self.mode: from MODE_IDLE it validates the port/sensor
    selection and starts tracking, from MODE_TRACKING it stops it, and in any
    other mode (calibrating) it refuses and logs a message.

    :param sensorNames: a list containing the selected sensor for each of the ports
    :param ports: a boolean list indicating whether the selected sensors is to be tracked for each of the ports
    :param sliderPos: the given tracking speed for the EMT system (1-4, where 1 is fastest and 4 is the most accurate)
    """
    if self.mode == MODE_IDLE:
        selectedSensors = []
        selectedSensorNames = []
        selectedPorts = []
        config = guiutils.import_default_config_settings()
        if config is not None:
            primaryChannels = config['system']['primary_channels']
        else:
            logging.info('No configuration file found. Go to -> Developer Tab and select configuration file. Click Make Default.')
            return
        # Collect the sensors for every enabled port (ports are 1-indexed).
        for index, (sensorName, port) in enumerate(zip(sensorNames, ports)):
            if port is True:
                portNo = index + 1
                sensor = utils.get_sensor(sensorName)
                if sensor is not None:
                    sensor.channel = utils.get_active_channel(sensor.dof, portNo, primary_channels=primaryChannels)
                    selectedSensors.append(sensor)
                    selectedSensorNames.append(sensorName)
                    selectedPorts.append(portNo)
        if len(selectedSensors) == 0:
            logging.info('No ports or sensors were selected')
        elif len(selectedSensorNames) != len(set(selectedSensorNames)):
            # The same physical sensor cannot be tracked on two ports at once.
            logging.info('Duplicate sensors selected')
        else:
            config['system']['speed'] = sliderPos
            try:
                self.anser = EMTracker(selectedSensors, config)
                self.anser.sensors = selectedSensors
                self.anser.start_acquisition()
                # system object so we can populate views
                System_Template = namedtuple('System', ['freq', 'coils', 'sampling_freq',
                                                        'num_samples', 'active_ports',
                                                        'active_channels', 'sensors'])
                system = System_Template(
                    # transmission frequencies converted to kHz for display
                    # (presumably — source values look like Hz; TODO confirm)
                    freq=[freq / 1000 for freq in self.anser.filter.transFreqs],
                    coils=[True] * 8,
                    sampling_freq=self.anser.filter.sampleFreq,
                    num_samples=self.anser.filter.numSamples,
                    active_ports=selectedPorts,
                    active_channels=self.anser.active_channels,
                    sensors=self.anser.sensors)
                # Rx wiring: GUI-bound streams (positions/samples) are sampled on
                # the Qt scheduler; the slower system self-test runs on its own
                # thread. Sample intervals (15/30/1300) are presumably ms — confirm.
                qtScheduler = QtScheduler(QtCore)
                newScheduler = NewThreadScheduler()
                self.systemMonitor = Monitor(self.anser)
                self.subscriptions.append(
                    self.anser.positionNotifications.sample(15, scheduler=qtScheduler).subscribe(self.sendPositions))
                self.subscriptions.append(
                    self.anser.sampleNotifications.sample(30, scheduler=qtScheduler).subscribe(self.sendSamples))
                self.subscriptions.append(
                    self.anser.sampleNotifications.sample(1300, scheduler=newScheduler)
                    .subscribe(on_next=self.systemMonitor.run_system_test))
                self.subscriptions.append(
                    self.systemMonitor.systemNotifications.subscribe_on(scheduler=qtScheduler)
                    .subscribe(on_next=self.SYS_EVENT_SYSTEM_STATUS_NOTIFICATION.emit))
                self.anser.start()
                # Announce the new state to the GUI via Qt signals.
                self.systemStatus = True
                self.SYS_EVENT_SYSTEM_STATUS.emit(self.systemStatus)
                self.SYS_EVENT_MODE_TRACKING.emit(system)
                self.mode = MODE_TRACKING
                self.SYS_EVENT_MODE_CHANGED.emit(self.mode)
                logging.info('Started Tracking')
            except Exception as e:
                # Broad catch is deliberate: hardware/driver failures are reported
                # to the user as guidance rather than crashing the GUI.
                logging.info('Device cannot be accessed. Possible causes: '
                             '\n - Computer is not connected to DAQ port '
                             '\n - After plugging device into the USB Port, '
                             'wait a few moments to let the driver install'
                             '\n - Ensure device specified is correct. '
                             '\n (Go to -> Developer Tab, in the configuration file under \'system\' change the \'device_name\' to your DevX indentifier) \n')
                print(str(e))
    elif self.mode == MODE_TRACKING:
        # Second press while tracking acts as a stop toggle.
        self.stopTracking()
        logging.info('Stopped Tracking')
    else:
        logging.info('System is currently in use. Please stop calibration to continue')
print("KONIEC", pid, len(known_processes), current_thread().name) if not known_processes: try: global app_proc app_proc.kill() app_proc = None except SystemError: print('Error during killing chroma app') # access global variables safely processing_scheduler = ThreadPoolScheduler(1) scanner = Observable.create(lambda subscriber: scan_processes(subscriber)) \ .distinct() \ .subscribe_on(NewThreadScheduler()) \ .publish() # start chroma app when valid process shows up scanner.observe_on(processing_scheduler) \ .subscribe(handle_audio_start) # tear down chroma app when all valid processes are terminated scanner.flat_map(lambda pid: Observable.from_callable(wait_for_end(pid), NewThreadScheduler())) \ .observe_on(processing_scheduler) \ .subscribe(handle_audio_end) scanner.connect() input()
# Example of a 'cold' Observable: the sequence is produced for the
# subscriber on a dedicated background thread.
import rx
from rx.concurrency import NewThreadScheduler
import time


def observer_function(value):
    """Simulate a slow consumer: pause one second, then print the square."""
    time.sleep(1)
    print(value * value)


source = rx.range(1, 100000, scheduler=NewThreadScheduler())
source.subscribe(observer_function)

# The observable emits on a separate thread, so keep the main thread
# alive here or the process would exit before values are printed.
input('Press enter to finish')
def test_new_thread_now(self):
    """The scheduler's clock should closely track the system UTC clock."""
    scheduler = NewThreadScheduler()
    skew = scheduler.now() - datetime.utcnow()
    # Under 1ms of skew between the two clock reads.
    assert skew < timedelta(microseconds=1000)
def test_new_thread_now_units(self):
    """scheduler.now advances in real-time units across a sleep."""
    scheduler = NewThreadScheduler()
    before = scheduler.now
    sleep(0.1)
    elapsed = scheduler.now - before
    # ~100ms sleep should register as 80-180ms, allowing for timer slack.
    assert timedelta(milliseconds=80) < elapsed < timedelta(milliseconds=180)
result['topleft']['x'] * resize_rate, result['bottomright']['y'] * resize_rate - result['topleft']['y'] * resize_rate) tracker = cv2.TrackerKCF_create() labels[i] = result['label'] confidences[i] = result['confidence'] multiTracker.add(tracker, small, box) alarm_delay.clear() print(time.time() - start) flags['ready_dec'] = True obs_dec = subjects.Subject() obs_dec.observe_on(NewThreadScheduler()).subscribe(on_next=detect) def cover(box1, box2): left = max(box1[0], box2[0]) rignt = min(box1[2], box2[2]) top = max(box1[1], box2[1]) bottom = min(box1[3], box2[3]) if rignt < left or bottom < top: return 0 arb2 = (box2[2] - box2[0]) * (box2[3] - box2[1]) arbi = (rignt - left) * (bottom - top) return arbi / arb2 def ralarm(v):
def create_app():
    """Build the docker-enforcer Flask app and start the Rx monitoring pipeline.

    Depending on configuration, container checks are fed by docker start
    events and/or a periodic timer; verdicts are fanned out to a killer and
    a trigger handler. Returns the configured Flask application.
    """
    def setup_logging():
        # Route app logs to stdout with a uniform format.
        handler = StreamHandler(stream=sys.stdout)
        handler.setLevel(config.log_level)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        flask_app.logger.addHandler(handler)
        flask_app.logger.setLevel(config.log_level)
        flask_app.logger.name = "docker_enforcer"

    flask_app = Flask(__name__)
    if not flask_app.debug:
        setup_logging()
    flask_app.logger.info(
        "Starting docker-enforcer v{0} with docker socket {1}".format(
            config.version, config.docker_socket))

    # All checks run off the request thread on a single background scheduler.
    task_scheduler = NewThreadScheduler()
    # task_scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    if config.run_start_events:
        # Event-driven checks: docker events filtered to configured kinds,
        # mapped to a per-container check.
        events = Observable.from_iterable(docker_helper.get_events_observable()) \
            .observe_on(scheduler=task_scheduler) \
            .where(lambda e: is_configured_event(e)) \
            .map(lambda e: e['id']) \
            .map(lambda cid: docker_helper.check_container(cid, CheckSource.Event, remove_from_cache=True))

    if config.run_periodic:
        # Timer-driven checks; interval_sec is scaled by 1000 here
        # (presumably Observable.interval expects milliseconds — confirm).
        periodic = Observable.interval(config.interval_sec * 1000)
        if config.immediate_periodical_start:
            flask_app.logger.debug("Run periodic immediately")
            # start_with(-1) fires one tick immediately before the first interval.
            periodic = periodic.start_with(-1)
        periodic = periodic.observe_on(scheduler=task_scheduler) \
            .map(lambda _: docker_helper.check_containers(CheckSource.Periodic)) \
            .flat_map(lambda c: c)

    # Merge whichever sources are enabled; `events`/`periodic` are only
    # referenced under the same flags that defined them above.
    detections = Observable.empty()
    if config.run_start_events:
        detections = detections.merge(events)
    if config.run_periodic:
        detections = detections.merge(periodic)

    # Keep only containers the judge says should be killed.
    verdicts = detections \
        .map(lambda container: judge.should_be_killed(container)) \
        .where(lambda v: v.verdict)

    # publish/auto_connect(2): the pipeline starts once both downstream
    # subscribers (killer + trigger handler) have attached; retry() restarts
    # the chain on errors.
    threaded_verdicts = verdicts \
        .retry() \
        .subscribe_on(task_scheduler) \
        .publish() \
        .auto_connect(2)

    if not config.run_start_events and not config.run_periodic:
        flask_app.logger.info(
            "Neither start events or periodic checks are enabled. Docker Enforcer will be working in "
            "authz plugin mode only.")
    else:
        killer_subs = threaded_verdicts.subscribe(jurek)
        trigger_subs = threaded_verdicts.subscribe(trigger_handler)

    def on_exit(sig, frame):
        # Signal handler: tear down subscriptions (only if they were created)
        # and terminate the process.
        flask_app.logger.info("Stopping docker monitoring")
        if config.run_start_events or config.run_periodic:
            killer_subs.dispose()
            trigger_subs.dispose()
        flask_app.logger.debug("Complete, ready to finish")
        quit()

    signal.signal(signal.SIGINT, on_exit)
    signal.signal(signal.SIGTERM, on_exit)
    return flask_app
def test_new_thread_now(self):
    """scheduler.now should agree with default_now() to within a millisecond."""
    scheduler = NewThreadScheduler()
    drift = scheduler.now - default_now()
    assert abs(drift) < timedelta(milliseconds=1)
observer.on_next("Alpha") observer.on_next("Beta") observer.on_next("Gamma") observer.on_next("Delta") observer.on_next("Epsilon") observer.on_completed() def intense_calculation(value): # sleep for a random short duration between 0.5 to 2.0 seconds to simulate a long-running calculation time.sleep(random.randint(5, 20) * .1) return value # calculate number of CPU's, then create a ThreadPoolScheduler with that number of threads pool_scheduler = NewThreadScheduler() def p(e): print(e) # Create Process 1 o = Observable.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]) \ .map(lambda s: intense_calculation(s)) \ .subscribe_on(pool_scheduler)# \ # .subscribe(on_next=p, # on_error=p, # on_completed=lambda: p("PROCESS 1 done!")) # # Create Process 2s
def create_app():
    """Build the docker-enforcer Flask app and start the Rx monitoring pipeline.

    Requires at least one of RUN_START_EVENTS / RUN_PERIODIC to be enabled
    (raises ValueError otherwise). Returns the configured Flask application.
    """
    def setup_logging():
        # Route app logs to stdout with a uniform format.
        handler = StreamHandler(stream=sys.stdout)
        handler.setLevel(config.log_level)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        flask_app.logger.addHandler(handler)
        flask_app.logger.setLevel(config.log_level)
        flask_app.logger.name = "docker_enforcer"

    flask_app = Flask(__name__)
    if not flask_app.debug:
        setup_logging()
    flask_app.logger.info(
        "Starting docker-enforcer v{0} with docker socket {1}".format(
            version, config.docker_socket))

    # Guard: at least one run mode must be configured.
    if not (config.run_start_events or config.run_periodic):
        raise ValueError(
            "Either RUN_START_EVENTS or RUN_PERIODIC must be set to True")

    # All checks run off the request thread on a single background scheduler.
    task_scheduler = NewThreadScheduler()
    # task_scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    if config.run_start_events:
        # Event-driven checks: docker events mapped to a per-container check.
        events = Observable.from_iterable(docker_helper.get_events_observable()) \
            .observe_on(scheduler=task_scheduler) \
            .where(lambda e: is_configured_event(e)) \
            .map(lambda e: e['id']) \
            .map(lambda cid: docker_helper.check_container(cid, remove_from_cache=True))

    if config.run_periodic:
        # Timer-driven checks; interval_sec is scaled by 1000 here
        # (presumably Observable.interval expects milliseconds — confirm).
        periodic = Observable.interval(config.interval_sec * 1000)
        if config.immediate_periodical_start:
            flask_app.logger.debug("Run periodic immediately")
            # start_with(-1) fires one tick immediately before the first interval.
            periodic = periodic.start_with(-1)
        periodic = periodic.observe_on(scheduler=task_scheduler) \
            .map(lambda _: docker_helper.check_containers()) \
            .flat_map(lambda c: c)

    # NOTE(review): this branch is unreachable — the ValueError guard above
    # already ensured at least one of the two flags is True. Dead code.
    if not config.run_start_events and not config.run_periodic:
        flask_app.logger.fatal(
            "Either start events or periodic checks need to be enabled")
        raise Exception(
            "No run mode specified. Please set either RUN_START_EVENTS or RUN_PERIODIC")

    # Merge whichever sources are enabled; `events`/`periodic` are only
    # referenced under the same flags that defined them above.
    detections = Observable.empty()
    if config.run_start_events:
        detections = detections.merge(events)
    if config.run_periodic:
        detections = detections.merge(periodic)

    # Skip whitelisted containers, then keep only guilty verdicts.
    verdicts = detections \
        .where(lambda c: not_on_white_list(c)) \
        .map(lambda container: judge.should_be_killed(container)) \
        .where(lambda v: v.verdict)

    # publish/auto_connect(2): the pipeline starts once both downstream
    # subscribers (killer + trigger handler) have attached; retry() restarts
    # the chain on errors.
    threaded_verdicts = verdicts \
        .retry() \
        .subscribe_on(task_scheduler) \
        .publish() \
        .auto_connect(2)

    killer_subs = threaded_verdicts.subscribe(jurek)
    trigger_subs = threaded_verdicts.subscribe(trigger_handler)

    def on_exit(sig, frame):
        # Signal handler: tear down subscriptions and terminate the process.
        flask_app.logger.info("Stopping docker monitoring")
        killer_subs.dispose()
        trigger_subs.dispose()
        flask_app.logger.debug("Complete, ready to finish")
        quit()

    signal.signal(signal.SIGINT, on_exit)
    signal.signal(signal.SIGTERM, on_exit)
    return flask_app
import logging from rx.subjects import Subject from rx.concurrency import NewThreadScheduler from slackoff import threadfactory queue = Subject() # Use a new thread scheduler, this will ensure that subscriptions are non blocking scheduler = NewThreadScheduler(thread_factory=threadfactory.default_factory) def subscribe(): """ Subscribes the decorated function to all messages from the messagequeue. Example: @subscribe() """ def func_wrapper(func): queue.observe_on(scheduler).subscribe(func) return func return func_wrapper def subscribe_to(type_to_subscribe_to=None): """ Subscribes the decorated function to messages of the given type. The function will be observed on its own thread. Example: @subscribe_to(str) @subscribe_to(int) Keyword arguments:
import rx
from rx.concurrency import NewThreadScheduler, ThreadPoolScheduler, ImmediateScheduler

# The same list-backed observable consumed under three different schedulers.
source = rx.from_list([2, 3, 5])

subscriptions = [
    ('Lambda1 Received', ThreadPoolScheduler(3)),
    ('Lambda2 Received', ImmediateScheduler()),
    ('Lambda3 Received', NewThreadScheduler()),
]
for label, sched in subscriptions:
    # Bind `label` as a default argument so each lambda keeps its own tag.
    source.subscribe(lambda v, label=label: print(label, v), scheduler=sched)

# As the observable runs in a separate thread we need
# to ensure that the main thread does not terminate
input('Press enter to finish')
from typing import List from rx.core import Observable from rx.core.typing import Scheduler, RelativeTime from rx.concurrency import NewThreadScheduler new_thread_scheduler = NewThreadScheduler() def _to_marbles(scheduler: Scheduler = None, timespan: RelativeTime = 0.1): def to_marbles(source: Observable) -> Observable: """Convert an observable sequence into a marble diagram string. Args: timespan: [Optional] duration of each character in second. If not specified, defaults to 0.1s. scheduler: [Optional] The scheduler used to run the the input sequence on. Returns: Observable stream. """ def subscribe(observer, scheduler=None): scheduler = scheduler or new_thread_scheduler result: List[str] = [] last = scheduler.now def add_timespan(): nonlocal last
new_position = None # Relative positioning send_gcode(b'G91') # Rapid move send_gcode('G1 X{:.3f} Y{:.3f} F{}'.format(dx, -dy, FEEDRATE).encode('ascii')) consumer_thread = threading.Thread(target=execute_move) consumer_thread.daemon = True consumer_thread.start() def move_printer(delta): global cv, new_position if np.linalg.norm(delta) > MIN_NORM: dx, dy = np.array(delta) * MAX_REACH print(dx, dy) with cv: new_position = dx, dy cv.notify() joystick_positions \ .filter(lambda pos: all(val is not None for val in pos)) \ .combine_latest(Observable.interval(20), lambda a,b: a) \ .observe_on(NewThreadScheduler()) \ .subscribe(on_next=move_printer) input("Press any key to exit\n")