示例#1
0
    def test_new_thread_schedule_action(self):
        """schedule() on a NewThreadScheduler eventually runs the action."""
        scheduler = NewThreadScheduler()
        executed = []

        def action(sched, state):
            # Record that the scheduled action actually ran.
            executed.append(True)

        scheduler.schedule(action)

        sleep(0.1)
        assert executed == [True]
示例#2
0
    def test_new_thread_schedule_action_cancel(self):
        """Disposing a relative-scheduled action before it fires cancels it."""
        scheduler = NewThreadScheduler()
        executed = []

        def action(sched, state):
            executed.append(True)

        disposable = scheduler.schedule_relative(
            timedelta(milliseconds=1), action)
        disposable.dispose()

        sleep(0.1)
        assert not executed
示例#3
0
    def test_new_thread_schedule_action_due(self):
        """An action scheduled 200ms out runs no earlier than ~200ms."""
        scheduler = NewThreadScheduler()
        started = default_now()
        finished = []

        def action(sched, state):
            finished.append(default_now())

        scheduler.schedule_relative(timedelta(milliseconds=200), action)

        sleep(0.4)
        assert finished
        elapsed = finished[0] - started
        # 180ms tolerance absorbs timer jitter on slow CI machines.
        assert elapsed > timedelta(milliseconds=180)
示例#4
0
    def test_new_thread_schedule_periodic_cancel(self):
        """Disposing a periodic schedule stops further invocations."""
        scheduler = NewThreadScheduler()
        interval = 0.1
        counter = 4

        def tick(state):
            # Count down once per period; stop decrementing at zero.
            nonlocal counter
            if state:
                counter -= 1
                return state - 1

        subscription = scheduler.schedule_periodic(interval, tick, counter)
        sleep(0.4)
        subscription.dispose()
        # At least one tick happened, but the countdown was interrupted.
        assert 0 <= counter < 4
示例#5
0
 def test_with_observe_on(self):
     """Produce on the source, hop to a new thread, map, then stop via take_until."""
     pipeline = self.source.pipe(
         operators.do_action(lambda x: print(f"Producing {x}")),
         operators.observe_on(NewThreadScheduler()),
         operators.map(self.slow_op),
         operators.do_action(self.stop),
         operators.take_until(self._stop),
     )
     pipeline.run()
示例#6
0
    def test_new_thread_schedule_periodic(self):
        """A periodic schedule counts down to zero, then signals completion."""
        scheduler = NewThreadScheduler()
        done = threading.Semaphore(0)
        interval = 0.05
        counter = 3

        def tick(state):
            # Decrement while the state is non-zero; once it reaches zero,
            # release the semaphore so the test thread can proceed.
            nonlocal counter
            if state:
                counter -= 1
                return state - 1
            if counter == 0:
                done.release()

        scheduler.schedule_periodic(interval, tick, counter)
        done.acquire()
        assert counter == 0
        def dispose():
            """Tear down the image acquirer asynchronously.

            Schedules the actual shutdown work on ``scheduler`` (falling back
            to a fresh ``NewThreadScheduler``) so the teardown never joins
            the thread it is called from.  ``self`` and ``scheduler`` are
            closed over from the enclosing scope, which is not visible here.
            """
            def _async_dispose(*args):
                # prevent join in the same thread.
                self.logger.info("Stopping image acquisition")
                self.acquirer.stop_image_acquisition()
                self.acquirer.destroy()
                self.logger.info("Stopped image acquisition")

            (scheduler or NewThreadScheduler()).schedule(_async_dispose)
示例#8
0
 def step(self):
     """Run one epoch on every island in parallel, then migrate and re-cost."""
     epochs = rx.from_iterable(self.islands).pipe(
         ops.subscribe_on(NewThreadScheduler()),
         ops.flat_map(
             lambda island: island.epoch(self.epoch_length).pipe(ops.last())),
         ops.buffer_with_count(len(self.islands)),
     )
     last_result = epochs.run()
     self.migration()
     self.update_cost(last_result)
示例#9
0
def from_process(worker: Callable,
                 *args,
                 executor: "ProcessPoolExecutor | None" = None,
                 **kwargs):
    """Run *worker* in a process pool and expose its result as an observable.

    The submitted call blocks a dedicated new thread (not the caller) until
    the worker process finishes, then the observable emits its return value.

    :param worker: callable executed in a worker process.
    :param args: positional arguments forwarded to *worker*.
    :param executor: pool to submit to; ``None`` (the default) uses the
        module-level ``default_process_executor``.  The old annotation
        claimed a plain ``ProcessPoolExecutor``, which was misleading.
    :param kwargs: keyword arguments forwarded to *worker*.
    :return: observable emitting the worker's result on a new thread.
    """
    pool = executor or default_process_executor

    def run_as_process():
        # Blocks this (new) thread until the process completes.
        future = pool.submit(worker, *args, **kwargs)
        return future.result()

    return rx.from_callable(run_as_process, NewThreadScheduler())
示例#10
0
    def execute_this_method(self):
        """Open the websocket feed and subscribe to the internal stream.

        NOTE(review): ``ws.run_forever()`` blocks indefinitely, so the code
        after it only runs once the socket closes — confirm whether it was
        meant to run on its own thread.
        NOTE(review): ``self.stream.pipe(NewThreadScheduler())`` passes a
        scheduler where ``pipe()`` expects operators; this likely should be
        ``pipe(operators.observe_on(NewThreadScheduler()))`` or
        ``subscribe(scheduler=...)`` — verify against the rx version in use.
        """
        self.stream = Subject()
        url = URL

        websocket.enableTrace(True)
        ws = websocket.WebSocketApp(
            url,
            on_message=self.on_message,
            on_error=self.on_error,
        )
        ws.run_forever()

        react = self.stream.pipe(NewThreadScheduler())
        return react.subscribe()
    def configure_timed_read(self):
        """Poll the FP50 on a fixed interval when the config enables it."""
        interval = self.config.getfloat("fp50", "interval")

        # A non-positive interval disables the timed read entirely.
        if interval <= 0:
            return

        logger.info("Configuring timed read")
        # enabled: poll power, then (after a delay) internal temperature,
        # uploading each reading as it arrives.
        query_delay = self.config.getfloat("fp50", "query_delay")
        ticks = rx.interval(interval, scheduler=NewThreadScheduler())
        ticks.pipe(
            operators.flat_map(lambda x: self.control.get_power()),
            operators.map(lambda x: self.upload_power(x)),
            operators.delay(query_delay),
            operators.flat_map(
                lambda x: self.control.get_internal_temperature()),
            operators.map(lambda x: self.upload_internal_temperature(x)),
            operators.catch(error_handler),
        ).subscribe()
示例#12
0
 def subscribe(
         self,
         observer: Optional[Union[typing.Observer, typing.OnNext]] = None,
         on_error: Optional[typing.OnError] = None,
         on_completed: Optional[typing.OnCompleted] = None,
         on_next: Optional[typing.OnNext] = None,
         *,
         scheduler: Optional[typing.Scheduler] = None) -> typing.Disposable:
     """Subscribe to this observable, defaulting to a new-thread scheduler.

     Behaves exactly like the parent ``subscribe`` except that when no
     scheduler is supplied a fresh ``NewThreadScheduler`` is used.
     """
     effective = scheduler or NewThreadScheduler()
     return super().subscribe(
         observer, on_error, on_completed, on_next, scheduler=effective)
示例#13
0
    def test_create_and_run_all_supported_algorithms(self):
        """Every configured algorithm runs one step and reports progress.

        Spins up the actor system once, then, per algorithm, runs a single
        simulation step on a new thread and checks that exactly one
        ProgressMessage comes back.
        """
        # Renamed from `sys`, which shadowed the stdlib module name.
        actor_system = ActorSystem(
            "multiprocTCPBase", logDefs=log_helper.EVOGIL_LOG_CONFIG)
        try:
            test_cases = run_config.algorithms
            for test_case in test_cases:
                with self.subTest(algorithm=test_case):
                    algo_factory, _ = prepare(test_case, "ZDT1")
                    algorithm = algo_factory()

                    simple_simulation = StepsRun(1)
                    result = list(
                        simple_simulation.create_job(algorithm)
                        .pipe(ops.subscribe_on(NewThreadScheduler()),
                              ops.to_iterable())
                        .run()
                    )
                    self.assertEqual(1, len(result))
                    self.assertIsInstance(result[0], ProgressMessage)
        finally:
            # Always shut down the actor system, even when a subtest fails;
            # previously a failure leaked the multiproc TCP actor system.
            actor_system.shutdown()
示例#14
0
    def run(self):
        """Receive messages on a background thread and render entities forever."""
        print('Starting server {}'.format(self._server))

        incoming = rx.create(self._receive)
        graphics = incoming.pipe(
            ops.subscribe_on(NewThreadScheduler()),
            ops.map(lambda msg: self._mapper(msg)),
            ops.filter(lambda gfx: gfx is not None),
        )
        self._subscription = graphics.subscribe(
            lambda gfx: self._entities.append(gfx))

        print('Start')
        # Render loop: draw and advance every entity, then prune in place the
        # ones that report they can be destroyed.
        while True:
            with canvas(self._virtual) as draw:
                for item in self._entities:
                    item.render(draw)
                    item.update()

            self._entities[:] = [e for e in self._entities
                                 if not e.can_destroy()]
            time.sleep(0.010)
示例#15
0
    def __init__(self, name="pump_control"):
        """Set up the job queue, its scheduler, and the serial-job subscription."""
        super().__init__(name)
        self.pump_control_config = self.config["pumpControl"]
        self.control_delay = self.pump_control_config["controlDelay"]
        self.tq = Subject()
        self.scheduler = NewThreadScheduler()
        self.update_subject = Subject()

        def run_job(job):
            # Execute a queued job, logging (not propagating) failures.
            try:
                job()
            except Exception as ex:
                self.logger.error(ex)

        def log_stream_error(ex):
            self.logger.error(ex)

        def close_serial():
            self.serial.close()

        self.tq.pipe(operators.observe_on(self.scheduler)).subscribe(
            run_job, log_stream_error, close_serial)
        self.state = [0.0, 0.0]
        self.enable_remote_control(True)
示例#16
0
    def run_metaepoch(self):
        """Run a metaepoch on every node (levels 2, 1, 0) and accumulate cost."""
        # Collect jobs level by level, preserving the original 2 -> 1 -> 0 order.
        node_jobs = [node.run_metaepoch()
                     for level in (2, 1, 0)
                     for node in self.level_nodes[level]]
        # _plot_node(node, 'r', [[0, 1], [0, 3]])
        node_costs = []
        for job in node_jobs:
            job.pipe(
                ops.subscribe_on(NewThreadScheduler()),
                ops.map(lambda message: self._update_cost(message)),
                ops.sum(),
                ops.do_action(on_next=node_costs.append),
            ).run()
        # self.cost += max(node_costs)
        self.cost += sum(node_costs)
示例#17
0
    def __init__(self, client: MQTTClientWrapper, name="fp50", config=None):
        """Wire the FP50 water bath to MQTT.

        Subscribes to the temperature and setpoint topics, polls the device
        on a fixed interval from a dedicated thread, and merges the two
        message streams into a single debounced ``message_subject``.

        :param client: wrapper used for MQTT subscribe/publish/callbacks.
        :param name: config key under ``waterBath`` for this device.
        :param config: optional config override passed to the base class.
        """
        super().__init__(name, config=config)
        self.client = client

        self.topic_base = self.config["waterBath"][name]["topicBase"]
        self.topic = self.topic_base + "/setpoint"
        self.message_subject = Subject()

        self.client.subscribe(self.topic_base + "/crystallizer_temperature")
        self.client.subscribe(self.topic_base + "/setpoint")
        self.interval_scheduler = NewThreadScheduler()

        def update(x, scheduler=None):
            # Publish an empty payload on the temperature topic — presumably
            # this acts as a poll request the device answers; confirm against
            # the device/broker protocol.
            self.client.publish(self.topic_base + "/crystallizer_temperature",
                                None)

        rx.interval(self.config["waterBath"][name]["interval"],
                    self.interval_scheduler).subscribe(update)

        def convert(x):
            # x is the latest (temperature, setpoint) pair from
            # combine_latest; each message's payload sits at index 2.
            payloads = [xx[2].payload for xx in x]
            for p in payloads:
                if not p:
                    # skipping conversion request
                    return None

            return {
                # "power": float(payloads[0]),
                # "internal_temperature": float(payloads[1]),
                "crystallizer_temperature": float(payloads[0]),
                "setpoint": float(payloads[1]),
            }

        rx.combine_latest(
            from_callback(
                self.client.message_callback_add)(self.topic_base +
                                                  "/crystallizer_temperature"),
            from_callback(self.client.message_callback_add)(self.topic_base +
                                                            "/setpoint"),
        ).pipe(operators.map(convert),
               operators.filter(lambda x: x is not None),
               operators.debounce(0.6)).subscribe(self.message_subject)
示例#18
0
def main():
    """Build a client graph that proxies through a server subgraph and run it."""

    def create_server_graph():
        # Identity graph: echoes its byte-string input unchanged.
        tensor = Input(shape=(None,), dtype="bytes")
        return Model(inputs=[tensor], outputs=[tensor])

    def create_client_graph():
        # Route the input through a remote server subgraph on localhost:5678.
        tensor = Input(shape=(None,), dtype="bytes")
        proxied = ServerSubgraph(
            addr=("localhost", 5678),
            graph=create_server_graph(),
        )(tensor)
        return Model(inputs=[tensor], outputs=[proxied])

    client_model = create_client_graph()
    frames = rx.from_iterable(["abc", "def", "ghi"])
    outputs = client_model.to_rx(frames)
    outputs[0].subscribe(print, subscribe_on=NewThreadScheduler())
示例#19
0
    def test_imga_cost_calculation(self):
        """IMGA's reported total cost must equal the sum of its island costs."""
        final_driver, problem_mod = prepare("IMGA+NSGAII", "ZDT1")

        imga = final_driver()

        steps_run = StepsRun(4)

        total_costs = []
        islands_costs = []

        def on_imga_result(result):
            # (fixed) a stray trailing comma after this append used to turn
            # the statement into a discarded one-element tuple expression.
            total_costs.append(result.cost)
            islands_costs.append(
                sum(island.driver.cost for island in imga.islands))

        steps_run.create_job(imga).pipe(
            ops.subscribe_on(NewThreadScheduler()),
            ops.do_action(on_next=on_imga_result),
        ).run()

        self.assertListEqual(total_costs, islands_costs)
示例#20
0
 def update_scheduler(self):
     """Restart the absence-detection timer on a fresh thread."""
     self.clear_scheduler_if_running()
     scheduler = NewThreadScheduler()
     self.schedule_obj = scheduler.schedule_relative(
         self.absence_due_second, self.detect_absence)
示例#21
0
import rx
import rx.operators as ops
from rx.scheduler import NewThreadScheduler
import threading
import time

# Emit on a dedicated thread so the subscriber callbacks run off the main
# thread; the prints below show which thread each callback runs on.
new_thread_scheduler = NewThreadScheduler()
numbers = rx.from_([1, 2, 3, 4], scheduler=new_thread_scheduler)


def handle_next(i):
    print("on_next({}) {}".format(threading.get_ident(), i))


def handle_error(e):
    print("on_error({}): {}".format(threading.get_ident(), e))


def handle_completed():
    print("on_completed({})".format(threading.get_ident()))


subscription = numbers.pipe(
    ops.map(lambda i: i * 2),
    ops.map(lambda i: "number is: {}".format(i)),
).subscribe(
    on_next=handle_next,
    on_error=handle_error,
    on_completed=handle_completed)

print("main({})".format(threading.get_ident()))
# Keep the main thread alive long enough for the background emissions.
time.sleep(1.0)
示例#22
0
 def accept_conn_rx(item: Tuple[str, socket.socket]) -> Any:
     """Wrap one (addr, socket) pair in an observable accepted on a new thread."""
     single = rx.from_iterable([item])
     return single.pipe(
         ops.observe_on(NewThreadScheduler()),
         ops.map(lambda x: accept_conn(x)),
     )
示例#23
0
        ops.subscribe_on(pool_scheduler),
        ops.map(lambda r: generate_content(r)),
    ).subscribe(
        on_next=dataSubscriber.on_next,
        on_error=dataSubscriber.on_error,
        on_completed=dataSubscriber.on_completed,
    )


if __name__ == "__main__":
    # Demo driver: three identical requests, sharing a single
    # NewThreadScheduler instance for both pipelines.
    requestA = Request("A", 10 * 1000)
    requestB = Request("B", 10 * 1000)
    requestC = Request("C", 10 * 1000)
    dataSubscriberC = DataSubscriber(requestC)

    scheduler = NewThreadScheduler()
    #optimal_thread_count = multiprocessing.cpu_count()
    pool_schedulerA = scheduler  #NewThreadScheduler()#EventLoopScheduler() #ThreadPoolScheduler(optimal_thread_count)
    runProcess(requestA, pool_schedulerA)
    #pool_schedulerB = EventLoopScheduler()
    #runProcess(requestB, pool_schedulerB)
    pool_schedulerC = scheduler  #EventLoopScheduler()
    runProcessSubscriber(requestC, pool_schedulerC, dataSubscriberC)

    print("running background")
    print("main thread sleep 5 seconds")
    time.sleep(5)
    print("force stop all")
    # NOTE(review): requestB is stopped but never started above (its
    # runProcess call is commented out) — confirm this is intentional.
    requestA.stop()
    requestB.stop()
    requestC.stop()
示例#24
0
    if args.enable_monitor_wakeup:
        try:
            enable_monitor_mode(args.interface)
        except Exception as exc:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained so the original
            # cause is preserved in the traceback.
            raise RuntimeError("Failed while enabling monitor mode") from exc

    print(f"Listening on {args.interface}")

    queue = FileQueue(args.queue_path)

    connection_string = os.environ.get("IOTHUB_DEVICE_CONNECTION_STRING")
    # Explicit check instead of `assert len(...)`: the assert was stripped
    # under -O and raised TypeError (len(None)) when the env var was unset.
    if not connection_string:
        raise RuntimeError("IoTHub connection string should not be empty")
    upload_service = AttendanceUploadService.create(connection_string, queue, is_dry_run=False)
    user_querier = UserQuerier(args.configs)
    device_sniffer = DeviceSniffer(user_querier, args.interface)
    state_context_manager = AttendanceStateContextManager(args.configs, device_sniffer.get_observable(), upload_service)
    # Default to None so the KeyboardInterrupt handler can dispose safely;
    # previously this raised NameError when the regular check was disabled.
    regular_check_task = None
    if args.enable_regular_queue_check:
        regular_check_task = NewThreadScheduler().schedule_periodic(
            args.regular_queue_check_interval,
            lambda x: send_queued_messages_if_connected_to_internet(upload_service, SimpleConnectionService())
        )
    try:
        device_sniffer.start()
        while True:
            time.sleep(1)

    except KeyboardInterrupt:
        device_sniffer.stop()
        if regular_check_task is not None:
            regular_check_task.dispose()
        print("Exiting program...")
示例#25
0
 def test_new_thread_now(self):
     """scheduler.now tracks the real clock to within a few milliseconds."""
     scheduler = NewThreadScheduler()
     delta = scheduler.now - default_now()
     assert abs(delta) < timedelta(milliseconds=5)
示例#26
0
 def test_new_thread_now_units(self):
     """scheduler.now advances at roughly one second per wall-clock second."""
     scheduler = NewThreadScheduler()
     before = scheduler.now
     sleep(1.1)
     elapsed = scheduler.now - before
     assert timedelta(milliseconds=1000) < elapsed < timedelta(milliseconds=1300)