Code example #1
File: clock.py Project: fpierfed/kivy
 def __init__(self, interupt_next_only=False, **kwargs):
     super(ClockBaseInterruptBehavior, self).__init__(**kwargs)
     self._event = MultiprocessingEvent() if PY2 else ThreadingEvent()
     if async_event is not None:
         self._async_event = async_event()
     self.interupt_next_only = interupt_next_only
     self._get_min_timeout_func = self.get_min_timeout
Code example #2
 def __init__(self, host, port, sensor):
     """Initialize Fritz!Box monitor instance."""
     self.host = host
     self.port = port
     self.connection = None
     self.stopped = ThreadingEvent()
     self._sensor = sensor
Code example #3
File: t_messagebus.py Project: mfkiwl/lofar-1
    def test_buslistener_handling_connection_loss(self):
        msg_handled_event = ThreadingEvent()

        class SynchonizingHandler(AbstractMessageHandler):
            def handle_message(self, msg: LofarMessage):
                logger.info("handle_message(%s) ... setting msg_handled_event", msg)
                msg_handled_event.set()

        with BusListenerJanitor(BusListener(handler_type=SynchonizingHandler,
                                            exchange=self.tmp_exchange.address)) as listener:
            with ToBus(self.tmp_exchange.address) as tobus:
                # send test message
                tobus.send(EventMessage())

                # wait until the message is handled...
                self.assertTrue(msg_handled_event.wait(2))
                msg_handled_event.clear()

                # magic lookup of the listener's receiver...
                frombus = list(listener._threads.values())[0]['receiver']
                # ... to force server-side connection loss
                self._close_connection_of_bus_on_broker(frombus)

                # send another test message...
                tobus.send(EventMessage())

                # the listener should have handled the 2nd msg as well, even though the connection was broken,
                # thanks to auto-reconnect
                self.assertTrue(msg_handled_event.wait(2))
Code example #4
File: memory_file.py Project: krzysztofz1/gateway
    def __init__(self, master_communicator=INJECTED, pubsub=INJECTED):
        # type: (CoreCommunicator, PubSub) -> None
        """
        Initializes the MemoryFile instance, representing read/write to EEPROM and FRAM
        """
        if not master_communicator:
            raise RuntimeError('Could not inject argument: core_communicator')

        self._core_communicator = master_communicator
        self._pubsub = pubsub

        self._eeprom_cache = {}  # type: Dict[int, bytearray]
        self._fram_cache = {}  # type: Dict[int, Tuple[float, bytearray]]

        # The write cache is a per-thread/per-type cache of all changes that still need to be written. It has the
        # page as key and a list of tuples as value, where each tuple holds the start byte and its contents
        self._write_cache = {
        }  # type: Dict[int, Dict[str, Dict[int, Dict[int, int]]]]
        self._write_cache_lock = {}  # type: Dict[int, Lock]
        self._select_write_cache_lock = Lock()
        self._activate_lock = Lock()

        self._eeprom_change_callback = None  # type: Optional[Callable[[], None]]
        self._self_activated = False
        self._activation_event = ThreadingEvent()

        self._core_communicator.register_consumer(
            BackgroundConsumer(CoreAPI.event_information(), 0,
                               self._handle_event))
Code example #5
 def __init__(self, host: str, port: int,
              sensor: FritzBoxCallSensor) -> None:
     """Initialize Fritz!Box monitor instance."""
     self.host = host
     self.port = port
     self.connection: FritzMonitor | None = None
     self.stopped = ThreadingEvent()
     self._sensor = sensor
Code example #6
File: mesh_network.py Project: aleph2c/miros
    def __init__(self, user, password, port=5672, routing_key=None):
        # create a connection and a direct exchange called 'mirror', see
        # NetworkTool.exchange_name on this ip
        self.connection = NetworkTool.get_blocking_connection(
            user, password, NetworkTool.get_working_ip_address(), port)
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange=NetworkTool.exchange_name,
                                      exchange_type='topic')

        # destroy the rabbitmq queue when done
        result = self.channel.queue_declare(exclusive=True)
        self.queue_name = result.method.queue

        self.channel.queue_bind(exchange=NetworkTool.exchange_name,
                                queue=self.queue_name,
                                routing_key=routing_key)

        # The 'start_consuming' method of the pika library blocks the program.
        # For this reason we run it in its own thread so that it does not disrupt
        # our program flow; to communicate with that thread we use an Event from
        # the threading module
        self.task_run_event = ThreadingEvent()
        self.task_run_event.set()

        # We provide a default message callback, but it is more than likely that the
        # client will register their own (why else use this class?)
        self.live_callback = self.default_callback
        # print(' [x] Waiting for messages. To exit press CTRL-C')

        # We wrap the tunable callback with decryption and a serial decoder so
        # that the client doesn't have to know about this complexity
        @NetworkTool.decrypt
        @NetworkTool.deserialize
        def callback(ch, method, properties, body):
            self.live_callback(ch, method, properties, body)

        # Register the above callback with the queue, turn off message
        # acknowledgements
        self.channel.basic_consume(callback,
                                   queue=self.queue_name,
                                   no_ack=True)
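
The snippet above only sets task_run_event; the thread that actually watches it lives elsewhere in the project. As a minimal, generic sketch of the pattern the comments describe (a blocking consumer running in its own thread, told to stop via a threading Event), something like the following works; the worker function and its polling loop are illustrative, not part of miros:

import threading
import time

def worker(run_event):
    # stand-in for the blocking consumer; the real code would call something
    # like channel.start_consuming() or process one batch of messages per pass
    while run_event.is_set():
        time.sleep(0.1)  # do some work, then re-check the flag

run_event = threading.Event()
run_event.set()  # allow the worker to run
thread = threading.Thread(target=worker, args=(run_event,), daemon=True)
thread.start()

# ... later, signal the worker to stop and wait briefly for it to exit
run_event.clear()
thread.join(timeout=1.0)
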
Code example #7
File: memory_file.py Project: rolaya/gateway
    def __init__(self, memory_type, master_communicator=INJECTED, pubsub=INJECTED):
        # type: (str, CoreCommunicator, PubSub) -> None
        """
        Initializes the MemoryFile instance, representing one of the supported memory types.
        It provides caching for EEPROM, and direct write/read through for FRAM
        """
        if not master_communicator:
            raise RuntimeError('Could not inject argument: core_communicator')

        self._core_communicator = master_communicator
        self._pubsub = pubsub
        self.type = memory_type
        self._cache = {}  # type: Dict[int, bytearray]
        self._eeprom_change_callback = None  # type: Optional[Callable[[], None]]
        self._pages, self._page_length = MemoryFile.SIZES[memory_type]  # type: int, int
        self._self_activated = False
        self._dirty = False
        self._activation_event = ThreadingEvent()

        if memory_type == MemoryTypes.EEPROM:
            self._core_communicator.register_consumer(
                BackgroundConsumer(CoreAPI.event_information(), 0, self._handle_event)
            )
Code example #8
File: process.py Project: mnimmny/vgs-satellite
    def run(self):
        # We need a brand new event loop for the child process since we have to
        # use the fork process start method.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        proxy_logging.configure(self._event_queue)

        set_context(ProxyContext(mode=self.mode, port=self.port))

        self._should_stop = ThreadingEvent()

        self._command_listener = CommandListener(
            cmd_channel=self._cmd_channel,
            cmd_handler=partial(
                self._handle_command,
                loop=asyncio.get_event_loop(),
            ),
            should_stop=self._should_stop,
        )
        self._command_listener.start()

        self.master = ProxyMaster(self.mode, self.port)
        self.master.view.sig_view_add.connect(self._sig_flow_add)
        self.master.view.sig_view_remove.connect(self._sig_flow_remove)
        self.master.view.sig_view_update.connect(self._sig_flow_update)

        blinker.signal('sat_proxy_started').connect(self._sig_proxy_started)

        self._command_processor = ProxyCommandProcessor(self)

        signal.signal(signal.SIGINT, signal.SIG_IGN)

        audit_logs.subscribe(self._sig_audit_log)

        self.master.run()
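
The comment at the top of run() explains why a brand-new asyncio loop is created in the forked child process. As a hedged, self-contained illustration of that idea only (not of the vgs-satellite internals), a forked child can build and use a fresh loop like this; note the 'fork' start method is POSIX-only:

import asyncio
import multiprocessing

def child():
    # after fork, do not reuse the parent's loop; create a fresh one
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(asyncio.sleep(0))
    finally:
        loop.close()

if __name__ == '__main__':
    ctx = multiprocessing.get_context('fork')  # POSIX only
    p = ctx.Process(target=child)
    p.start()
    p.join()
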
Code example #9
File: mesh_network.py Project: aleph2c/miros
    def __init__(self, user, password, port=5672, encryption_key=None):
        self.rabbit_user = user
        self.rabbit_password = password

        if encryption_key is None:
            self.encryption_key = NetworkTool.encryption_key
        else:
            self.encryption_key = encryption_key

        credentials = pika.PlainCredentials(user, password)

        parameters = \
          pika.ConnectionParameters(
            NetworkTool.get_working_ip_address(), port, '/', credentials)
        # pika.ConnectionParameters(
        #   NetworkTool.get_working_ip_address(), port, '/', credentials,
        #   heartbeat_interval=600, blocked_connection_timeout=300)

        self.connection = pika.BlockingConnection(parameters=parameters)
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange='spy', exchange_type='fanout')
        self.channel.exchange_declare(exchange='trace', exchange_type='fanout')

        # create new queues, and ensure they destroy themselves when we disconnect
        # from them
        spy_rx = self.channel.queue_declare(exclusive=True)
        trace_rx = self.channel.queue_declare(exclusive=True)

        # queue names are random, so we need to get their names
        spy_queue_name = spy_rx.method.queue
        trace_queue_name = trace_rx.method.queue

        # bind the exchanges to each of the queues
        self.channel.queue_bind(exchange='spy', queue=spy_queue_name)
        self.channel.queue_bind(exchange='trace', queue=trace_queue_name)

        # The 'start_consuming' method of the pika library blocks the program.
        # For this reason we run it in its own thread so that it does not disrupt
        # our program flow; to communicate with that thread we use an Event from
        # the threading module
        self.task_run_event = ThreadingEvent()
        self.task_run_event.set()

        # make a ForeignHsm to track activity on another machine
        self.foreign_hsm = ForeignHsm()

        self.live_spy_callback = self.default_spy_callback
        self.live_trace_callback = self.default_trace_callback

        @SnoopReceiver.decrypt(self.encryption_key)
        def spy_callback(ch, method, properties, body):
            '''create a spy_callback function that receives messages from the queue'''
            foreign_spy_item = body
            self.foreign_hsm.append_to_spy(foreign_spy_item)
            self.live_spy_callback(ch, method, properties, body)

        @SnoopReceiver.decrypt(self.encryption_key)
        def trace_callback(ch, method, properties, body):
            '''create a trace_callback function that receives messages from the queue'''
            foreign_trace_item = body
            self.foreign_hsm.append_to_trace(foreign_trace_item)
            self.live_trace_callback(ch, method, properties, body)

        # register the spy_callback and trace_callback with a queue
        self.channel.basic_consume(spy_callback,
                                   queue=spy_queue_name,
                                   no_ack=True)

        self.channel.basic_consume(trace_callback,
                                   queue=trace_queue_name,
                                   no_ack=True)
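
Both miros examples rely on the same small set of threading.Event operations: set(), clear(), is_set(), and wait() with a timeout. For reference, a tiny self-contained sketch of how those standard-library calls behave, independent of the examples above:

import threading

evt = threading.Event()
print(evt.is_set())            # False: a new Event starts cleared
print(evt.wait(timeout=0.1))   # False: wait() times out while the flag is cleared

evt.set()                      # raise the flag; current and future waiters are released
print(evt.wait(timeout=0.1))   # True: returns immediately once the flag is set

evt.clear()                    # reset the flag so waiters block again
print(evt.is_set())            # False
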
Code example #10
 def __init__(self, interupt_next_only=False, **kwargs):
     super(ClockBaseInterruptBehavior, self).__init__(**kwargs)
     self._event = ThreadingEvent()
     self.interupt_next_only = interupt_next_only
     self._get_min_timeout_func = self.get_min_timeout
Code example #11
File: test_service.py Project: gmega/callblocker
 def __init__(self):
     super().__init__()
     self.running = ThreadingEvent()
     self.should_error = True