Example No. 1
    def test_thread_locks(self, mock_get):
        url = "http://httpbin.org/get"
        resp = MagicMock()
        resp.status_code = 200
        resp.json = MagicMock(return_value={'url': url})
        mock_get.return_value = resp
        block = HTTPRequests()

        def _dummy_process_signals(signals):
            Event().wait()

        self.configure_block(block, {
            "http_method": "GET",
            "url": url
        })
        block.logger = MagicMock()
        block._locked_process_signals = MagicMock(
            side_effect=_dummy_process_signals
        )
        block.start()
        # first 5 signals should be blocked, sixth signal is dropped and logs
        # warning
        for r in range(5):
            spawn(
                block.process_signals, [Signal({'input_attr': 'value'})]
            )
        block.process_signals([Signal({'input_attr': 'value'})])
        self.assertEqual(block.logger.warning.call_count, 1)
        self.assertEqual(block._locked_process_signals.call_count, 1)
        block.stop()
Example No. 2
 def start(self):
     super().start()
     # Like crontab, check to run jobs every minute
     self._job = Job(self._cron, timedelta(minutes=1), True)
     # Run a cron cycle immediately, but in a new thread since it
     # might take some time and we don't want it to hold up start
     spawn(self._cron)
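Every example on this page hands a callable to spawn so that slow work runs off the calling thread. The helper itself is never shown here; as a minimal sketch, assuming spawn is just a thin wrapper around threading.Thread that starts a daemon thread and returns it, it could look like this:

# Illustrative sketch only -- spawn() is assumed to be a thin wrapper around
# threading.Thread, which is consistent with how these examples use it.
from threading import Thread

def spawn(target, *args, **kwargs):
    """Run target(*args, **kwargs) in a new daemon thread and return the thread."""
    thread = Thread(target=target, args=args, kwargs=kwargs, daemon=True)
    thread.start()
    return thread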
Example No. 3
 def start(self):
     super().start()
     # Like crontab, check to run jobs every minute
     self._job = Job(self._cron, timedelta(minutes=1), True)
     # Run a cron cycle immediately, but in a new thread since it
     # might take some time and we don't want it to hold up start
     spawn(self._cron)
Example No. 4
 def notify_signals(self, block, signals, output_id):
     if not signals:
         print("Block {} notified an empty signal list".format(block))
         return
     from_block_name = block.name()
     all_receivers = [
         block["receivers"] for block in self._execution
         if block["id"] == from_block_name
     ][0]
     if not all_receivers:
         return
     # If output_id isn't in receivers, then use default output
     receivers = all_receivers.get(
         output_id, all_receivers.get("__default_terminal_value", []))
     for receiver in receivers:
         receiver_id = receiver["id"]
         input_id = receiver["input"]
         to_block = self._blocks[receiver_id]
         receiver_name = to_block.name()
         print("{} -> {}".format(from_block_name, receiver_name))
         try:
             cloned_signals = deepcopy(signals)
         except:
             cloned_signals = copy(signals)
         if input_id == "__default_terminal_value":
             # don't include input_id if it's default terminal
             if self._synchronous:
                 to_block.process_signals(cloned_signals)
             else:
                 spawn(to_block.process_signals, cloned_signals)
         else:
             if self._synchronous:
                 to_block.process_signals(cloned_signals, input_id)
             else:
                 spawn(to_block.process_signals, cloned_signals, input_id)
Example No. 5
    def test_persisted_jobs_always_schedule(self):
        """Persisted timeout jobs are not cancelled before they schedule"""

        class TestSignalTimeout(SignalTimeout):

            def __init__(self):
                super().__init__()
                self.event = Event()
                self.schedule_count = 0
                self.cancel_count = 0
                self.latest_signal = None

            def _schedule_timeout_job(self, signal, group, interval, repeatable):
                super()._schedule_timeout_job(
                    signal, group, interval, repeatable)
                self.schedule_count += 1

            def _cancel_timeout_jobs(self, group):
                super()._cancel_timeout_jobs(group)
                self.cancel_count += 1

            def process_signals(self, signals, from_test=False):
                super().process_signals(signals)
                self.latest_signal = signals[-1]
                if from_test:
                    self.event.set()

        block = TestSignalTimeout()
        # Load from persistence
        persisted_jobs = defaultdict(dict)
        persisted_jobs[1][timedelta(seconds=10)] = {
            "signal": Signal({"group": 1}), "repeatable": True}
        persisted_jobs[2][timedelta(seconds=10)] = {
            "signal": Signal({"group": 2}), "repeatable": True}
        block._jobs = persisted_jobs
        self.configure_block(block, {
            "intervals": [{
                "interval": {"seconds": 10},
                "repeatable": True
            }],
            "group_by": "{{ $group }}"})
        # This signal should not cancel the persisted job before it's scheduled
        spawn(block.process_signals, [Signal({
            "group": 2, "from_spawn": True
        })], from_test=True)
        self.assertEqual(block.schedule_count, 0)
        self.assertEqual(block.cancel_count, 0)
        block.start()
        block.event.wait(1)
        # 2 scheduled persisted jobs and one scheduled processed signal
        self.assertEqual(block.schedule_count, 3)
        # Processed signal cancels one of the scheduled jobs
        self.assertEqual(block.cancel_count, 3)
        # Make sure the last signal processed was the one from the spawn
        self.assertTrue(block.latest_signal.from_spawn)
Example No. 6
 def _cron(self):
     """ Called every minute to check if cron job should notify signals """
     self.logger.debug("Checking if cron emit should run")
      if self.utc():
         now = datetime.utcnow()
     else:
         now = datetime.now()
     now = [str(now.minute),
            str(now.hour),
            str(now.day),
            str(now.month),
            str(now.weekday())]
     if self._check_cron(now):
         spawn(self._emit)
Example No. 7
 def _cron(self):
     """ Called every minute to check if cron job should notify signals """
     self.logger.debug("Checking if cron emit should run")
      if self.utc():
         now = datetime.utcnow()
     else:
         now = datetime.now()
     now = [
         str(now.minute),
         str(now.hour),
         str(now.day),
         str(now.month),
         str(now.weekday())
     ]
     if self._check_cron(now):
         spawn(self._emit)
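Examples No. 6 and No. 7 build now as a list of five string fields (minute, hour, day, month, weekday) and hand it to _check_cron, which is not shown. A hypothetical field-by-field matcher, assuming the configured schedule is stored as five strings where "*" matches anything, might look like:

# Hypothetical sketch of _check_cron -- the real method is not shown in these
# examples, and genuine crontab syntax (ranges, steps, lists) is richer.
def _check_cron(self, now):
    # self._cron_specs is an assumed attribute holding five strings in the
    # same order as now: minute, hour, day, month, weekday.
    return all(
        spec == "*" or spec == field
        for spec, field in zip(self._cron_specs, now)
    )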
Example No. 8
    def test_persisted_jobs_always_schedule(self):
        """Persisted timeout jobs are not cancelled before they schedule"""

        class TestSignalTimeout(SignalTimeout):

            def __init__(self):
                super().__init__()
                self.event = Event()
                self.schedule_count = 0
                self.cancel_count = 0

            def _schedule_timeout_job(self, signal, key, interval, repeatable):
                super()._schedule_timeout_job(
                    signal, key, interval, repeatable)
                self.schedule_count += 1

            def _cancel_timeout_jobs(self, key):
                super()._cancel_timeout_jobs(key)
                self.cancel_count += 1

            def process_signals(self, signals):
                super().process_signals(signals)
                self.event.set()

        block = TestSignalTimeout()
        # Load from persistence
        persisted_jobs = defaultdict(dict)
        persisted_jobs[1][timedelta(seconds=0.1)] = Signal({"group": 1})
        persisted_jobs[2][timedelta(seconds=0.1)] = Signal({"group": 2})
        block._repeatable_jobs = persisted_jobs
        self.configure_block(block, {
            "intervals": [{
                "interval": {"milliseconds": 100},
                "repeatable": True
            }],
            "group_by": "{{ $group }}"})
        # This signal should not cancel the persisted job before it's scheduled
        spawn(block.process_signals, [Signal({"group": 2})])
        self.assertEqual(block.schedule_count, 0)
        self.assertEqual(block.cancel_count, 0)
        block.start()
        block.event.wait(1)
        # 2 scheduled persisted jobs and one scheduled processed signal
        self.assertEqual(block.schedule_count, 3)
        # Processed signal cancels one of the scheduled jobs
        self.assertEqual(block.cancel_count, 1)
Example No. 9
    def test_timeout_race_condition(self):
        """ Test that race conditions from timeout and process are handled """
        class SlowNotifyBlock(SignalTimeout):

            def notify_signals(self, signals):
                sleep(0.2)
                super().notify_signals(signals)

        block = SlowNotifyBlock()
        self.configure_block(block, {
            "intervals": [
                {
                    "interval": {"seconds": 10},
                    "repeatable": False,
                },
            ],
            "group_by": "{{ $group }}",
        })
        block.start()
        # Simulate a timeout happening, but this one will be slow to allow
        # another process signals call to come in while it's "timing out"
        spawn(
            block._timeout_job,
            Signal({"2pi": 6.28, "group": "foo"}),
            "foo",
            timedelta(seconds=10))
        block.process_signals([Signal({"pi": 3.14, "group": "foo"})])
        sleep(0.5)
        # After 1 second we should only have seen the signal notified
        # from the timeout job spawn call
        self.assert_num_signals_notified(1, block)
        # After 10 more seconds we shouldn't have any more notifications if
        # we keep the block alive by continuing to process signals on it
        JumpAheadScheduler.jump_ahead(5)
        block.process_signals([Signal({"pi": 3.14, "group": "foo"})])
        JumpAheadScheduler.jump_ahead(5)
        block.process_signals([Signal({"pi": 3.14, "group": "foo"})])
        sleep(0.5)
        self.assert_num_signals_notified(1, block)
        block.stop()
Example No. 10
 def rediscover(self):
      self.logger.info('Rediscover command received!')
     if self._discovering:
         status = 'Discover already in progress'
     else:
         status = 'OK'
         if self.device:
             status += ', dropped device \"{}\" with MAC {}'\
                     .format(self.ip, self.mac)
         self.device = None
         self._thread = spawn(self._discover)
     self.logger.info(status)
     return {'status': status}
Example No. 11
 def _connect(self):
     self.logger.debug('Opening HID Device {}'.format(self.device()))
     while not self.file_descriptor:
         try:
             self.file_descriptor = open(self.device(), 'rb')
         except Exception:
             if not self.status.is_set(RunnerStatus.warning):
                 self.set_status('warning')
             msg = 'Unable to open HID Device, trying again in {} seconds'
             self.logger.error(msg.format(self.reconnect_interval))
             sleep(self.reconnect_interval)
     self._kill = False
     self._thread = spawn(self._delimited_reader)
     self.set_status('ok')
Example No. 12
    def test_limit_lock(self, mock_client):
        ''' Test that signals are dropped when the max locks is reached '''
        blk = ModbusTCP()
        self.configure_block(blk, {})
        event = Event()

        def _process_signals(signals):
            event.wait()
            blk.notify_signals(signals)
        blk._locked_process_signals = MagicMock(side_effect=_process_signals)
        blk.logger = MagicMock()
        blk.start()
        for _ in range(5):
            spawn(blk.process_signals, [Signal(), Signal()])
        blk.process_signals([Signal(), Signal()])
        # The last signal logs a warning because limit lock is reached
        self.assertEqual(blk.logger.warning.call_count, 1)
        # Only the first signal gets to call process signals because of lock
        self.assertEqual(blk._locked_process_signals.call_count, 1)
        # Now let the signals waiting for lock get processed and notify them
        event.set()
        sleep(0.1)
        self.assert_num_signals_notified(10)
        blk.stop()
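This test, like Example No. 1, exercises a block that lets a limited number of callers wait on a processing lock and drops (with a warning) any signals beyond that limit. A minimal sketch of the pattern, using only the standard library and assuming a fixed limit of five waiters, could be:

# Illustrative sketch of a bounded-lock pattern, not the framework's actual
# implementation: at most max_locks callers may hold or wait for the lock;
# any additional caller is dropped with a warning. self.logger and
# self._locked_process_signals are assumed to be provided by the block.
from threading import Lock, Semaphore

class LimitLockMixin:

    def __init__(self, max_locks=5):
        self._process_lock = Lock()
        self._pending = Semaphore(max_locks)

    def process_signals(self, signals):
        if not self._pending.acquire(blocking=False):
            self.logger.warning("Too many signals waiting; dropping signals")
            return
        try:
            with self._process_lock:
                self._locked_process_signals(signals)
        finally:
            self._pending.release()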
Example No. 13
 def _openSource(self):
     while True:
         self.logger.debug('Opening camera')
         try:
             cam = cv2.VideoCapture(0)
             if cam.isOpened():
                 self.logger.debug('Got Camera!')
                 self._camera = cam
                 self._is_broadcasting.set()
                 break
             self.logger.warning('Failed to open camera, retrying...')
         except:
             self._is_broadcasting.clear()
         sleep(1)
     self._thread = spawn(self._run)
Example No. 14
    def process_signals(self, signals):
        if not self.device:
            self.logger.warning(
                'No TuYa device connected, dropping {} signals'.format(
                    len(signals)))
            if self._discovering:
                return
            else:
                self._thread = spawn(self._discover)
                return
        outgoing_signals = []
        for signal in signals:
            new_signal = self.get_output_signal(
                self.execute_tuya_command(signal), signal)
            outgoing_signals.append(new_signal)

        self.notify_signals(outgoing_signals)
Example No. 15
    def configure(self, context):
        super().configure(context)
        self._build_socket_url_base()
        # Connect to the socket before starting the block
        # This connection won't happen with a retry, so if the socket
        # server is not running, the connection will fail. In this case,
        # if the user has specified that the service should start anyways,
        # attempt to reconnect based off of the given retry strategy.

        try:
            self._connect_to_socket()
        except:
            if self.start_without_server():
                self.logger.info('Could not connect to web socket. Service '
                                 'will be started and this block will attempt '
                                 'to reconnect using given retry strategy.')
                self._disconnect_thread = spawn(self.handle_disconnect)
            else:
                raise
Example No. 16
    def configure(self, context):
        super().configure(context)
        self._build_socket_url_base()
        # Connect to the socket before starting the block
        # This connection won't happen with a retry, so if the socket
        # server is not running, the connection will fail. In this case,
        # if the user has specified that the service should start anyways,
        # attempt to reconnect based off of the given retry strategy.

        try:
            self._connect_to_socket()
        except:
            if self.start_without_server():
                self.logger.info('Could not connect to web socket. Service '
                                 'will be started and this block will attempt '
                                 'to reconnect using given retry strategy.')
                self._disconnect_thread = spawn(self.handle_disconnect)
            else:
                raise
Example No. 17
 def _connect(self):
     self.logger.debug('Connecting to scale device...')
     self._kill = False
     while not self.device and not self._kill:
         try:
             self.device = usb.core.find(
                 idVendor=self.manufacturer_id,
                 idProduct=self.product_id)
             if self.device is None:
                 msg = 'Scale not found, trying again in {} seconds'
                 if not self.status.is_set(RunnerStatus.warning):
                     self.set_status('warning')
                     self.logger.error(
                         msg.format(self.reconnect_interval()))
                 else:
                     self.logger.warning(
                         msg.format(self.reconnect_interval()))
                 sleep(self.reconnect_interval())
                 continue
             self.logger.debug('Device discovered')
             self.device.reset()
             self.logger.debug('Device reset')
             if self.device.is_kernel_driver_active(self.device_interface):
                 self.device.detach_kernel_driver(self.device_interface)
                 self._detached = True
                 self.logger.debug('Detached kernel driver')
             else:
                 self.logger.debug('No active kernel driver found')
             self.device.set_configuration()
             self.logger.debug('Device Configured')
             endpoint = self.device[self.device_interface][(0, 0)][0]
             self._address = endpoint.bEndpointAddress
             self._packet_size = endpoint.wMaxPacketSize
         except:
             self.device = None
             if not self.status.is_set(RunnerStatus.warning):
                 self.set_status('warning')
             msg = 'Unable to connect to scale, trying again in {} seconds'
             self.logger.exception(msg.format(self.reconnect_interval()))
             sleep(self.reconnect_interval())
     self.set_status('ok')
     self._thread = spawn(self._reader)
Example No. 18
 def start(self):
     super().start()
     spawn(self._openSource)
Example No. 19
 def start(self):
     super().start()
     self._thread = spawn(self.run)
Example No. 20
    def _run(self):
        non_motion_timer = self.non_motion_timer()
        stream_video = False
        motion_counter = 0
        avg = None
        try:
            while self._is_broadcasting.is_set():
                # control frame rate
                if self.frame_rate():
                    sleep(1 / self.frame_rate())
                # grab a frame
                success, frame = self._camera.read()
                if not success:
                    self.logger.exception('Failed to grab frame')
                    break
                # detect motion
                motion_detected = False
                base_frame = frame
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)
                # if the average frame is None, initialize it
                if avg is None:
                    self.logger.debug("starting background model...")
                    avg = gray.copy().astype("float")
                    continue
                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
                # threshold the delta image, dilate the thresholded image to fill
                # in holes, then find contours on thresholded image
                thresh = cv2.threshold(frameDelta, self.delta_thresh(), 255,
                                       cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                _, cnts, hierarchy = cv2.findContours(thresh.copy(),
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_SIMPLE)

                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < self.min_area():
                        continue

                    # compute the bounding box for the contour, draw it on the frame,
                    # and update the text
                    (x, y, w1, h1) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x + w1, y + h1), (0, 255, 0),
                                  2)
                    motion_detected = True
                if motion_detected:
                    motion_counter += 1
                    # check to see if the number of frames with motion is high enough
                    if motion_counter >= self.min_motion_frames():
                        if not stream_video:
                            self.logger.debug('Motion Detected!')
                        stream_video = True
                        non_motion_timer = self.non_motion_timer()
                        self.notify_signals([Signal({'frame': base_frame})])

                else:
                    if stream_video is True and non_motion_timer > 0:
                        non_motion_timer -= 1
                        self.notify_signals([Signal({'frame': base_frame})])
                    else:
                        if stream_video:
                            self.logger.debug('Stream Stopped')
                        motion_counter = 0
                        stream_video = False
                        non_motion_timer = self.non_motion_timer()
            if self._is_broadcasting.is_set():
                # loop was broken, respawn!
                self._is_broadcasting.clear()
                self._camera = None
                self._thread = spawn(self._openSource)
        except:
            self.logger.exception('Error in video read loop!')
            self._is_broadcasting.clear()
            self._camera = None
            self._thread = spawn(self._openSource)
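One portability note on this example: the three-value unpacking of cv2.findContours matches OpenCV 3.x, which returns (image, contours, hierarchy); OpenCV 4.x returns only (contours, hierarchy). A version-agnostic variant of that call would be:

# Contours are the second-to-last element of the findContours result in both
# OpenCV 3.x and 4.x, so indexing from the end works on either version.
result = cv2.findContours(thresh.copy(),
                          cv2.RETR_EXTERNAL,
                          cv2.CHAIN_APPROX_SIMPLE)
cnts = result[-2]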
Example No. 21
 def start(self):
     super().start()
     self._watch_thread = spawn(self.watch_for_changes)
Example No. 22
 def start(self):
     super().start()
     self._job = Job(self._emit, self.interval(), True)
     # Run an emit cycle immediately, but in a new thread since it
     # might take some time and we don't want it to hold up start
     spawn(self._emit)
Example No. 23
 def start(self):
     super().start()
     if self._is_broadcasting.is_set():
         self._thread = spawn(self._run)
Example No. 24
 def start(self):
      # super().start()
     if self._is_broadcasting.is_set():
         spawn(self.run)
Example No. 25
 def start(self):
     super().start()
     spawn(self._connect)
Example No. 26
 def start(self):
     super().start()
     self._thread = spawn(self.gobabygo)
Example No. 27
 def configure(self, context):
     super().configure(context)
     self._thread = spawn(self._discover)
Example No. 28
 def start(self):
     super().start()
     self.counter = 0
     self._stop_event.clear()
     spawn(self.run)
Example No. 29
 def start(self):
     super().start()
     self._job = Job(self._emit, self.interval(), True)
     # Run an emit cycle immediately, but in a new thread since it
     # might take some time and we don't want it to hold up start
     spawn(self._emit)
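Several of the start() examples pair a repeatable Job with an immediate spawn so the first cycle runs right away without delaying start(). As a rough standard-library sketch of that pairing (the framework's Job class is assumed, not shown, so threading.Timer stands in for it here):

# Rough sketch of the "schedule repeatedly, also run once now" pattern used
# in the start() examples above; threading.Timer stands in for Job.
from threading import Thread, Timer

def start_periodic(callback, interval_seconds):
    """Schedule callback every interval_seconds and also run it once now."""
    def _tick():
        callback()
        _schedule()

    def _schedule():
        timer = Timer(interval_seconds, _tick)
        timer.daemon = True
        timer.start()

    # Schedule the repeating cycle...
    _schedule()
    # ...and run one cycle immediately in its own thread so the caller is not
    # held up by a potentially slow first run.
    Thread(target=callback, daemon=True).start()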