def snooze(self, interval: Optional[float] = None):
    """
    Put the alarm into the snoozed state and stop any playing audio.

    :param interval: Snooze interval in seconds; falls back to the
        configured ``snooze_interval`` when not specified.
    """
    self._runtime_snooze_interval = interval or self.snooze_interval
    self.state = AlarmState.SNOOZED
    self.stop_audio()

    evt = AlarmSnoozedEvent(
        name=self.name,
        interval=self._runtime_snooze_interval,
    )
    get_bus().post(evt)
def handler(response):
    """Forward the assistant response to the bus and record it in the
    interactions history."""
    get_bus().post(ResponseEvent(response_text=response))

    if self.interactions:
        self.interactions[-1]['response'] = response
    else:
        self.interactions.append({'response': response})
def on_modified(self, event):
    """
    Handle a filesystem "modified" notification for a monitored file.

    Reads any lines appended since the last stored offset, builds an event
    per line and posts it to the bus. Handles file truncation/rotation by
    resetting the read offset when the file shrinks.

    :param event: Watchdog filesystem event (``src_path`` holds the path).
    """
    file_info = self._monitored_files.get(event.src_path)
    if not file_info:
        # Not a file we are monitoring.
        return

    try:
        file_size = os.path.getsize(event.src_path)
    except OSError as e:
        logger.warning('Could not get the size of {}: {}'.format(
            event.src_path, str(e)))
        return

    if file_info.pos > file_size:
        # The file was truncated or rotated: restart from the beginning.
        logger.warning(
            'The size of {} has been unexpectedly decreased from {} to {} bytes'
            .format(event.src_path, file_info.pos, file_size))
        file_info.pos = 0

    try:
        with file_info.lock, open(event.src_path, 'r') as f:
            f.seek(file_info.pos)
            for line in f.readlines():
                evt = self._build_event(file=event.src_path, line=line)
                # Skip lines older than the last processed timestamp to
                # avoid re-posting entries we have already seen.
                if evt and (not file_info.last_timestamp
                            or evt.args['time'] >= file_info.last_timestamp):
                    get_bus().post(evt)
                    file_info.last_timestamp = evt.args['time']

            file_info.pos = f.tell()
    except OSError as e:
        # Fix: log the path actually being read (was self.resource.path,
        # which refers to an unrelated attribute).
        logger.warning('Error while reading from {}: {}'.format(
            event.src_path, str(e)))
def play(self, resource, subtitles=None, **args):
    """
    Play a resource.

    :param resource: Resource to play - can be a local file or a remote URL
    :type resource: str

    :param subtitles: Path to optional subtitle file
    :type subtitles: str

    :param args: Extra runtime arguments that will be passed to the
        mpv executable as a key-value dict (keys without `--` prefix)
    :type args: dict[str,str]
    """
    get_bus().post(MediaPlayRequestEvent(resource=resource))

    if subtitles:
        # Fix: the subtitle argument must be set before the player is
        # initialized - previously args was mutated *after*
        # self._init_mpv(args), so sub_file never reached mpv.
        args['sub_file'] = self.get_subtitles_file(subtitles)

    self._init_mpv(args)

    resource = self._get_resource(resource)
    if resource.startswith('file://'):
        resource = resource[len('file://'):]

    self._player.play(resource)
    return self.status()
def handler(sender, path, interface, signal, params):
    """Wrap a raw D-Bus signal into a DbusSignalEvent and post it."""
    evt = DbusSignalEvent(
        bus=bus,
        signal=signal,
        path=path,
        interface=interface,
        sender=sender,
        params=params,
    )
    get_bus().post(evt)
def stop_conversation(self):
    """Stop the assistant conversation currently in progress, if any."""
    if self.assistant:
        self.assistant.play_response = False

        stream = self.conversation_stream
        if stream:
            stream.stop_playback()
            stream.stop_recording()

        get_bus().post(ConversationEndEvent(assistant=self))
def _post_event(self, connection: Connection,
                output_event_type: Type[IRCEvent], **kwargs):
    """
    Build an IRC event of the given type, enriching it with connection
    details when available, and post it to the bus.

    :param connection: IRC connection the event originated from.
    :param output_event_type: Event class to instantiate.
    :param kwargs: Extra attributes for the event constructor.
    """
    if isinstance(connection, ServerConnection):
        kwargs.update(
            server=connection.server,
            port=connection.port,
            connected=connection.connected,
            alias=self.alias,
        )

    get_bus().post(output_event_type(**kwargs))
def kill_process(self):
    """
    The first created tunnel instance also starts the ``ngrok`` process.
    The process will stay alive until the Python interpreter is stopped
    or this action is invoked.

    :raises AssertionError: If the ngrok process is not running.
    """
    from pyngrok import ngrok

    proc = ngrok.get_ngrok_process()
    # Fix: don't use the assert statement for runtime validation - it is
    # stripped when Python runs with -O, silently skipping the check.
    # The raised exception type is kept for backward compatibility.
    if not (proc and proc.proc):
        raise AssertionError('The ngrok process is not running')

    proc.proc.kill()
    get_bus().post(NgrokProcessStoppedEvent())
def stop(self):
    """
    Turns off the motors
    """
    # Clearing the direction makes the drive loop exit; then wait for it.
    self._direction = None

    drive_thread = self._drive_thread
    if drive_thread:
        drive_thread.join()

    get_bus().post(ZeroborgStopEvent())
    return {'status': 'stopped'}
def hndl(*args):
    """
    Slack websocket message handler: parses an incoming Events API payload
    and posts the matching platypush Slack event to the bus.
    """
    # The callback may be invoked either as (data,) or as (ws, data),
    # depending on the websocket-client version in use.
    ws = args[0] if len(args) > 1 else self._ws_app
    data = json.loads(args[1] if len(args) > 1 else args[0])
    output_event = None
    self._send_ack(ws, data)

    if data['type'] == 'events_api':
        event = data.get('payload', {}).get('event', {})
        event_args = {}

        if event['type'] == 'app_mention':
            output_event = SlackAppMentionReceivedEvent(
                text=event['text'],
                user=event['user'],
                channel=event['channel'],
                team=event['team'],
                timestamp=event['event_ts'],
                icons=event.get('icons'),
                blocks=event.get('blocks'))
        elif event['type'] == 'message':
            msg = event.copy()
            prev_msg = event.get('previous_message')
            event_type = SlackMessageReceivedEvent

            if event.get('subtype') == 'message_deleted':
                # NOTE(review): assumes 'previous_message' is always present
                # on deletions; if it is missing, msg becomes None and the
                # msg.get() calls below would fail - confirm against the
                # Slack Events API payloads.
                msg = prev_msg
                event_type = SlackMessageDeletedEvent
                event_args['timestamp'] = event['deleted_ts']
            else:
                event_args['timestamp'] = msg.get('ts')

                if event.get('subtype') == 'message_changed':
                    # Edited messages nest the new content under 'message'.
                    msg = msg.get('message', msg)
                    event_args['previous_message'] = prev_msg
                    event_type = SlackMessageEditedEvent

            event_args.update({
                'text': msg.get('text'),
                'user': msg.get('user'),
                'channel': msg.get('channel', event.get('channel')),
                'team': msg.get('team'),
                'icons': msg.get('icons'),
                'blocks': msg.get('blocks'),
            })

            output_event = event_type(**event_args)

    if output_event:
        get_bus().post(output_event)
def handler(response):
    """Post the assistant response, store it in the interactions history
    and optionally render it through the configured TTS plugin."""
    get_bus().post(ResponseEvent(assistant=self, response_text=response))

    if self.interactions:
        self.interactions[-1]['response'] = response
    else:
        self.interactions.append({'response': response})

    if self.tts_plugin:
        tts = get_plugin(self.tts_plugin)
        tts.say(response, **self.tts_args)
def process_text(self, text: str) -> None:
    """
    Process an intermediate speech-to-text result.

    A buffered transcript is treated as final either when the engine stops
    producing text while something is buffered, or when it returns the
    same text twice in a row.

    :param text: Latest (possibly partial) transcript from the engine.
    """
    if (not text and self._current_text) or (text and text == self._current_text):
        # The transcript has stabilized: fire the detection hook and reset.
        self.on_speech_detected(self._current_text)
        self._current_text = ''
    else:
        if text:
            if not self._current_text:
                # First chunk of a new utterance.
                get_bus().post(SpeechStartedEvent())

            self.logger.info(
                'Intermediate speech results: [{}]'.format(text))

        self._current_text = text
def fire_event(self, event):
    """
    Fires an event (instance of :class:`platypush.message.event.Event` or a
    subclass) to the internal bus and triggers any handler callback
    associated to the event type or any of its super-classes.

    :param event: Event to fire
    :type event: :class:`platypush.message.event.Event` or a subclass
    """
    from platypush.backend import Backend
    from platypush.context import get_bus

    # Backends carry their own bus; plugins use the application bus.
    bus = self.bus if isinstance(self, Backend) else get_bus()
    if not bus:
        self.logger.warning(
            'No bus available to post the event: {}'.format(event))
    else:
        bus.post(event)

    # Collect the handlers registered for the event class and all of its
    # super-classes, de-duplicated.
    handlers = set()
    for cls in inspect.getmro(event.__class__):
        if cls in self._event_handlers:
            handlers.update(self._event_handlers[cls])

    for hndl in handlers:
        # Fix: pass the handler as a thread argument instead of closing
        # over the loop variable. The previous `hndl_thread` closure was
        # late-binding on `hndl`, so a thread scheduled after `hndl` was
        # rebound could invoke the wrong handler (some handlers running
        # twice, others never).
        threading.Thread(target=hndl, args=(event,)).start()
def Post(self, msg: dict):
    """
    This method accepts a message as a dictionary (either representing a
    valid request or an event) and either executes it (request) or
    forwards it to the application bus (event).

    :param msg: Request or event, as a dictionary.
    :return: The return value of the request, or 0 if the message is an event.
    """
    msg = self._parse_msg(msg)

    if isinstance(msg, Event):
        get_bus().post(msg)
        return 0

    if isinstance(msg, Request):
        ret = run(msg.action, **msg.args)
        # DBus doesn't like None return types
        return '' if ret is None else ret
def streaming_thread():
    """
    Background recording loop: opens the input stream, then reads audio
    blocks from the callback queue ``q`` and writes them to the FIFO
    until recording is stopped or the requested duration elapses.
    """
    try:
        with sd.InputStream(samplerate=sample_rate, device=device,
                            channels=channels, callback=audio_callback,
                            dtype=dtype, latency=latency,
                            blocksize=blocksize):
            with open(fifo, 'wb') as audio_queue:
                self.start_recording()
                get_bus().post(SoundRecordingStartedEvent())
                self.logger.info(
                    'Started recording from device [{}]'.format(device))
                recording_started_time = time.time()

                while self._get_recording_state() != RecordingState.STOPPED \
                        and (duration is None or
                             time.time() - recording_started_time < duration):
                    # Block while the recording is paused; the paused/resumed
                    # transition is signalled through this condition object.
                    while self._get_recording_state() == RecordingState.PAUSED:
                        self.recording_paused_changed.wait()

                    # Bound the queue wait by the residual duration, if any;
                    # a timeout raises queue.Empty (handled below).
                    get_args = {
                        'block': True,
                        'timeout': max(
                            0,
                            duration - (time.time() - recording_started_time)),
                    } if duration is not None else {}

                    data = q.get(**get_args)
                    if not len(data):
                        continue

                    audio_queue.write(data)
    except queue.Empty:
        # No audio arrived within the residual duration.
        self.logger.warning('Recording timeout: audio callback failed?')
    finally:
        self.stop_recording()
        get_bus().post(SoundRecordingStoppedEvent())
def recording_thread(self, block_duration: Optional[float] = None,
                     block_size: Optional[int] = None,
                     input_device: Optional[str] = None) -> None:
    """
    Recording thread. It reads raw frames from the audio device and
    dispatches them to ``detection_thread``.

    :param block_duration: Audio blocks duration. Specify either
        ``block_duration`` or ``block_size``.
    :param block_size: Size of the audio blocks. Specify either
        ``block_duration`` or ``block_size``.
    :param input_device: Input device
    """
    assert (block_duration or block_size) and not (block_duration and block_size), \
        'Please specify either block_duration or block_size'

    if not block_size:
        # Derive the block size (in frames) from the requested duration.
        block_size = int(self.rate * self.channels * block_duration)

    self.before_recording()
    self.logger.debug('Recording thread started')
    device = self._get_input_device(input_device)
    self._input_stream = sd.InputStream(samplerate=self.rate,
                                        device=device,
                                        channels=self.channels,
                                        dtype='int16',
                                        latency=0,
                                        blocksize=block_size)
    self._input_stream.start()
    self.on_recording_started()
    get_bus().post(SpeechDetectionStartedEvent())

    # Loop until another thread clears self._input_stream (the stop signal).
    while self._input_stream:
        try:
            frames = self._input_stream.read(block_size)[0]
        except Exception as e:
            self.logger.warning(
                'Error while reading from the audio input: {}'.format(
                    str(e)))
            continue

        self._audio_queue.put(frames)

    get_bus().post(SpeechDetectionStoppedEvent())
    self.on_recording_ended()
    self.logger.debug('Recording thread terminated')
def _animate_thread(lights):
    """
    Animation worker: repeatedly applies the computed light attributes
    until the animation is stopped or the optional duration expires.
    """
    set_thread_name('HueAnimate')
    get_bus().post(
        LightAnimationStartedEvent(lights=lights, groups=groups,
                                   animation=animation))

    lights = _initialize_light_attrs(lights)
    animation_start_time = time.time()
    stop_animation = False

    while not stop_animation and not (
            duration and time.time() - animation_start_time > duration):
        try:
            if animation == self.Animation.COLOR_TRANSITION:
                # Each light has its own attribute set in transition mode.
                for (light, attrs) in lights.items():
                    self.logger.debug('Setting {} to {}'.format(
                        light, attrs))
                    self.bridge.set_light(light, attrs)
            elif animation == self.Animation.BLINK:
                # All lights share the same attributes in blink mode.
                conf = lights[list(lights.keys())[0]]
                self.logger.debug('Setting lights to {}'.format(conf))

                if groups:
                    self.bridge.set_group([g.name for g in groups], conf)
                else:
                    self.bridge.set_light(lights.keys(), conf)

            if transition_seconds:
                time.sleep(transition_seconds)

            stop_animation = _should_stop()
        except Exception as e:
            # Bridge errors are transient: log and back off before retrying.
            self.logger.warning(e)
            time.sleep(2)

        # Advance to the next frame of the animation.
        lights = _next_light_attrs(lights)

    get_bus().post(
        LightAnimationStoppedEvent(lights=lights, groups=groups,
                                   animation=animation))

    self.animation_thread = None
def play(self, resource, subtitles=None, fullscreen=None, volume=None):
    """
    Play a resource.

    :param resource: Resource to play - can be a local file or a remote URL
    :type resource: str

    :param subtitles: Path to optional subtitle file
    :type subtitles: str

    :param fullscreen: Set to explicitly enable/disable fullscreen (default:
        `fullscreen` configured value or False)
    :type fullscreen: bool

    :param volume: Set to explicitly set the playback volume (default:
        `volume` configured value or 100)
    :type volume: int
    """
    get_bus().post(MediaPlayRequestEvent(resource=resource))
    resource = self._get_resource(resource)

    # VLC expects a plain path, not a file:// URL.
    if resource.startswith('file://'):
        resource = resource[len('file://'):]

    self._init_vlc(resource)

    if subtitles:
        if subtitles.startswith('file://'):
            subtitles = subtitles[len('file://'):]

        self._player.video_set_subtitle_file(subtitles)

    self._player.play()

    # NOTE(review): the volume may be applied twice - first from
    # self.volume here, then again from the volume argument/default
    # below. Confirm whether this first call is intentional.
    if self.volume:
        self.set_volume(volume=self.volume)

    if fullscreen or self._default_fullscreen:
        self.set_fullscreen(True)

    if volume is not None or self._default_volume is not None:
        self.set_volume(
            volume if volume is not None else self._default_volume)

    return self.status()
def _fire_event(self, event, event_hndl):
    """
    Post a torrent event to the bus and invoke the optional per-event
    callback, logging (but not propagating) any callback failure.

    :param event: Event instance to post.
    :param event_hndl: Optional callable invoked with the event.
    """
    get_bus().post(event)

    if not event_hndl:
        return

    try:
        event_hndl(event)
    except Exception as e:
        self.logger.warning(
            'Exception in torrent event handler: {}'.format(str(e)))
        self.logger.exception(e)
def on_speech_detected(self, speech: str) -> None:
    """
    Hook called when speech is detected. Triggers the right event depending
    on the current context.

    :param speech: Detected speech.
    """
    speech = speech.strip()

    if speech in self.hotwords:
        # A hotword opens a conversation window if a timeout is configured.
        event = HotwordDetectedEvent(hotword=speech)
        if self.conversation_timeout:
            self._conversation_event.set()
            threading.Timer(
                self.conversation_timeout,
                self._conversation_event.clear,
            ).start()
    elif self._conversation_event.is_set():
        # Still inside the conversation window opened by a hotword.
        event = ConversationDetectedEvent(speech=speech)
    else:
        event = SpeechDetectedEvent(speech=speech)

    get_bus().post(event)
def _callback():
    """
    Alarm worker loop: fires the alarm when enabled, waits for audio
    playback to finish, then sleeps until the next occurrence (or the
    snooze interval), shutting down when no further occurrence exists.
    """
    while True:
        if self.state == AlarmState.SHUTDOWN:
            break

        if self.is_enabled():
            get_bus().post(AlarmStartedEvent(name=self.name))
            if self.audio_plugin and self.audio_file:
                self.play_audio()

            self.actions.execute()

        time.sleep(10)
        sleep_time = None

        if self.state == AlarmState.RUNNING:
            # Poll the audio plugin until playback stops.
            while True:
                # NOTE: local `state` is the *player* state string,
                # intentionally distinct from self.state (alarm state).
                state = self._get_audio_plugin().status().output.get(
                    'state')
                if state == PlayerState.STOP.value:
                    if self.state == AlarmState.SNOOZED:
                        sleep_time = self._runtime_snooze_interval
                    else:
                        self.state = AlarmState.WAITING

                    break
                else:
                    time.sleep(10)

        if self.state == AlarmState.SNOOZED:
            sleep_time = self._runtime_snooze_interval
        elif self.get_next() is None:
            # No next occurrence: terminate the worker.
            self.state = AlarmState.SHUTDOWN
            break

        if not sleep_time:
            # Sleep until the next occurrence, or poll again in 10s.
            sleep_time = self.get_next() - time.time(
            ) if self.get_next() else 10

        time.sleep(sleep_time)
def drive(self, direction):
    """
    Drive the motors in a certain direction.
    """

    def _run():
        # Motor control loop: keeps applying the power levels mapped to
        # the current direction until the direction is cleared (see stop()).
        try:
            while self._direction:
                try:
                    if self._direction in self.directions:
                        self._motors = self.directions[self._direction]
                    else:
                        self.logger.warning(
                            'Invalid direction {}: stopping motors'.format(
                                self._direction))
                except Exception as e:
                    self.logger.error(
                        'Error on _get_direction_from_sensors: {}'.format(
                            str(e)))
                    break

                # Motors are addressed through SetMotor1..SetMotorN.
                for i, power in enumerate(self._motors):
                    method = getattr(self.zb, 'SetMotor{}'.format(i + 1))
                    method(power)
        finally:
            # Always release the motors and reset the EPO latch on exit.
            self.zb.MotorsOff()
            self.zb.ResetEpo()
            self._drive_thread = None

    self._direction = direction.lower()

    if not self._drive_thread:
        drive_thread = threading.Thread(target=_run)
        drive_thread.start()
        self._drive_thread = drive_thread

    # NOTE(review): this raises KeyError when the direction is not in
    # self.directions - confirm whether invalid directions should be
    # rejected before posting the event.
    get_bus().post(
        ZeroborgDriveEvent(direction=self._direction,
                           motors=self.directions[self._direction]))

    return {'status': 'running', 'direction': direction}
def detect_speech(self, frames: tuple) -> str:
    """
    Feed a block of audio frames to the STT engine and accumulate the
    transcript; when the engine signals an endpoint, flush it and fire
    the speech-detected hook.

    :param frames: Raw audio frames to feed to the engine.
    :return: The transcript accumulated so far (empty after an endpoint).
    """
    text, is_endpoint = self._stt_engine.process(frames)
    text = text.strip()

    if text:
        if not self._speech_in_progress.is_set():
            self._speech_in_progress.set()
            get_bus().post(SpeechStartedEvent())

        # Fix: `text` was already stripped above; the original stripped
        # it a second time here.
        self._current_text += ' ' + text

    if is_endpoint:
        # Fix: the original called .strip().strip() - the second call was
        # a no-op on an already-stripped string.
        text = self._stt_engine.flush().strip()
        if text:
            self._current_text += ' ' + text

        self._speech_in_progress.clear()

        if self._current_text:
            self.on_speech_detected(self._current_text)

        self._current_text = ''

    return self._current_text
def get_measurement(self):
    """
    Extends :func:`.GpioSensorPlugin.get_measurement`

    :returns: Distance measurement as a scalar (in mm)
    """
    try:
        distance = self._get_data()
        bus = get_bus()
        bus.post(DistanceSensorEvent(distance=distance, unit='mm'))
        return distance
    except TimeoutError as e:
        # A read timeout is expected occasionally: log and return None.
        self.logger.warning(str(e))
        return
    except Exception:
        self.close()
        # Fix: bare `raise` re-raises the active exception with its
        # original traceback (`raise e` appended this frame to it).
        raise
def callback(log: NgrokLog):
    """Translate ngrok process log records into platypush events."""
    msg = log.msg

    if msg == 'client session established':
        get_bus().post(NgrokProcessStartedEvent())
        return

    if msg == 'started tunnel':
        # noinspection PyUnresolvedReferences
        tunnel = dict(name=log.name,
                      url=log.url,
                      protocol=log.url.split(':')[0])
        self._active_tunnels_by_url[tunnel['url']] = tunnel
        get_bus().post(NgrokTunnelStartedEvent(**tunnel))
        return

    if (msg == 'end' and int(getattr(log, 'status', 0)) == 204
            and getattr(log, 'pg', '').startswith('/api/tunnels')):
        # A DELETE on /api/tunnels/<name> succeeded: drop the tunnel.
        # noinspection PyUnresolvedReferences
        key = log.pg.split('/')[-1]
        tunnel = self._active_tunnels_by_name.pop(
            key, self._active_tunnels_by_url.pop(key, None))

        if tunnel:
            get_bus().post(NgrokTunnelStoppedEvent(**tunnel))
        return

    if msg == 'received stop request':
        get_bus().post(NgrokProcessStoppedEvent())
def discover_service(self, service: str, timeout: Optional[int] = 5) -> Dict[str, Any]:
    """
    Find all the services matching the specified type.

    :param service: Service type (e.g. ``_http._tcp.local.``).
    :param timeout: Browser timeout in seconds (default: 5). Specify None for no timeout - in such case the
        discovery will loop forever and generate events upon service changes.
    :return: A ``service_type -> [service_names]`` mapping. Example:

      .. code-block:: json

        {
            "host1._platypush-http._tcp.local.": {
                "type": "_platypush-http._tcp.local.",
                "name": "host1._platypush-http._tcp.local.",
                "info": {
                    "addresses": ["192.168.1.11"],
                    "port": 8008,
                    "host_ttl": 120,
                    "other_ttl": 4500,
                    "priority": 0,
                    "properties": {
                        "name": "Platypush",
                        "vendor": "Platypush",
                        "version": "0.13.2"
                    },
                    "server": "host1._platypush-http._tcp.local.",
                    "weight": 0
                }
            }
        }

    """
    assert not self._discovery_in_progress, 'A discovery process is already running'
    self._discovery_in_progress = True

    evt_queue = queue.Queue()
    zc = Zeroconf()
    listener = ZeroconfListener(evt_queue=evt_queue)
    browser = ServiceBrowser(zc, service, listener)
    discovery_start = time.time()
    services = {}

    try:
        # Fix: with timeout=None the previous condition (`while timeout
        # and ...`) was immediately false, so the "discover forever" mode
        # documented above never actually ran.
        while timeout is None or time.time() - discovery_start < timeout:
            # Residual wait for this iteration; None blocks indefinitely.
            to = (discovery_start + timeout - time.time()) \
                if timeout is not None else None

            try:
                evt = evt_queue.get(block=True, timeout=to)
                if isinstance(evt, (ZeroconfServiceAddedEvent,
                                    ZeroconfServiceUpdatedEvent)):
                    services[evt.service_name] = {
                        'type': evt.service_type,
                        'name': evt.service_name,
                        'info': evt.service_info,
                    }
                elif isinstance(evt, ZeroconfServiceRemovedEvent):
                    services.pop(evt.service_name, None)

                get_bus().post(evt)
            except queue.Empty:
                if not services:
                    self.logger.warning(
                        'No such service discovered: {}'.format(service))
    finally:
        browser.cancel()
        zc.close()
        self._discovery_in_progress = False

    return services
def handler(on):
    """Post a device on/off state-change event to the bus."""
    evt = GoogleDeviceOnOffEvent(
        device_id=self.device_id,
        device_model_id=self.device_model_id,
        on=on,
    )
    get_bus().post(evt)
def handler(volume):
    """Notify the bus that the assistant volume has changed."""
    evt = VolumeChangedEvent(assistant=self, volume=volume)
    get_bus().post(evt)
def handler(phrase):
    """Post the recognized phrase and open a new interaction record."""
    evt = SpeechRecognizedEvent(assistant=self, phrase=phrase)
    get_bus().post(evt)
    self.interactions.append({'request': phrase})
def handler(with_follow_on_turn):
    """Signal the end of an assistant conversation on the bus."""
    evt = ConversationEndEvent(
        assistant=self,
        with_follow_on_turn=with_follow_on_turn,
    )
    get_bus().post(evt)