示例#1
0
    def __init__(self, master=None, sensor=None, display=None):
        """Build the full-screen clock UI inside the given Tk root.

        Key arguments:
        master -- the Tk root window this frame fills
        sensor -- PIR sensor thread instance, or None (stored for later use)
        display -- DisplayController instance, or None (stored for later use)
        """
        tk.Frame.__init__(self, master, bg='black')
        self._sensor = sensor
        self._display = display
        self.last_time = ""
        self.master = master
        self.toggle_ampm = True
        self.fullscreen = False
        self.run_clock = False
        self._debug_lines = 2
        self._menu_showing = False

        # Screen dimensions; size the window to fill the whole screen.
        self.screen_width = self.master.winfo_screenwidth()
        self.screen_height = self.master.winfo_screenheight()
        logger.debug("%d x %d", self.screen_width, self.screen_height)
        geo = "{0}x{1}".format(self.screen_width, self.screen_height)
        logger.debug("Geometry: %s", geo)
        self.master.geometry(geo)
        self.master["bg"] = 'black'

        # Set display brightness on RPi
        DisplayController.set_display_backlight(QConfiguration.backlight)

        # Font size in pixels; a configured value wins over the default.
        if QConfiguration.fontsize:
            self.font_size = QConfiguration.fontsize
        else:
            # Default to 45% of screen height
            self.font_size = int(0.45 * self.screen_height)

        # This trick hides the cursor
        self.master.config(cursor="none")

        # Fill the whole window
        self.pack(fill=tk.BOTH, expand=1)

        self._createWidgets()

        # Set up escape key as full screen toggle
        self.master.bind("<Escape>", self.toggle_fullscreen)

        # This should put the display into full screen initially;
        # pressing Escape toggles it back.
        self.toggle_fullscreen()

        self.context_menu = ContextMenu(self, height=self.screen_height)

        # Capture left mouse single click anywhere in the Frame to show
        # the context menu.
        self.bind("<Button-1>", self._show_context_menu)
示例#2
0
    def execute(self, userdata):
        """
        Execute function called in the state machine

        Key arguments:
        userdata -- state machine userdata object being passed around

        """
        rospy.logdebug("Recognizing Hotword")
        rospy.loginfo(self.output_msg + "\n")

        client = HotwordClient()

        # Run the video on its own thread while we wait for the hotword.
        display_controller = DisplayController()
        worker = Thread(target=display_controller.play_video,
                        args=(None, True))
        threads = [worker]
        worker.start()

        # Block until the hotword service answers with a language code.
        lang_code = client.make_request(userdata.lang_code_in)

        # Raise the stop flag, wait for the video thread, then clear the flag.
        rospy.set_param(STOP_THREAD, True)
        for worker in threads:
            worker.join()
        rospy.set_param(STOP_THREAD, False)

        # Hand the detected language on to the next state.
        userdata.lang_code_out = lang_code

        return 'listening'
示例#3
0
    def play(self, length, display_controller):
        """Play the loaded image sequence on the LED strip for `length` seconds.

        Key arguments:
        length -- playback duration in seconds
        display_controller -- rotation tracker used to estimate the current
                              angle; a new DisplayController is created when
                              None is passed

        Does nothing (returns early) when no sequence is loaded or when not
        running on a Raspberry Pi.
        """
        if not self.sequence:
            print("No sequence loaded! Cancel play")
            return

        # Hardware playback is only possible on the Pi itself.
        if not is_running_on_pi:
            return

        if display_controller is None:
            display_controller = DisplayController()

        self.strip.begin()
        self.start_time = time.time()
        end_time = length + self.start_time

        print("playing sequence for ", length, "seconds")

        # display_controller.last_update advances on every update() call,
        # so this loop runs until `length` seconds have elapsed.
        # (The original non-Pi branch was unreachable because of the early
        # return above and has been removed.)
        while end_time > display_controller.last_update:
            frame = self.get_frame()
            display_controller.update()
            angle = display_controller.estimate_angle()

            # Push the column of pixels matching the current rotation angle.
            self.strip.show(frame.get_sample_by_angle(angle))
            time.sleep(0.0001)

        self.stop()
async def main(loop):
    """Main process for script."""
    setup_logging()
    log_git_hash()
    details_timeout = getattr(sonos_settings, "show_details_timeout", None)
    display = DisplayController(
        loop,
        sonos_settings.show_details,
        sonos_settings.show_artist_and_album,
        details_timeout,
    )

    # Prefer the configured room; fall back to interactive entry.
    if sonos_settings.room_name_for_highres != "":
        sonos_room = sonos_settings.room_name_for_highres
        _LOGGER.info("Monitoring room: %s", sonos_room)
    else:
        print("No room name found in sonos_settings.py")
        print("You can specify a room name manually below")
        print(
            "Note: manual entry works for testing purposes, but if you want this to run automatically on startup then you should specify a room name in sonos_settings.py"
        )
        print("You can edit the file with the command: nano sonos_settings.py")
        print("")
        sonos_room = input("Enter a Sonos room name for testing purposes>>>  ")

    session = ClientSession()
    sonos_data = SonosData(
        sonos_settings.sonos_http_api_address,
        sonos_settings.sonos_http_api_port,
        sonos_room,
        session,
    )

    async def webhook_callback():
        """Callback to trigger after webhook is processed."""
        await redraw(session, sonos_data, display)

    webhook = SonosWebhook(display, sonos_data, webhook_callback)
    await webhook.listen()

    # Clean up the session, webhook and display on the usual signals.
    for sig_name in ('SIGINT', 'SIGTERM', 'SIGQUIT'):
        loop.add_signal_handler(
            getattr(signal, sig_name),
            lambda: asyncio.ensure_future(
                cleanup(loop, session, webhook, display)))

    # Poll forever (slowly when the webhook is active), redrawing on refresh.
    while True:
        interval = (WEBHOOK_INTERVAL if sonos_data.webhook_active
                    else POLLING_INTERVAL)

        if time.time() - sonos_data.last_update > interval:
            await sonos_data.refresh()
            await redraw(session, sonos_data, display)
        await asyncio.sleep(1)
示例#5
0
def main():
    """Create the display state machine, the optional PIR sensor thread,
    and run the Tk event loop until the window closes."""
    display_controller = DisplayController()

    # Start the PIR sensor monitor when configured.
    sensor_thread = None
    if QConfiguration.pirsensor:
        # Imported lazily so hosts without GPIO support can still run.
        from pir_sensor_thread import SensorThread
        sensor_thread = SensorThread(notify=display_controller.set_display_state,
                                     pir_pin=QConfiguration.pirpin,
                                     time_off=QConfiguration.timeout,
                                     time_on=QConfiguration.timein)
        sensor_thread.start()

    # Create main window and run the event loop
    root = tk.Tk()
    app = LumiClockApplication(master=root,
                               sensor=sensor_thread,
                               display=display_controller)
    root.title('LumiClock')

    # Best-effort window icon; the platform determines the file format.
    try:
        if os.name == "posix":
            # Linux or OS X
            root.iconbitmap("lumiclock.xbm")
            logger.debug("Loaded icon lumiclock.xbm")
        elif os.name == "nt":
            # Windows
            root.iconbitmap("lumiclock.ico")
            logger.debug("Loaded icon lumiclock.ico")
    except Exception as ex:
        logger.error(str(ex))

    root.mainloop()

    # The event loop has exited; shut the sensor monitor down.
    if QConfiguration.pirsensor:
        sensor_thread.terminate()
示例#6
0
    def execute(self, userdata):
        """
        Execute function called in the state machine

        Key arguments:
        userdata -- state machine userdata object being passed around

        Stores the recognized sentence in userdata.sentence_out and
        returns the next state label 'interpretation'.
        """
        rospy.logdebug("Recognizing Sentence")
        rospy.loginfo(self.output_msg + "\n")

        # Audio out: pick a random prompt matching this state and the
        # current language, then speak it.
        self.output_msg = \
            df[(df['Type'] == self.state) & (df['Language'] == userdata.lang_code_in)]['Message'].sample(1).iloc[0]
        audio_out_client.make_request(self.output_msg, userdata.lang_code_in)

        # Start Video Thread (plays while speech recognition runs)
        threads = []
        display_controller = DisplayController()
        video_thread = Thread(target=display_controller.play_video,
                              args=(None, True))
        video_thread.start()
        threads.append(video_thread)

        # Recognizing speech (blocks until the service responds)
        speech_client = SpeechRecClient()
        sentence = speech_client.make_request(userdata.lang_code_in)

        # Terminate Video Thread: raise the stop flag, wait for the
        # thread, then clear the flag for the next user.
        rospy.set_param(STOP_THREAD, True)
        for process in threads:
            process.join()
        rospy.set_param(STOP_THREAD, False)

        userdata.sentence_out = sentence
        return 'interpretation'
    def text_to_speech_driver(self, request):
        """
        Service call: speak the requested text while animating the display.

        Keyword arguments:
        request -- ROS Message
        """
        # Launch Display controller
        display_controller = DisplayController()

        # Synthesize the audio file for the requested text, then measure
        # how long it will play.
        self.text_to_speech(request)
        duration = self.get_duration()

        # Workers for multi-threaded playback.
        workers = []

        # Video playback is skipped while face training owns the display.
        if not rospy.get_param(FACE_TRAINING_ACTIVE):
            workers.append(Thread(target=display_controller.play_video,
                                  args=[duration]))

        # Audio always plays.
        workers.append(Thread(target=self.play_audio))

        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        return True
示例#8
0
    def execute(self, userdata):
        """
        Execute function called in the state machine

        Key arguments:
        userdata -- state machine userdata object being passed around

        Asks the user for their name, confirms it, runs face training on
        the captured video feed, and returns the next state label
        'face_training'.
        """
        # NOTE(review): this log message says "Translating Sentence" but the
        # state performs face training — confirm intent.
        rospy.logdebug("Translating Sentence")
        rospy.loginfo(self.output_msg + "\n")

        # Get your name to save your face
        speech_client = SpeechRecClient()
        response = ''
        name = ''
        # Keep asking until the user confirms the recognized name with 'yes'.
        while response.lower() != 'yes':
            # What is your name?
            self.output_msg = \
                df[(df['Type'] == 'FACE_NAME') & (df['Language'] == userdata.lang_code_in)]['Message'].sample(1).iloc[0]
            audio_out_client.make_request(self.output_msg,
                                          userdata.lang_code_in)
            name = speech_client.make_request(userdata.lang_code_in)

            # Is that name correct?
            self.output_msg = \
                df[(df['Type'] == 'FACE_VERIFICATION') & (df['Language'] == userdata.lang_code_in)]['Message'].sample(
                    1).iloc[0]
            self.output_msg = name + ". " + self.output_msg
            audio_out_client.make_request(self.output_msg,
                                          userdata.lang_code_in)
            response = speech_client.make_request(userdata.lang_code_in)

        response = ''
        # Start Video Feed Thread (FACE_TRAINING_ACTIVE signals other
        # components that the training feed owns the display)
        threads = []
        display_controller = DisplayController()
        rospy.set_param(FACE_TRAINING_ACTIVE, True)
        video_thread = Thread(target=display_controller.play_video,
                              args=(None, True))
        video_thread.start()
        threads.append(video_thread)

        # Ask for confirmation, when in video frame; any non-empty spoken
        # reply breaks the loop.
        while response.lower() == '':
            self.output_msg = df[(df['Type'] == 'FACE_IN_FRAME')
                                 & (df['Language'] == userdata.lang_code_in
                                    )]['Message'].sample(1).iloc[0]
            audio_out_client.make_request(self.output_msg,
                                          userdata.lang_code_in)
            response = speech_client.make_request(userdata.lang_code_in)

        # Launch Face Training; elevator.wav plays while training runs.
        face_client = FaceTrainingClient()
        audio_out = AudioOutService(
            in_file=root_path + 'src/state_machine/src/resources/elevator.wav')
        face_client.make_request(name)
        audio_out.play_audio()
        rospy.set_param(TRAINING_DONE, False)

        # Terminate Video Feed Thread by clearing the active flag, then
        # wait for it to exit.
        rospy.set_param(FACE_TRAINING_ACTIVE, False)
        for process in threads:
            process.join()

        # Audio out: announce that training is done.
        self.output_msg = \
            df[(df['Type'] == 'FACE_TRAINING_DONE') & (df['Language'] == userdata.lang_code_in)]['Message'].sample(
                1).iloc[
                0]
        audio_out_client.make_request(self.output_msg, userdata.lang_code_in)

        return 'face_training'
def add_variables(handler):
    '''Attach shared objects to the web.py request context.

    Adds the display controller and the web-server-only-mode flag to
    web.ctx so they can be accessed by the endpoint handlers.
    '''
    web.ctx.display_controller = display_controller
    # When set to True the endpoints don't interact with the display
    # controller as it's not running.
    web.ctx.web_server_only_mode = False
    return handler()


def run_display(dc):
    '''Run the display controller's main loop.

    Blocks until the controller stops, so it should be run on a
    different thread (see the __main__ block below).
    '''
    dc.run()

if __name__ == "__main__":
    print('creating display controller')
    display_controller = DisplayController()

    p1 = threading.Thread(target=run_display, kwargs={'dc': display_controller})
    print('starting display controller thread')
    p1.start()
    print('finished starting display controller thread')

    print('starting webserver')
    # render = web.template.render(os.path.join(get_project_path(), 'webserver/templates/'))
    render = web.template.render('webserver/templates/')

    urls = (
        '/', 'Index',
        '/change-mode/', 'ChangeMode',
        '/custom-text/', 'CustomText',
        '/shutdown/', 'Shutdown',
示例#10
0
def listen_print_loop(responses):
    """Iterates through server responses and prints them.

    The responses passed is a generator that will block until a response
    is provided by the server.

    Each response may contain multiple results, and each result may contain
    multiple alternatives; for details, see https://goo.gl/tjCPAU.  Here we
    print only the transcription for the top alternative of the top result.

    In this case, responses are provided for interim results as well. If the
    response is an interim one, print a carriage return at the end of it, to
    allow the next result to overwrite it, until the response is a final one.
    For the final one, print a newline, push it to the display with a
    trailing "?", and return the finalized transcription.
    """
    num_chars_printed = 0
    display_controller = DisplayController()
    for response in responses:
        if not response.results:
            continue

        # The `results` list is consecutive. For streaming, we only care about
        # the first result being considered, since once it's `is_final`, it
        # moves on to considering the next utterance.
        result = response.results[0]
        if not result.alternatives:
            continue

        # Display the transcription of the top alternative.
        transcript = result.alternatives[0].transcript

        # If the previous result was longer than this one, pad with spaces
        # so the leftovers of the previous console line are overwritten.
        overwrite_chars = ' ' * (num_chars_printed - len(transcript))

        if not result.is_final:
            # Interim result: rewrite the current console line and push the
            # partial transcript to the display.
            sys.stdout.write(transcript + overwrite_chars + '\r')
            rospy.set_param(FACE_TRAINING_ACTIVE, False)
            rospy.set_param(STOP_THREAD, True)
            display_controller.generate_png(transcript.capitalize() + overwrite_chars)
            display_controller.send_image()
            sys.stdout.flush()

            num_chars_printed = len(transcript)

        else:
            # Final result: display it and return the transcription.
            # (The original code after this `return` — including a reset of
            # num_chars_printed — was unreachable and has been removed.)
            print(transcript + overwrite_chars)
            display_controller.generate_png(transcript.capitalize() + overwrite_chars + "?")
            display_controller.send_image()
            return transcript + overwrite_chars
示例#11
0
    def __init__(self, config):
        """Wire up doors, master controller, display and status receiver
        from the given configuration.

        Key arguments:
        config -- ConfigParser-style object with a 'Master Controller'
                  section, one section per door (type = door), and optional
                  'Display' and 'Status Receiver' sections

        Exits the process with status 1 when no master controller is
        configured.
        """
        self.logger = logging.getLogger('logger')
        self.logger.info('Starting lockd')

        #add_custom_print_exception()

        serialdevice = config.get('Master Controller', 'serialdevice')
        baudrate = config.get('Master Controller', 'baudrate')

        self.serial_interface = serialinterface.SerialInterface(serialdevice, baudrate, timeout=.1)

        # Shared queue through which all input events flow.
        self.input_queue = Queue.Queue()

        # Feeds commands received on UDP 127.0.0.1:2323 into the input
        # queue; constructed for its side effects only.
        udpcommand = UDPCommand('127.0.0.1', 2323, self.input_queue)

        # Doors keyed by their configured address.
        self.doors = {}

        self.master = None

        display = None
        
        self.display_controller = None

        self.logic = DoorLogic()

        # Walk every config section and instantiate what it describes.
        for section in config.sections():
            if config.has_option(section, 'type'):
                t = config.get(section, 'type')
                if t == 'door':
                    door_name = section
                    self.logger.debug('Adding door "%s"'%door_name)
                    buttons = {1: 'manual_control', 2: 'bell_code'}
                    door = Door(door_name, config, self.serial_interface, self.input_queue, buttons)
                    door_address = config.get(door_name, 'address')
                    self.doors[door_address] = door
                    self.logic.add_door(door)
                else:
                    self.logger.warning('Unknown entry type "%s"', t)
            elif section == 'Master Controller':
                #txseq = int(config.get(section, 'txsequence'))
                #rxseq = int(config.get(section, 'rxsequence'))
                #key = config.get(section, 'key')
                
                # Button pin -> button name mapping.
                buttons_section = 'Master Controller Buttons'
                buttons = {}
                for button_name in config.options(buttons_section):
                    button_pin = int(config.get(buttons_section, button_name))
                    buttons[button_pin] = button_name

                # LED name -> LED pin mapping (note: inverted relative to
                # the buttons dict above).
                leds_section = 'Master Controller LEDs'
                leds = {}
                for led_name in config.options(leds_section):
                    led_pin = int(config.get(leds_section, led_name))
                    leds[led_name] = led_pin
                
                self.master = MasterController(self.serial_interface, self.input_queue, buttons, leds) 
            
            elif section == 'Display':
                # NOTE(review): max_update_rate is only bound when a
                # 'Display' section exists; its use further down is guarded
                # by `display != None`, which can only be set here.
                display_type = config.get(section, 'display_type') 
                max_update_rate = float(config.get(section, 'max_update_rate'))
                if display_type == "Nokia_1600":
                    from display import Display
                    display = Display(self.serial_interface)
                elif display_type == 'simulation':
                    from display_pygame import Display
                    display = Display()
                elif display_type == 'network':
                    from display_network import Display
                    display = Display()
                elif display_type == 'None':
                    display = None
                else:
                    self.logger.warning('Unknown display type "%s"', display_type)
            elif section == 'Status Receiver':
                host = config.get(section, 'host')
                port = int(config.get(section, 'port'))
                self.announcer = Announcer(host, port)
                self.logic.add_state_listener(self.announcer.update_state)
        
        # A master controller is mandatory; bail out without one.
        if self.master == None:
            self.logger.error('Please specify a self.master controller')
            sys.exit(1)

        self.interface_logic = UserInterfaceLogic(self.master)
        self.logic.add_state_listener(self.interface_logic.update_state)
        
        # Display is optional; wire the display logic only when present.
        if display != None:
            self.display_controller = DisplayController(display, max_update_rate)
            self.display_logic = DisplayLogic(self.display_controller)
            self.logic.add_state_listener(self.display_logic.update_state)
            for door in self.doors.values():
                self.display_logic.add_door(door)

        else:
            self.logger.warning('No display specified.')

        # Kick the state machine with an initial 'down' command.
        self.input_queue.put({'origin_name': 'init',
                        'origin_type': DoorLogic.Origin.INTERNAL,
                        'input_name': '',
                        'input_type': DoorLogic.Input.COMMAND,
                        'input_value': 'down'})
示例#12
0
                timing["last_update"] = time.time()

        self.stop()
        # self.strip.show(bytearray(STRIP_LENGTH * 4))

        # self.strip.close()

    def stop(self):
        """Blank the LED strip and release it."""
        blank_frame = ImageLoader.black()
        self.strip.show(blank_frame)
        self.strip.close()


if __name__ == "__main__":
    from motor_controller import MotorController
    mc = MotorController()

    # Spin the fan motor up; sync_speed presumably waits for the speed to
    # stabilize — TODO confirm against MotorController.
    mc.connect()
    mc.set_motor_speed(1700)
    mc.sync_speed(5)

    # Play the "KfirRam" sequence for 10 seconds, then release the display.
    display_controller = DisplayController()
    fan = PovFan()
    fan.load_sequence("KfirRam", 1)
    # fan.load_sequence("shburit", 1)
    fan.play(10, display_controller)
    display_controller.close()

    # mc.set_motor_speed(1600)
    # mc = MotorController()
    # mc.connect()