def _reset_hook(self):
    """Reset per-game color state and choose the variant for the next game."""
    self.__game_color_helper.reset()
    self.__game_color_mode = GameColorHelper.determine_game_color_mode(
        Config.get('game_of_life.game_color_mode'))

    # Fall back to a random variant when the configured one is unknown
    # (or explicitly set to 'random').
    chosen_variant = Config.get('game_of_life.variant', 'random')
    if chosen_variant not in self.VARIANTS:
        chosen_variant = random.choice(self.VARIANTS)
    self.__variant = chosen_variant
def __populate_frames(self, frames, ffmpeg_to_python_fifo, vid_start_time,
                      bytes_per_frame, np_array_shape):
    """Read at most one frame from the ffmpeg fifo into the frames buffer.

    Returns [is_ffmpeg_done_outputting, vid_start_time].
    """
    # Non-blocking poll of the fifo; bail out if no frame data is ready yet.
    readable = select.select([ffmpeg_to_python_fifo], [], [], 0)[0]
    if not readable:
        return [False, vid_start_time]

    ffmpeg_output = ffmpeg_to_python_fifo.read(bytes_per_frame)
    if ffmpeg_output:
        # A partial frame means something went wrong upstream.
        if len(ffmpeg_output) < bytes_per_frame:
            raise Exception(
                'Expected {} bytes from ffmpeg output, but got {}.'.format(
                    bytes_per_frame, len(ffmpeg_output)))
    else:
        # EOF from ffmpeg: the video is fully processed.
        self.__logger.info("no ffmpeg_output, end of video processing.")
        if vid_start_time is None:
            # under rare circumstances, youtube-dl might fail and we end up in this code path.
            self.__logger.error(
                "No vid_start_time set. Possible yt-dl crash. See: https://github.com/ytdl-org/youtube-dl/issues/24780"
            )
            vid_start_time = 0  # set this so that __process_and_play_video doesn't endlessly loop
        return [True, vid_start_time]

    if vid_start_time is None:
        # Start the video clock as soon as we see ffmpeg output. Ffplay probably sent its
        # first audio data at around the same time so they stay in sync.
        # Add time for better audio / video sync
        audio_sync_offset = 0.075 if Config.get('video.should_play_audio',
                                                True) else 0
        vid_start_time = time.time() + audio_sync_offset

    frames.append(
        np.frombuffer(ffmpeg_output, np.uint8).reshape(np_array_shape))
    return [False, vid_start_time]
def __init__(self, url, clear_screen):
    """Set up playback state and helpers for the given video url."""
    self.__logger = Logger().set_namespace(self.__class__.__name__)
    self.__url = url

    video_color_mode = Config.get('video.color_mode',
                                  VideoColorMode.COLOR_MODE_COLOR)
    self.__led_frame_player = LedFramePlayer(
        clear_screen=clear_screen, video_color_mode=video_color_mode)

    self.__process_and_play_vid_proc_pgid = None
    self.__init_time = time.time()

    # True if the video already exists (see config value: "video.should_save_video")
    self.__is_video_already_downloaded = False

    self.__do_housekeeping(clear_screen)
    self.__register_signal_handlers()
def __init__(self):
    """Start the HTTP(S) server: port 80 plain, or port 443 with TLS when
    server.use_ssl is enabled."""
    self.__secure = Config.get('server.use_ssl', False)
    port = 443 if self.__secure else 80
    self.__server = PifiThreadingHTTPServer(('0.0.0.0', port),
                                            PifiServerRequestHandler)
    if self.__secure:
        # ssl.wrap_socket was deprecated in python 3.7 and removed in 3.12;
        # use an SSLContext instead. PROTOCOL_TLS_SERVER selects the best
        # mutually supported TLS version for a server-side socket.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(
            certfile=Config.get_or_throw('server.certfile'),
            keyfile=Config.get_or_throw('server.keyfile'))
        self.__server.socket = context.wrap_socket(self.__server.socket,
                                                   server_side=True)
def __init__(self):
    """Instantiate the screensavers that are enabled in config."""
    self.__logger = Logger().set_namespace(self.__class__.__name__)

    # Ensure only one instance of the LedFramePlayer is used across all screensavers.
    # See: https://github.com/dasl-/pifi/commit/fd48ba5b41bba6c6aa0034d743e40de153482f21
    self.__led_frame_player = LedFramePlayer()

    enabled_types = Config.get("screensavers.screensavers",
                               ["game_of_life", "cyclic_automaton"])
    # Insertion order matters: game_of_life is constructed before cyclic_automaton.
    available = {
        "game_of_life": GameOfLife,
        "cyclic_automaton": CyclicAutomaton,
    }
    self.__screensavers = [
        screensaver_cls(led_frame_player=self.__led_frame_player)
        for name, screensaver_cls in available.items() if name in enabled_types
    ]
def process_and_play(self):
    """Download the video if configured to, then play it, retrying once after
    updating youtube-dl on a YoutubeDlException."""
    self.__logger.info(f"Starting process_and_play for url: {self.__url}")
    self.__led_frame_player.show_loading_screen()

    video_save_path = self.__get_video_save_path()
    if os.path.isfile(video_save_path):
        self.__logger.info(
            f'Video has already been downloaded. Using saved video: {video_save_path}'
        )
        self.__is_video_already_downloaded = True
    elif Config.get('video.should_predownload_video', False):
        download_command = self.__get_streaming_video_download_cmd(
        ) + ' > ' + shlex.quote(self.__get_video_save_path())
        self.__logger.info(f'Downloading video: {download_command}')
        subprocess.call(download_command,
                        shell=True,
                        executable='/usr/bin/bash')
        self.__logger.info(f'Video download complete: {video_save_path}')
        self.__is_video_already_downloaded = True

    attempt = 1
    max_attempts = 2
    clear_screen = True
    while attempt <= max_attempts:
        try:
            self.__process_and_play_video()
            clear_screen = True
            break
        except YoutubeDlException:
            if attempt < max_attempts:
                self.__logger.warning(
                    "Caught exception in VideoProcessor.__process_and_play_video: "
                    + traceback.format_exc())
                self.__logger.warning(
                    "Updating youtube-dl and retrying video...")
                self.__led_frame_player.show_loading_screen()
                # Keep the loading screen up across the retry.
                clear_screen = False
                self.__update_youtube_dl()
            if attempt >= max_attempts:
                clear_screen = True
                # Bare raise preserves the original traceback
                # (`raise e` would re-anchor it here).
                raise
        finally:
            # Housekeeping runs on success, retry, and final failure alike.
            self.__do_housekeeping(clear_screen=clear_screen)
            attempt += 1
    self.__logger.info("Finished process_and_play")
def __get_ffmpeg_pixel_conversion_cmd(self):
    """Build the ffmpeg command that turns stdin video into raw, LED-sized
    frames on stdout."""
    if VideoColorMode.is_color_mode_rgb(
            Config.get('video.color_mode', VideoColorMode.COLOR_MODE_COLOR)):
        pix_fmt = 'rgb24'
    else:
        pix_fmt = 'gray'

    # Scale the video down to the LED matrix dimensions.
    scale_filter = ('scale=' + str(Config.get_or_throw('leds.display_width')) +
                    'x' + str(Config.get_or_throw('leds.display_height')))

    return (self.get_standard_ffmpeg_cmd() + ' '
            '-i pipe:0 ' +  # read input video from stdin
            '-filter:v ' + shlex.quote(scale_filter) + " "
            '-c:a copy ' +  # don't process the audio at all
            '-f rawvideo -pix_fmt ' + shlex.quote(pix_fmt) + " "  # output in numpy compatible byte format
            'pipe:1'  # output to stdout
            )
def __init__(self, clear_screen=True):
    """Initialize the APA102 LED strip driver and precompute write-order data."""
    num_leds = (Config.get_or_throw('leds.display_width') *
                Config.get_or_throw('leds.display_height'))
    self.__pixels = apa102.APA102(num_led=num_leds,
                                  mosi=self.__MOSI_PIN,
                                  sclk=self.__SCLK_PIN,
                                  order=self.__LED_ORDER)

    brightness = Config.get('leds.brightness', 3)
    self.__pixels.set_global_brightness(brightness)
    if clear_screen:
        self.clear_screen()

    # Look up the order in which to write each color value to the LED strip.
    # It's 1-indexed, so subtract by 1.
    self.__color_order = [
        channel - 1 for channel in apa102.RGB_MAP[self.__LED_ORDER]
    ]

    # Calculate the LED start "frame": 3 1 bits followed by 5 brightness bits. See
    # set_pixel in the apa102 implementation for this calculation.
    self.__ledstart = (brightness & 0b00011111) | self.__pixels.LED_START
def _seed_hook(self):
    """Randomly seed the game board.

    The board carries an extra edge cell on all sides to simplify the
    neighborhood calculation and avoid edge checks; only the interior
    (board[1:-1, 1:-1]) is seeded.
    """
    shape = [
        Config.get_or_throw('leds.display_height') + 2,
        Config.get_or_throw('leds.display_width') + 2
    ]
    self._board = np.zeros(shape, np.uint8)
    probability = Config.get('game_of_life.seed_liveness_probability', 1 / 3)

    # Hoist the repeated interior-shape computation and slice out the
    # playable area once; mask assignment on the view writes through to
    # self._board.
    inner_shape = [x - 2 for x in shape]
    interior = self._board[1:-1, 1:-1]
    if self.__variant == self.__VARIANT_IMMIGRATION:
        # Two species, each seeded at half the liveness probability. Where
        # the two random seeds overlap, species 2 wins (written last) —
        # matching the original behavior.
        interior[np.random.random_sample(inner_shape) < (probability / 2)] = 1
        interior[np.random.random_sample(inner_shape) < (probability / 2)] = 2
    else:  # __VARIANT_NORMAL
        interior[np.random.random_sample(inner_shape) < probability] = 1
def __init__(self, server_unix_socket_fd, playlist_video, settings):
    """Set up a snake game: players, sounds, colors, and the LED frame player.

    server_unix_socket_fd: fd of an already-created server unix socket that
        player connections arrive on.
    playlist_video: the playlist item this game session belongs to —
        TODO(review): confirm against callers; it is only stored here.
    settings: game settings dict; must contain 'num_players'.
    """
    self.__logger = Logger().set_namespace(self.__class__.__name__)
    # Per-game state counters.
    self.__num_ticks = 0
    self.__eliminated_snake_count = 0
    self.__last_eliminated_snake_sound = None
    self.__apple = None
    self.__apples_eaten_count = 0
    self.__game_color_helper = GameColorHelper()
    self.__playlist_video = playlist_video
    self.__players = []
    self.__settings = settings

    # Rehydrate the server socket from the fd handed to us.
    server_unix_socket = socket.socket(fileno=server_unix_socket_fd)
    # The timeout is not "inherited" from the socket_fd that was given to us, thus we have to set it again.
    UnixSocketHelper().set_server_unix_socket_timeout(server_unix_socket)
    # One SnakePlayer per configured player slot; all share the server socket.
    for i in range(self.__settings['num_players']):
        self.__players.append(
            SnakePlayer(i, server_unix_socket, self, settings))

    # why do we use both simpleaudio and pygame mixer? see: https://github.com/dasl-/pifi/blob/main/utils/sound_test.py
    mixer.init(frequency=22050, buffer=512)
    # Randomly pick one of the bundled background tracks.
    background_music_file = secrets.choice([
        'dragon_quest4_05_town.wav',
        'dragon_quest4_04_solitary_warrior.wav',
        'dragon_quest4_19_a_pleasant_casino.wav',
        'radia_senki_reimei_hen_06_unknown_village_elfas.wav',
        'the_legend_of_zelda_links_awakening_04_mabe_village_loop.wav',
    ])
    self.__background_music = mixer.Sound(
        DirectoryUtils().root_dir +
        "/assets/snake/{}".format(background_music_file))

    self.__game_color_mode = GameColorHelper.determine_game_color_mode(
        Config.get('snake.game_color_mode'))
    self.__led_frame_player = LedFramePlayer()
    self.__register_signal_handlers()
def __transform_frame(self, frame):
    """Gamma correct a raw video frame and expand it to an RGB LED frame.

    Color modes take an (H, W, 3) frame; monochrome modes take an (H, W)
    frame whose single channel is mapped onto one or all output channels —
    TODO(review): shapes inferred from the indexing below; confirm upstream.
    Applies the configured flip_x / flip_y transforms before returning.
    """
    if not VideoColorMode.is_color_mode_rgb(self.__video_color_mode):
        # Monochrome modes pick a gamma curve per-frame based on frame content.
        gamma_index = self.__gamma_controller.getGammaIndexForMonochromeFrame(
            frame)

    shape = [
        Config.get_or_throw('leds.display_height'),
        Config.get_or_throw('leds.display_width'), 3
    ]
    transformed_frame = np.zeros(shape, np.uint8)

    color_mode = self.__video_color_mode
    if color_mode in (VideoColorMode.COLOR_MODE_COLOR,
                      VideoColorMode.COLOR_MODE_INVERT_COLOR):
        # Full color: gamma correct each channel independently, optionally
        # inverting the input first.
        source = (frame if color_mode == VideoColorMode.COLOR_MODE_COLOR else
                  255 - frame)
        transformed_frame[:, :, 0] = np.take(self.__scale_red_gamma_curve,
                                             source[:, :, 0])
        transformed_frame[:, :, 1] = np.take(self.__scale_green_gamma_curve,
                                             source[:, :, 1])
        transformed_frame[:, :, 2] = np.take(self.__scale_blue_gamma_curve,
                                             source[:, :, 2])
    else:
        # Monochrome modes: map the single input channel onto these output
        # channels, optionally inverting. This table replaces six
        # near-duplicate if/elif branches.
        mono_modes = {
            VideoColorMode.COLOR_MODE_R: ((0, ), False),
            VideoColorMode.COLOR_MODE_G: ((1, ), False),
            VideoColorMode.COLOR_MODE_B: ((2, ), False),
            VideoColorMode.COLOR_MODE_BW: ((0, 1, 2), False),
            VideoColorMode.COLOR_MODE_INVERT_BW: ((0, 1, 2), True),
        }
        if color_mode not in mono_modes:
            raise Exception(
                f'Unexpected color mode: {self.__video_color_mode}.')
        channels, invert = mono_modes[color_mode]
        gamma_curves = (
            self.__scale_red_gamma_curves[gamma_index],
            self.__scale_green_gamma_curves[gamma_index],
            self.__scale_blue_gamma_curves[gamma_index],
        )
        source = (255 - frame[:, :]) if invert else frame[:, :]
        for channel in channels:
            transformed_frame[:, :, channel] = np.take(gamma_curves[channel],
                                                       source)

    flips = ()
    if Config.get('leds.flip_y', False):
        flips += (0, )
    if Config.get('leds.flip_x', False):
        flips += (1, )
    if flips:
        transformed_frame = np.flip(transformed_frame, flips)
    return transformed_frame
def _get_game_over_detection_lookback_amount(self):
    """Return how many past states to examine when detecting game over."""
    lookback = Config.get(
        'cyclic_automaton.game_over_detection_lookback',
        self._DEFAULT_GAME_OVER_LOOKBACK_DETECTION_AMOUNT)
    return lookback
def _should_fade_to_frame(self):
    """Return whether frames should fade in, per cyclic_automaton config."""
    should_fade = Config.get('cyclic_automaton.fade',
                             self._DEFAULT_SHOULD_FADE_TO_FRAME)
    return should_fade
def _get_tick_sleep_seconds(self):
    """Return seconds to sleep between ticks, per cyclic_automaton config."""
    tick_sleep = Config.get('cyclic_automaton.tick_sleep',
                            self._DEFAULT_TICK_SLEEP_SECONDS)
    return tick_sleep
def __process_and_play_video(self):
    """Run the shell playback pipeline and pace frames out to the LEDs.

    Spawns the download/probe/play shell pipeline, reads raw frames from the
    ffmpeg fifo into a circular buffer, and plays them against the video
    clock established in __populate_frames. Raises YoutubeDlException if the
    pipeline exits non-zero (likely a stale youtube-dl).
    """
    ffmpeg_to_python_fifo_name = self.__make_fifo(
        additional_prefix='ffmpeg_to_python')
    fps_fifo_name = self.__make_fifo(additional_prefix='fps')

    process_and_play_vid_cmd = self.__get_process_and_play_vid_cmd(
        ffmpeg_to_python_fifo_name, fps_fifo_name)
    self.__logger.info('executing process and play cmd: ' +
                       process_and_play_vid_cmd)
    process_and_play_vid_proc = subprocess.Popen(process_and_play_vid_cmd,
                                                 shell=True,
                                                 executable='/usr/bin/bash',
                                                 start_new_session=True)
    # Store the PGID separately, because attempting to get the PGID later via `os.getpgid` can
    # raise `ProcessLookupError: [Errno 3] No such process` if the process is no longer running
    self.__process_and_play_vid_proc_pgid = os.getpgid(
        process_and_play_vid_proc.pid)

    display_width = Config.get_or_throw('leds.display_width')
    display_height = Config.get_or_throw('leds.display_height')
    # One byte per pixel for monochrome modes; three bytes per pixel for RGB.
    bytes_per_frame = display_width * display_height
    np_array_shape = [display_height, display_width]
    if VideoColorMode.is_color_mode_rgb(
            Config.get('video.color_mode', VideoColorMode.COLOR_MODE_COLOR)):
        bytes_per_frame = bytes_per_frame * 3
        np_array_shape.append(3)

    vid_start_time = None
    last_frame = None
    vid_processing_lag_counter = 0
    is_ffmpeg_done_outputting = False
    frames = ReadOnceCircularBuffer(self.__FRAMES_BUFFER_LENGTH)
    ffmpeg_to_python_fifo = open(ffmpeg_to_python_fifo_name, 'rb')
    # Blocks until the pipeline's ffprobe writes the FPS into the fifo.
    fps = self.__read_fps_from_fifo(fps_fifo_name)
    frame_length = 1 / fps
    # Signal the shell pipeline that FPS calculation is done and data may flow.
    pathlib.Path(self.__FPS_READY_FILE).touch()

    while True:
        # Fill the buffer unless ffmpeg is done or the buffer is already full.
        if is_ffmpeg_done_outputting or frames.is_full():
            pass
        else:
            is_ffmpeg_done_outputting, vid_start_time = self.__populate_frames(
                frames, ffmpeg_to_python_fifo, vid_start_time,
                bytes_per_frame, np_array_shape)

        if vid_start_time is None:
            # video has not started being processed yet
            pass
        else:
            if self.__init_time:
                # Log the startup latency once, on the first played frame.
                self.__logger.info(
                    f"Started playing video after {round(time.time() - self.__init_time, 3)} s."
                )
                self.__init_time = None
            is_video_done_playing, last_frame, vid_processing_lag_counter = self.__play_video(
                frames, vid_start_time, frame_length,
                is_ffmpeg_done_outputting, last_frame,
                vid_processing_lag_counter)
            if is_video_done_playing:
                break

    self.__logger.info("Waiting for process_and_play_vid_proc to end...")
    while True:  # Wait for proc to end
        if process_and_play_vid_proc.poll() is not None:
            if process_and_play_vid_proc.returncode != 0:
                raise YoutubeDlException(
                    "The process_and_play_vid_proc process exited non-zero: "
                    +
                    f"{process_and_play_vid_proc.returncode}. This could mean an issue with youtube-dl; "
                    + "it may require updating.")
            self.__logger.info("The process_and_play_vid_proc proc ended.")
            break
        time.sleep(0.1)
def __get_process_and_play_vid_cmd(self, ffmpeg_to_python_fifo_name,
                                   fps_fifo_name):
    """Build the full shell pipeline string for downloading / reading the
    video, probing its FPS, playing audio, converting frames for the LEDs,
    and optionally saving the download.

    ffmpeg_to_python_fifo_name: fifo the LED frame bytes are written to.
    fps_fifo_name: fifo ffprobe writes the video's frame rate to.
    """
    video_save_path = self.__get_video_save_path()
    vid_data_cmd = None
    if self.__is_video_already_downloaded:
        # Read from the previously saved file instead of re-downloading.
        vid_data_cmd = '< {} '.format(shlex.quote(video_save_path))
    else:
        vid_data_cmd = self.__get_streaming_video_download_cmd() + ' | '

    # Explanation of the FPS calculation pipeline:
    #
    # cat - >/dev/null: Prevent tee from exiting uncleanly (SIGPIPE) after ffprobe has finished probing.
    #
    # mbuffer: use mbuffer so that writes to ffprobe are not blocked by shell pipeline backpressure.
    # Note: ffprobe may need to read a number of bytes proportional to the video size, thus there may
    # be no buffer size that works for all videos (see: https://stackoverflow.com/a/70707003/627663 )
    # But our current buffer size works for videos that are ~24 hours long, so it's good enough in
    # most cases. Something fails for videos that are 100h+ long, but I believe it's unrelated to
    # mbuffer size -- those videos failed even with our old model of calculating FPS separately from
    # the video playback pipeline. See: https://github.com/yt-dlp/yt-dlp/issues/3390
    #
    # while true ... : The pipeline will wait until a signal is given (the existence of the __FPS_READY_FILE)
    # before data is emitted downstream. The signal will be given once the videoprocessor has finished
    # calculating the FPS of the video. The FPS is calculated by ffprobe and communicated to the
    # videoprocessor via the fps_fifo_name fifo.
    ffprobe_cmd = f'ffprobe -v 0 -of csv=p=0 -select_streams v:0 -show_entries stream=r_frame_rate - > {fps_fifo_name}'
    ffprobe_mbuffer = self.__get_mbuffer_cmd(1024 * 1024 * 50,
                                             '/tmp/mbuffer-ffprobe.out')
    fps_cmd = (
        f'tee >( {ffprobe_cmd} && cat - >/dev/null ) | {ffprobe_mbuffer} | ' +
        f'{{ while true ; do [ -f {self.__FPS_READY_FILE} ] && break || sleep 0.1 ; done && cat - ; }} | '
    )

    maybe_play_audio_tee = ''
    if Config.get('video.should_play_audio', True):
        # Add mbuffer because otherwise the ffplay command blocks the whole pipeline. Because
        # audio can only play in real-time, this would block ffmpeg from processing the frames
        # as fast as it otherwise could. This prevents us from building up a big enough buffer
        # in the frames circular buffer to withstand blips in performance. This
        # ensures the circular buffer will generally get filled, rather than lingering around
        # only ~70 frames full. Makes it less likely that we will fall behind in video
        # processing.
        maybe_play_audio_tee = (">( " + self.__get_mbuffer_cmd(
            1024 * 1024 * 10, '/tmp/mbuffer-ffplay.out') + ' | ' +
                                self.__get_ffplay_cmd() + " ) ")

    # Branch of the tee that converts pixels for python to read via the fifo.
    ffmpeg_tee = f'>( {self.__get_ffmpeg_pixel_conversion_cmd()} > {ffmpeg_to_python_fifo_name} ) '

    maybe_save_video_tee = ''
    maybe_mv_saved_video_cmd = ''
    if Config.get('video.should_save_video',
                  False) and not self.__is_video_already_downloaded:
        self.__logger.info(f'Video will be saved to: {video_save_path}')
        # Download to a temp path, then mv into place only on success so a
        # partial download is never mistaken for a complete one.
        temp_video_save_path = video_save_path + self.__TEMP_VIDEO_DOWNLOAD_SUFFIX
        maybe_save_video_tee = shlex.quote(temp_video_save_path) + ' '
        maybe_mv_saved_video_cmd = '&& mv ' + shlex.quote(
            temp_video_save_path) + ' ' + shlex.quote(video_save_path)

    process_and_play_vid_cmd = ('set -o pipefail && export SHELLOPTS && ' +
                                vid_data_cmd + fps_cmd + "tee " +
                                maybe_play_audio_tee + ffmpeg_tee +
                                maybe_save_video_tee + "> /dev/null " +
                                maybe_mv_saved_video_cmd)
    return process_and_play_vid_cmd
def _should_fade_to_frame(self):
    """Return whether frames should fade in, per game_of_life config."""
    should_fade = Config.get('game_of_life.fade',
                             self._DEFAULT_SHOULD_FADE_TO_FRAME)
    return should_fade
def _get_tick_sleep_seconds(self):
    """Return seconds to sleep between ticks, per game_of_life config."""
    tick_sleep = Config.get('game_of_life.tick_sleep',
                            self._DEFAULT_TICK_SLEEP_SECONDS)
    return tick_sleep