Example #1
0
    def _upscale_frames(self, input_directory: pathlib.Path,
                        output_directory: pathlib.Path):
        """Upscale video frames with waifu2x-caffe

        This function upscales all the frames extracted
        by ffmpeg using the waifu2x-caffe binary.

        Args:
            input_directory (pathlib.Path): directory containing frames to upscale
            output_directory (pathlib.Path): directory to which upscaled frames should be exported

        Raises:
            UnrecognizedDriverError: raised when the given driver is not recognized
            e: exception captured during upscaling, re-raised after cleanup completes in this scope
        """

        # initialize waifu2x driver
        if self.driver not in AVAILABLE_DRIVERS:
            raise UnrecognizedDriverError(
                _("Unrecognized driver: {}").format(self.driver))

        # list all images in the extracted frames
        frames = [f for f in input_directory.iterdir() if f.is_file()]

        # if we have fewer images than processes,
        # create only the processes necessary
        if len(frames) < self.processes:
            self.processes = len(frames)

        # create a directory for each process and append directory
        # name into a list
        process_directories = []
        for process_id in range(self.processes):
            process_directory = input_directory / str(process_id)
            process_directories.append(process_directory)

            # delete old directories and create new directories
            if process_directory.is_dir():
                shutil.rmtree(process_directory)
            process_directory.mkdir(parents=True, exist_ok=True)

        # waifu2x-converter-cpp will perform multi-threading within its own process
        if self.driver in [
                "waifu2x_converter_cpp",
                "waifu2x_ncnn_vulkan",
                "srmd_ncnn_vulkan",
                "realsr_ncnn_vulkan",
                "anime4kcpp",
        ]:
            process_directories = [input_directory]

        else:
            # evenly distribute images into each directory
            # until there are none left
            for image in frames:
                # move image
                image.rename(process_directories[0] / image.name)
                # rotate list
                process_directories = (process_directories[-1:] +
                                       process_directories[:-1])

        # create driver processes and start them
        for process_directory in process_directories:
            self.process_pool.append(
                self.driver_object.upscale(process_directory,
                                           output_directory))

        # start progress bar in a different thread
        Avalon.debug_info(_("Starting progress monitor"))
        self.progress_monitor = ProgressMonitor(self, process_directories)
        self.progress_monitor.start()

        # create the clearer and start it
        Avalon.debug_info(_("Starting upscaled image cleaner"))
        self.image_cleaner = ImageCleaner(input_directory, output_directory,
                                          len(self.process_pool))
        self.image_cleaner.start()

        # wait for all processes to exit
        try:
            self._wait()
        except (Exception, KeyboardInterrupt, SystemExit) as e:
            # cleanup
            Avalon.debug_info(_("Killing progress monitor"))
            self.progress_monitor.stop()

            Avalon.debug_info(_("Killing upscaled image cleaner"))
            self.image_cleaner.stop()
            raise e

        # if the driver is waifu2x-converter-cpp
        # images need to be renamed to be recognizable for FFmpeg
        if self.driver == "waifu2x_converter_cpp":
            for image in [
                    f for f in output_directory.iterdir() if f.is_file()
            ]:
                renamed = re.sub(
                    f"_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{self.extracted_frame_format}",
                    f".{self.extracted_frame_format}",
                    str(image.name),
                )
                image.rename(output_directory / renamed)

        # upscaling done, kill helper threads
        Avalon.debug_info(_("Killing progress monitor"))
        self.progress_monitor.stop()

        Avalon.debug_info(_("Killing upscaled image cleaner"))
        self.image_cleaner.stop()
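The image distribution above relies on a small list-rotation trick: each frame is moved into whichever process directory is currently first in the list, and the list is then rotated so a different directory comes first. A minimal standalone sketch of that rotation, using hypothetical frame and directory names:

# minimal sketch of the list-rotation round-robin used above;
# the frame and directory names are invented placeholders
frames = [f"frame_{i:06d}.png" for i in range(7)]
directories = ["0", "1", "2"]

assignments = {d: [] for d in directories}
for frame in frames:
    # assign the frame to the directory currently at the front
    assignments[directories[0]].append(frame)
    # rotate the list so the next frame lands in a different directory
    directories = directories[-1:] + directories[:-1]

print(assignments)
# {'0': ['frame_000000.png', 'frame_000003.png', 'frame_000006.png'],
#  '1': ['frame_000002.png', 'frame_000005.png'],
#  '2': ['frame_000001.png', 'frame_000004.png']}

Note that the rotation brings the last directory to the front, so directories are served in the order 0, 2, 1, 0, 2, 1, and so on; the split stays as even as possible.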
Example #2
0
def load():
    if conf_file:
        with open(conf_file) as file:
            conf = yaml.safe_load(file)
        try:
            default_password = conf['default_passwd']
            disk_threshold = conf['disk_threshold']
            disk_monitor_interval = conf['monitors']['disk_monitor']
            progress_monitor_interval = conf['monitors']['progress_monitor']
            gateway_monitor_interval = conf['monitors']['gateway_monitor']
            db_conn_monitor_interval = conf['monitors']['db_conn_monitor']
            smtp_server = conf['email']['server']
            smtp_port = conf['email']['port']
            smtp_username = conf['email']['username']
            smtp_password = conf['email']['passwd']
            mailto_list = conf['email']['mailto']
            servers_info = conf['servers']

            servers = []
            for server_info in servers_info:
                projects = []
                for project_info in server_info['projects']:
                    project = Server.Project(project_info['path'],
                                             project_info['port'])
                    projects.append(project)

                server = Server(
                    name=server_info['name'],
                    ip=server_info['ip'],
                    projects=projects,
                    passwd=server_info['passwd'],
                    default_passwd=default_password)

                servers.append(server)

            global SERVERS
            SERVERS = servers

            # initialize
            Server.Disk.set_threshold(disk_threshold)
            Email.setup(smtp_server, smtp_port, smtp_username, smtp_password,
                        mailto_list)
            disk_monitor = DiskMonitor(disk_monitor_interval)
            progress_monitor = ProgressMonitor(progress_monitor_interval)
            gateway_monitor = GatewayMonitor(gateway_monitor_interval)
            db_conn_monitor = DbConnMonitor(db_conn_monitor_interval)

            # clear cache
            disk_monitor.servers.clear()
            progress_monitor.servers.clear()
            gateway_monitor.servers.clear()
            # recache
            for server in servers:
                disk_monitor.servers.append(server)
                if server.any_project_has_port():
                    progress_monitor.servers.append(server)
                if 'gate' in server.name:
                    gateway_monitor.servers.append(server)
                if 'db' in server.name or 'datacenter' in server.name:
                    db_conn_monitor.servers.append(server)

            return conf, disk_monitor, progress_monitor, gateway_monitor, db_conn_monitor
        except Exception as e:
            logger.error('Failed to load config file! Details:\n{e}'.format(e=e))
            return None
    else:
        logger.error('Could not find config file!')
        return None
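For reference, here is a minimal sketch of the configuration structure load() expects, written as Python that dumps a YAML file. The key layout comes straight from the lookups in the function; every concrete value is an invented placeholder, and the monitor intervals are assumed (not confirmed) to be in seconds:

import yaml

# invented sample values; only the key layout is taken from load()
sample_conf = {
    'default_passwd': 'changeme',
    'disk_threshold': 80,
    'monitors': {
        'disk_monitor': 60,
        'progress_monitor': 30,
        'gateway_monitor': 60,
        'db_conn_monitor': 60,
    },
    'email': {
        'server': 'smtp.example.com',
        'port': 465,
        'username': 'alerts@example.com',
        'passwd': 'changeme',
        'mailto': ['ops@example.com'],
    },
    'servers': [{
        'name': 'gate-01',
        'ip': '10.0.0.1',
        'passwd': 'changeme',
        'projects': [{'path': '/srv/app', 'port': 8080}],
    }],
}

with open('conf.yml', 'w') as f:
    yaml.safe_dump(sample_conf, f, default_flow_style=False)

Per the recache loop above, a server whose name contains 'gate' is also registered with the gateway monitor, and one whose name contains 'db' or 'datacenter' with the DB connection monitor.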
Example #3
0
    def _upscale_frames(self):
        """ Upscale video frames with waifu2x-caffe

        This function upscales all the frames extracted
        by ffmpeg using the waifu2x-caffe binary.

        Arguments:
            w2 {Waifu2x Object} -- initialized waifu2x object
        """

        # initialize waifu2x driver
        if self.driver not in AVAILABLE_DRIVERS:
            raise UnrecognizedDriverError(
                _('Unrecognized driver: {}').format(self.driver))

        # list all images in the extracted frames
        frames = [f for f in self.extracted_frames.iterdir() if f.is_file()]

        # if we have fewer images than processes,
        # create only the processes necessary
        if len(frames) < self.processes:
            self.processes = len(frames)

        # create a directory for each process and append directory
        # name into a list
        process_directories = []
        for process_id in range(self.processes):
            process_directory = self.extracted_frames / str(process_id)
            process_directories.append(process_directory)

            # delete old directories and create new directories
            if process_directory.is_dir():
                shutil.rmtree(process_directory)
            process_directory.mkdir(parents=True, exist_ok=True)

        # waifu2x-converter-cpp will perform multi-threading within its own process
        if self.driver in [
                'waifu2x_converter_cpp', 'waifu2x_ncnn_vulkan',
                'srmd_ncnn_vulkan', 'realsr_ncnn_vulkan', 'anime4kcpp'
        ]:
            process_directories = [self.extracted_frames]

        else:
            # evenly distribute images into each directory
            # until there are none left
            for image in frames:
                # move image
                image.rename(process_directories[0] / image.name)
                # rotate list
                process_directories = process_directories[
                    -1:] + process_directories[:-1]

        # create driver processes and start them
        for process_directory in process_directories:
            self.process_pool.append(
                self.driver_object.upscale(process_directory,
                                           self.upscaled_frames))

        # start progress bar in a different thread
        Avalon.debug_info(_('Starting progress monitor'))
        self.progress_monitor = ProgressMonitor(self, process_directories)
        self.progress_monitor.start()

        # create the clearer and start it
        Avalon.debug_info(_('Starting upscaled image cleaner'))
        self.image_cleaner = ImageCleaner(self.extracted_frames,
                                          self.upscaled_frames,
                                          len(self.process_pool))
        self.image_cleaner.start()

        # wait for all processes to exit
        try:
            self._wait()
        except (Exception, KeyboardInterrupt, SystemExit) as e:
            # cleanup
            Avalon.debug_info(_('Killing progress monitor'))
            self.progress_monitor.stop()

            Avalon.debug_info(_('Killing upscaled image cleaner'))
            self.image_cleaner.stop()
            raise e

        # if the driver is waifu2x-converter-cpp
        # images need to be renamed to be recognizable for FFmpeg
        if self.driver == 'waifu2x_converter_cpp':
            for image in [
                    f for f in self.upscaled_frames.iterdir() if f.is_file()
            ]:
                renamed = re.sub(
                    f'_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{self.extracted_frame_format}',
                    f'.{self.extracted_frame_format}', str(image.name))
                image.rename(self.upscaled_frames / renamed)

        # upscaling done, kill helper threads
        Avalon.debug_info(_('Killing progress monitor'))
        self.progress_monitor.stop()

        Avalon.debug_info(_('Killing upscaled image cleaner'))
        self.image_cleaner.stop()
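In both variants above, the final re.sub strips the suffix that waifu2x-converter-cpp appends to its output filenames, so that FFmpeg can recognize the frames by their original numbered names again. A quick standalone check of that pattern, assuming png as the extracted frame format and a made-up output filename:

import re

# assumptions: "png" as extracted_frame_format, and a hypothetical
# waifu2x-converter-cpp output name with its appended suffix
extracted_frame_format = "png"
name = "extracted_000001_[NS-L3][x2.000000].png"

renamed = re.sub(
    f"_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{extracted_frame_format}",
    f".{extracted_frame_format}",
    name,
)
print(renamed)  # extracted_000001.png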
Example #4
0
def summarize_amazing_race(path_to_data='sample_data/test1/', num_processes=4):
    """Calculates the amazing race statistics and prints them out for each person in the race.

    Goes through all the days annotated in the index.json file. From each day it gathers data for each participant and
    then combines the data for the whole race. The method can run in parallel; the recommended number of processes is
    < number_cpu_cores * 2. While the data is being processed, progress is written to stdout in 5% increments by number
    of legs. At the end the data for each participant is printed to stdout.

    :param path_to_data: string, path to the folder containing data_xxx.json and index.json
    :param num_processes: int, number of processes to run in parallel
    """

    path = pathlib.Path(path_to_data)
    with open(path / "index.json", "r") as f:
        index_jsonable = json.load(f)

        #  Read data from the index.json file and store it for later use.
        friend_list = index_jsonable['friends']
        total_distance = sum(dist for _, dist, _ in index_jsonable['files'])
        total_legs = sum(num_legs for *_, num_legs in index_jsonable['files'])
        file_names = [fn for fn, *_ in index_jsonable['files']]
        total_files = len(file_names)

        #  Create a shared queue in order to bring back results after they have been processed.
        queue = mp.Queue()
        #  Create a shared ProgressMonitor.
        progress_monitor = ProgressMonitor(total_legs, total_distance)

        processes = []
        step_size = math.ceil(total_files / num_processes)  # How many files per process

        for i in range(0, len(file_names), step_size):
            #  Collect all filenames that are to be processed in this batch (by a single process).
            batch = [
                path / file_names[j]
                for j in range(i, min(i + step_size, total_files))
            ]
            processes.append(
                mp.Process(target=summarize_data_for_batch,
                           args=(batch, i // step_size, queue,
                                 progress_monitor)))

        for p in processes:
            p.start()
        for p in processes:
            p.join()

        #  Print final progress, it should be at 100%.
        progress_monitor.print_progress(True)

        #  Gather shared data from the queue; each batch contributes one (batch_index, data) pair.
        all_data = [None] * queue.qsize()
        while not queue.empty():
            processed_day = queue.get()
            all_data[processed_day[0]] = processed_day[1]

        #  Combine data from different batches into one.
        friend_summaries = combine_data(all_data)

        print("\n\n++++++++++++")
        print("In total %i friends participated in this race: " %
              (len(friend_list)) + ", ".join(friend_list))
        print("Together they traveled for total distance of %0.1f km\n" %
              total_distance)
        for friend, friend_summary in friend_summaries.items():
            print("%s\n--------" % (friend, ))
            friend_summary.print()
            print("")