Example #1
 def __call__(self, track_set: RGTrackSet) -> RGTrackSet:
     try:
         super(TrackSetHandler, self).__call__(track_set)
     except Exception:
         logger.error("Failed to analyze %s. Skipping this track set. The exception was:\n\n%s\n",
                      track_set.track_set_key_string(), traceback.format_exc())
     return track_set
Example #2
 def __call__(self, track_set: RGTrackSet) -> RGTrackSet:
     try:
         super(TrackSetHandler, self).__call__(track_set)
     except Exception:
         logger.error(
             "Failed to analyze %s. Skipping this track set. The exception was:\n\n%s\n",
             track_set.track_set_key_string(), traceback.format_exc())
     return track_set
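
Examples #1 and #2 are the same handler formatted two ways: any exception raised while analyzing a track set is logged together with its full traceback, and the track set is returned unchanged so that one failing album cannot abort the whole run. A minimal standalone sketch of that log-and-skip pattern follows; the analyze() function and the item type are hypothetical stand-ins, not part of the original code.

import logging
import traceback

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def analyze(item: str) -> None:
    """Hypothetical stand-in for the real per-item analysis step."""
    if not item:
        raise ValueError("nothing to analyze")

def safe_handler(item: str) -> str:
    """Log any failure with its traceback and keep going."""
    try:
        analyze(item)
    except Exception:
        logger.error("Failed to analyze %s. Skipping this item. The exception was:\n\n%s\n",
                     item, traceback.format_exc())
    return item

safe_handler("")    # logs the ValueError traceback instead of raising
safe_handler("ok")  # succeeds silently
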
Example #3
 def wrapped_handler(track_set: RGTrackSet) -> RGTrackSet:
     p = Process(target=handler, args=(track_set,))
     try:
         p.start()
         p.join()
         if p.exitcode != 0:
             logger.error("Subprocess exited with code %s for %s", p.exitcode, track_set.track_set_key_string())
     finally:
         if p.is_alive():
             logger.debug("Killing subprocess")
             p.terminate()
     return track_set
Example #4
 def wrapped_handler(track_set: RGTrackSet) -> RGTrackSet:
     p = Process(target=handler, args=(track_set,)) # type: ignore # https://github.com/python/mypy/issues/797
     try:
         p.start()
         p.join()
         if p.exitcode != 0:  # type: ignore
             logger.error("Subprocess exited with code %s for %s", p.exitcode, track_set.track_set_key_string())  # type: ignore
     finally:
         if p.is_alive():
             logger.debug("Killing subprocess")
             p.terminate()
     return track_set
Example #5
 def wrapped_handler(track_set: RGTrackSet) -> RGTrackSet:
     p = Process(target=handler, args=(
         track_set,
     ))  # type: ignore # https://github.com/python/mypy/issues/797
     try:
         p.start()
         p.join()
         if p.exitcode != 0:  # type: ignore
             logger.error("Subprocess exited with code %s for %s",
                          p.exitcode,
                          track_set.track_set_key_string())  # type: ignore
     finally:
         if p.is_alive():
             logger.debug("Killing subprocess")
             p.terminate()
     return track_set
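
Examples #3 to #5 are the same wrapper with progressively more "# type: ignore" annotations: the real handler runs in its own multiprocessing.Process, the parent joins it, logs any non-zero exit code, and terminates the child if it is somehow still alive when the wrapper unwinds. The sketch below shows the pattern in self-contained form; the work() function and the item value are illustrative assumptions, not taken from the original project.

import logging
from multiprocessing import Process

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def work(item: str) -> None:
    """Hypothetical stand-in for the handler; runs in the child process."""
    print("analyzing", item)

def run_in_subprocess(item: str) -> str:
    p = Process(target=work, args=(item,))
    try:
        p.start()
        p.join()             # block until the child finishes
        if p.exitcode != 0:  # non-zero means the child raised or was killed
            logger.error("Subprocess exited with code %s for %s", p.exitcode, item)
    finally:
        if p.is_alive():     # e.g. unwinding after a KeyboardInterrupt before join()
            logger.debug("Killing subprocess")
            p.terminate()
    return item

if __name__ == "__main__":
    run_in_subprocess("some item")
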
Example #6
def main(force_reanalyze: bool = False,
         include_hidden: bool = False,
         dry_run: bool = False,
         gain_type: str = 'auto',
         backend: str = 'auto',
         jobs: int = default_job_count(),
         low_memory: bool = False,
         quiet: bool = False,
         verbose: bool = False,
         *music_dir: str,
         ) -> None:
    '''Add replaygain tags to your music files.'''

    try:
        from tqdm import tqdm
    except ImportError:
        # Fallback: No progress bars
        tqdm = tqdm_fake
    if quiet:
        logger.setLevel(logging.WARN)
        tqdm = tqdm_fake
    elif verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    if backend == 'auto':
        backend_exceptions: List[BackendUnavailableException] = []
        for bname in known_backends:
            try:
                gain_backend = get_backend(bname)
                logger.info('Selected the {} backend to compute ReplayGain'.format(bname))
                break
            except BackendUnavailableException as ex:
                backend_exceptions.append(ex)
        else:
            for exc in backend_exceptions:
                logger.error(exc.args[0])
            logger.error('Could not find any usable backends. Perhaps you have not installed the prerequisites?')
            sys.exit(1)
    else:
        try:
            gain_backend = get_backend(backend)
            logger.info('Using the {} backend to compute ReplayGain'.format(backend))
        except BackendUnavailableException as ex:
            logger.error(ex.args[0])
            sys.exit(1)

    track_constructor = RGTrack
    if dry_run:
        logger.warning('This script is running in "dry run" mode, so no files will actually be modified.')
        track_constructor = RGTrackDryRun
    if len(music_dir) == 0:
        logger.error("You did not specify any music directories or files. Exiting.")
        sys.exit(1)
    music_directories = list(unique(map(fullpath, music_dir)))
    logger.info("Searching for music files in the following locations:\n%s", "\n".join(music_directories),)
    all_music_files = get_all_music_files(music_directories,
                                          ignore_hidden=(not include_hidden))
    if low_memory:
        tracks = map(track_constructor, all_music_files)
        track_sets = RGTrackSet.MakeTrackSets(tracks, gain_backend=gain_backend)
    else:
        tracks = map(track_constructor, tqdm(all_music_files, desc="Searching"))
        track_sets = list(RGTrackSet.MakeTrackSets(tracks, gain_backend=gain_backend))
        if len(track_sets) == 0:
            logger.error("Failed to find any tracks in the directories you specified. Exiting.")
            sys.exit(1)
        if (jobs > len(track_sets)):
            jobs = len(track_sets)

    logger.info("Beginning analysis")

    handler = TrackSetHandler(force=force_reanalyze, gain_type=gain_type, dry_run=dry_run, verbose=verbose)
    # Wrapper that runs the handler in a subprocess, allowing for
    # parallel operation
    def wrapped_handler(track_set: RGTrackSet) -> RGTrackSet:
        p = Process(target=handler, args=(track_set,))
        try:
            p.start()
            p.join()
            if p.exitcode != 0:
                logger.error("Subprocess exited with code %s for %s", p.exitcode, track_set.track_set_key_string())
        finally:
            if p.is_alive():
                logger.debug("Killing subprocess")
                p.terminate()
        return track_set

    pool = None
    try:
        if jobs <= 1:
            # Sequential
            handled_track_sets = map(handler, track_sets)
        else:
            # Parallel (Using process pool doesn't work, so instead we
            # use Process instance within each thread)
            pool = ThreadPool(jobs)
            handled_track_sets = pool.imap_unordered(wrapped_handler, track_sets)
        # Wait for completion
        iter_len = None if low_memory else len(cast(Sized, track_sets))
        for ts in tqdm(handled_track_sets, total=iter_len, desc="Analyzing"):
            pass
        logger.info("Analysis complete.")
    except KeyboardInterrupt:
        if pool is not None:
            logger.debug("Terminating process pool")
            pool.terminate()
            pool = None
        raise
    finally:
        if pool is not None:
            logger.debug("Closing transcode process pool")
            pool.close()
    if dry_run:
        logger.warning('This script ran in "dry run" mode, so no files were actually modified.')
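
The comment in main() notes that a process pool did not work here, so parallelism comes from a ThreadPool whose workers each launch their own Process via wrapped_handler, while jobs <= 1 falls back to a plain sequential map. A stripped-down sketch of that dispatch logic follows; handle() and the item list are hypothetical, and the real code drains the iterator through tqdm rather than printing.

from multiprocessing.pool import ThreadPool

def handle(item: str) -> str:
    """Hypothetical stand-in for wrapped_handler; the original spawns a Process per call."""
    return item.upper()

def run_all(items, jobs):
    pool = None
    try:
        if jobs <= 1:
            results = map(handle, items)                  # sequential fallback
        else:
            pool = ThreadPool(jobs)
            results = pool.imap_unordered(handle, items)  # yields results as they finish
        for r in results:                                 # drain the iterator to force completion
            print(r)
    finally:
        if pool is not None:
            pool.close()
            pool.join()

if __name__ == "__main__":
    run_all(["a", "b", "c"], jobs=2)
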
Example #7
def main(force_reanalyze: bool = False,
         include_hidden: bool = False,
         dry_run: bool = False,
         gain_type: str = 'auto',
         backend: str = 'auto',
         jobs: int = default_job_count(),
         low_memory: bool = False,
         quiet: bool = False,
         verbose: bool = False,
         *music_dir: str):
    '''Add replaygain tags to your music files.'''

    try:
        from tqdm import tqdm
    except ImportError:
        # Fallback: No progress bars
        tqdm = tqdm_fake
    if quiet:
        logger.setLevel(logging.WARN)
        tqdm = tqdm_fake
    elif verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    if backend == 'auto':
        for bname in known_backends:
            try:
                gain_backend = get_backend(bname)
                logger.info("Selected the %s backend to compute ReplayGain",
                            bname)
                break
            except BackendUnavailableException:
                pass
        else:
            raise BackendUnavailableException(
                "Could not find any usable backends.")
    else:
        gain_backend = get_backend(backend)
        logger.info("Using the %s backend to compute ReplayGain", backend)

    track_constructor = RGTrack
    if dry_run:
        logger.warning(
            'This script is running in "dry run" mode, so no files will actually be modified.'
        )
        track_constructor = RGTrackDryRun
    if len(music_dir) == 0:
        logger.error(
            "You did not specify any music directories or files. Exiting.")
        sys.exit(1)
    music_directories = list(unique(map(fullpath, music_dir)))
    logger.info(
        "Searching for music files in the following locations:\n%s",
        "\n".join(music_directories),
    )
    all_music_files = get_all_music_files(music_directories,
                                          ignore_hidden=(not include_hidden))
    if low_memory:
        tracks = map(track_constructor, all_music_files)
        track_sets = RGTrackSet.MakeTrackSets(tracks,
                                              gain_backend=gain_backend)
    else:
        tracks = map(track_constructor, tqdm(all_music_files,
                                             desc="Searching"))
        track_sets = list(
            RGTrackSet.MakeTrackSets(tracks, gain_backend=gain_backend))
        if len(track_sets) == 0:
            logger.error(
                "Failed to find any tracks in the directories you specified. Exiting."
            )
            sys.exit(1)
        if (jobs > len(track_sets)):
            jobs = len(track_sets)

    logger.info("Beginning analysis")

    handler = TrackSetHandler(force=force_reanalyze,
                              gain_type=gain_type,
                              dry_run=dry_run,
                              verbose=verbose)

    # Wrapper that runs the handler in a subprocess, allowing for
    # parallel operation
    def wrapped_handler(track_set: RGTrackSet) -> RGTrackSet:
        p = Process(target=handler, args=(
            track_set,
        ))  # type: ignore # https://github.com/python/mypy/issues/797
        try:
            p.start()
            p.join()
            if p.exitcode != 0:  # type: ignore
                logger.error("Subprocess exited with code %s for %s",
                             p.exitcode,
                             track_set.track_set_key_string())  # type: ignore
        finally:
            if p.is_alive():
                logger.debug("Killing subprocess")
                p.terminate()
        return track_set

    pool = None
    try:
        if jobs <= 1:
            # Sequential
            handled_track_sets = map(
                handler, track_sets
            )  # type: ignore # https://github.com/python/mypy/issues/797
        else:
            # Parallel (Using process pool doesn't work, so instead we
            # use Process instance within each thread)
            pool = ThreadPool(jobs)
            handled_track_sets = pool.imap_unordered(
                wrapped_handler, track_sets
            )  # type: ignore # https://github.com/python/typeshed/issues/683
        # Wait for completion
        iter_len = None if low_memory else len(cast(Sized, track_sets))
        for ts in tqdm(handled_track_sets, total=iter_len, desc="Analyzing"):
            pass
        logger.info("Analysis complete.")
    except KeyboardInterrupt:
        if pool is not None:
            logger.debug("Terminating process pool")
            pool.terminate()
            pool = None
        raise
    finally:
        if pool is not None:
            logger.debug("Closing transcode process pool")
            pool.close()
    if dry_run:
        logger.warning(
            'This script ran in "dry run" mode, so no files were actually modified.'
        )
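
Example #7 replaces the per-backend exception collection of example #6 with a plain for/else: try each known backend in order, break on the first one that loads, and raise only if the loop finishes without breaking. The snippet below isolates that control flow; the backend names and the get_backend() stub are invented for the illustration and do not reflect the project's real backend list.

class BackendUnavailableException(Exception):
    pass

def get_backend(name: str) -> str:
    """Hypothetical loader: pretend only the 'bs1770gain' backend is installed."""
    if name != "bs1770gain":
        raise BackendUnavailableException("%s backend is not available" % name)
    return name

known_backends = ["audiotools", "bs1770gain", "ffmpeg"]

for bname in known_backends:
    try:
        gain_backend = get_backend(bname)
        print("Selected the %s backend to compute ReplayGain" % bname)
        break
    except BackendUnavailableException:
        pass
else:
    # The else clause runs only when the loop ended without a break,
    # i.e. every candidate backend failed to load.
    raise BackendUnavailableException("Could not find any usable backends.")
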