Example #1
0
def time_to_seconds(time: TIME_FORMAT) -> float:
    """
    Convert a time representation into seconds.

    Accepts:
      - a string like ``"hh:mm:ss.xxx"``, ``"mm:ss,xxx"``, or ``"ss"``
        (the fractional part may be separated by ',' or '.'),
      - a tuple ``(hours, minutes, seconds)`` or ``(minutes, seconds)``,
      - a plain number, which is returned unchanged.

    Raises
    ------
    ParameterError
        If a tuple with an unsupported number of elements is given.
    """
    if isinstance(time, str):
        # Optional hours and minutes groups, seconds, and an optional
        # fraction after ',' or '.'.  The previous class [,|.] also matched
        # a literal '|' as a decimal separator ('|' is not an alternation
        # operator inside []), which was accepted by accident.
        expr = r"(?:(?:(\d+):)?(?:(\d+):))?(\d+)?(?:[,.](\d+))?"
        finds = re.findall(expr, time)[0]
        # Empty groups default to '0' so the arithmetic below always works.
        finds = [find if find else '0' for find in finds]

        seconds = (3600 * int(finds[0]) + 60 * int(finds[1]) + int(finds[2]) +
                   int(finds[3]) / (10**len(finds[3])))
    elif isinstance(time, tuple):
        if len(time) == 3:
            hr, mn, sec = time
        elif len(time) == 2:
            hr, mn, sec = 0, time[0], time[1]
        else:
            raise ParameterError(
                f"Unsupported number of elements in tuple {time}")
        seconds = (3600 * hr) + (60 * mn) + sec
    else:
        # Already numeric (int/float) -- pass through unchanged.
        seconds = time

    return seconds
Example #2
0
def prepare_args(args):
    """
    Formats and validates program inputs
    """
    has_duration = getattr_none(args, 'duration') is not None
    has_locations = getattr_none(args, 'event_locations') is not None

    # --duration is meaningless without explicit event locations
    if has_duration and not has_locations:
        raise ParameterError("--duration option requires --event-locations.")

    # Normalize video dimensions to a tuple when provided
    if getattr_none(args, 'video_dimensions') is not None:
        args.video_dimensions = tuple(args.video_dimensions)

    return args
Example #3
0
def prepare_args(args):
    """
    Formats and validates program inputs
    """
    has_duration = getattr_none(args, 'duration') is not None
    has_locations = getattr_none(args, 'event_locations') is not None

    # A duration is meaningless without explicit event locations
    if has_duration and not has_locations:
        raise ParameterError("Duration option requires event locations.")

    # Normalize video dimensions to a tuple when provided
    if getattr_none(args, 'video_dimensions') is not None:
        args.video_dimensions = tuple(args.video_dimensions)

    # Fall back to beat detection when no explicit event locations are given
    if not has_locations:
        args.audio_events_mode = AudioEventsMode.BEATS

    return args
Example #4
0
def create_marked_audio_file(mark_locations: Union[List[float], np.ndarray],
                             output_path: Opt[str] = None,
                             *,
                             audio_file: Opt[str] = None,
                             duration: float = None):
    """
    Write an audio file containing click sounds at the given mark locations.

    Either an existing audio file (clicks are mixed on top of it) or a
    duration (clicks over silence at 22050 Hz) must be provided.
    """
    if audio_file:
        samples, sample_rate = librosa.load(audio_file)
        clicks = librosa.core.clicks(times=mark_locations,
                                     sr=sample_rate,
                                     length=len(samples))
        # Overlay the click track onto the source audio
        marked_audio = samples + clicks
    elif duration:
        sample_rate = 22050
        marked_audio = librosa.core.clicks(times=mark_locations,
                                           sr=sample_rate,
                                           length=int(sample_rate * duration))
    else:
        raise ParameterError("Must provide either audio file or duration.")

    soundfile.write(output_path, marked_audio, sample_rate, 'PCM_24')

    return output_path
Example #5
0
def fill_slices(slices: List[slice], length) -> List[slice]:
    """
    Completes the list of slices for a list, given a list of slices and the list's length.
    """
    # Process slices in order of their start element
    ordered = sorted(slices, key=operator.attrgetter('start'))

    # Reject any pair of adjacent (sorted) slices whose ranges overlap
    for current, following in zip(ordered, ordered[1:]):
        if ranges_overlap(current.start, current.stop, following.start,
                          following.stop):
            raise ParameterError(f"Slice ranges may not overlap. "
                                 f"Found overlapping slices {current}, {following}.")

    filled = []
    for position, current in enumerate(ordered):
        # Gap before the very first slice
        if position == 0 and current.start > 0:
            filled.append(slice(0, current.start))

        filled.append(current)

        if position == len(ordered) - 1:
            # Gap after the last slice
            if current.stop < length:
                filled.append(slice(current.stop, length))
        else:
            # Gap between this slice and the next one
            following = ordered[position + 1]
            if current.stop < following.start:
                filled.append(slice(current.stop, following.start))

    return filled
Example #6
0
def prepare_events(generator: MusicVideoGenerator, args) -> EventList:
    """
    Build the event list driving the music video from the program arguments.

    Events come either from audio analysis (beats or onsets) or from
    explicit event locations; optional speed multipliers, grouping, and a
    global offset are then applied.

    Raises
    ------
    ParameterError
        If neither an audio events mode nor event locations are provided,
        or an unsupported beats/onsets/audio-events mode is given.
    """
    audio = generator.audio
    locations = args.event_locations

    if args.audio_events_mode:
        message("Analyzing audio...")

        # Detect raw events according to the requested analysis mode
        if args.audio_events_mode == AudioEventsMode.BEATS:
            if args.beats_mode == BeatsMode.BEATS:
                events = audio.beats()
            elif args.beats_mode == BeatsMode.WEAK_BEATS:
                events = audio.beats(trim=True)
            else:
                raise ParameterError(
                    f"Unsupported beats mode {args.beats_mode}.")
        elif args.audio_events_mode == AudioEventsMode.ONSETS:
            if args.onsets_mode == OnsetsMode.ONSETS:
                events = audio.onsets()
            elif args.onsets_mode == OnsetsMode.BACKTRACK:
                events = audio.onsets(backtrack=True)
            else:
                raise ParameterError(
                    f"Unsupported onsets mode {args.onsets_mode}.")
        else:
            raise ParameterError(
                f"Unsupported audio events mode {args.audio_events_mode}.")

        # Global speed multiplier applied before any grouping
        if args.events_speed:
            events.speed_multiply(args.events_speed, args.events_speed_offset)

        if args.group_events_by_type is not None or args.group_events_by_slices:
            # Grouping by type takes precedence over grouping by slices
            if args.group_events_by_type is not None:
                event_groups = events.group_by_type(
                    select_types=args.group_events_by_type)
            else:
                event_groups = events.group_by_slices(
                    slices=args.group_events_by_slices)

            # Apply per-group speed multipliers to the targeted groups
            if args.target_groups == TargetGroups.ALL:
                event_groups.speed_multiply(args.group_speeds,
                                            args.group_speed_offsets)
            elif args.target_groups == TargetGroups.SELECTED:
                event_groups.selected_groups.speed_multiply(
                    args.group_speeds, args.group_speed_offsets)
            elif args.target_groups == TargetGroups.UNSELECTED:
                event_groups.unselected_groups.speed_multiply(
                    args.group_speeds, args.group_speed_offsets)

            events = event_groups.flatten()
        else:
            event_groups = EventGroupList([events])

        message(f"Events:\n{event_groups}")

        # Explicit locations are merged on top of the detected events
        if locations:
            events.add_events(locations)
    elif locations:
        events = EventList(locations, end=generator.duration)
    else:
        raise ParameterError(
            "Must provide either audio events mode or event locations.")

    if args.events_offset:
        events.offset(args.events_offset)

    return events
Example #7
0
    def __init__(self,
                 audio_file: Opt[str] = None,
                 video_sources: Opt[Union[VideoSourceList,
                                          List[Union[VideoSource, str,
                                                     List[Any]]]]] = None,
                 *,
                 duration: TIME_FORMAT = None,
                 video_filters: Opt[List[str]] = None,
                 exclude_video_filters: Opt[List[str]] = None,
                 include_video_filters: Opt[List[str]] = None,
                 custom_video_filters: Opt[List[Filter]] = None):
        """
        Parameters
        ----------
        audio_file
            audio file to use for the music video

        video_sources
            Source videos to use for the music video.
            Accepts arbitrarily nested video files, directories, VideoSources, and VideoSourceLists.

        duration
            Duration of the music video, when no audio file is given

        video_filters
            Video filters that each segment in the music video must pass.
            See :class:`~mugen.video.video_filters.VideoFilter` for a list of supported values.
            Defaults to :data:`~mugen.video.video_filters.VIDEO_FILTERS_DEFAULT`

        exclude_video_filters
            Video filters to exclude from default video_filters.
            Takes precedence over video_filters

        include_video_filters
            Video filters to use in addition to default video_filters.
            Takes precedence over exclude_video_filters

        custom_video_filters
            Custom video filters to use in addition to video_filters.
            Allows functions wrapped by :class:`~mugen.mixins.Filterable.Filter` or
            :class:`~mugen.mixins.Filterable.ContextFilter`

        Raises
        ------
        ParameterError
            If neither an audio file nor a duration is provided.
        MugenError
            If an unknown video filter name is given.
        """
        if not audio_file and not duration:
            raise ParameterError(
                "Must provide either the audio file or duration for the music video."
            )

        self.audio = Audio(audio_file) if audio_file else None
        self._duration = duration

        if video_sources:
            self.video_sources = VideoSourceList(video_sources)

        # Assemble list of video filter names.
        # Copy the list: previously this aliased the caller's list (or the
        # module-level VIDEO_FILTERS_DEFAULT) and mutated it in place via
        # remove()/extend() below, corrupting the shared default list for
        # every subsequently constructed instance.
        video_filter_names = list(
            video_filters if video_filters is not None else vf.VIDEO_FILTERS_DEFAULT)
        if exclude_video_filters:
            for video_filter in exclude_video_filters:
                video_filter_names.remove(video_filter)
        if include_video_filters:
            video_filter_names.extend(include_video_filters)
        custom_video_filters = custom_video_filters or []

        # Compile video filters; unknown names surface as MugenError
        self.video_filters = []
        for filter_name in video_filter_names:
            try:
                self.video_filters.append(vf.VideoFilter[filter_name].value)
            except KeyError as e:
                raise MugenError(
                    f"Unknown video filter '{filter_name}'") from e
        self.video_filters.extend(custom_video_filters)

        self.meta = {self.Meta.REJECTED_SEGMENT_STATS: []}