Example 1: beats
    def beats(self, trim: bool = False) -> EventList:
        """
        Gets beat events
        
        Parameters
        ----------
        trim
            Label weak leading and trailing beats separately

        Returns
        -------
        Detected beat events from the audio
        """
        untrimmed_beats = self._beats()
        untrimmed_beats = EventList([Beat(beat) for beat in untrimmed_beats], end=self.duration)

        if not trim:
            beats = untrimmed_beats
        else:
            trimmed_beats = self._beats(trim=True)
            trimmed_leading_beats = [beat for beat in untrimmed_beats.locations if beat < trimmed_beats[0]]
            trimmed_trailing_beats = [beat for beat in untrimmed_beats.locations if beat > trimmed_beats[-1]]

            # Mark leading & trailing trimmed beats as weak beats
            trimmed_beats = EventList([Beat(beat) for beat in trimmed_beats], end=self.duration)
            trimmed_leading_beats = EventList([WeakBeat(beat) for beat in trimmed_leading_beats], end=self.duration)
            trimmed_trailing_beats = EventList([WeakBeat(beat) for beat in trimmed_trailing_beats], end=self.duration)

            beats = trimmed_leading_beats + trimmed_beats + trimmed_trailing_beats

        return beats
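
A minimal usage sketch for the trim option. The method belongs to mugen's audio wrapper; the import paths and constructor argument below are assumptions, while the WeakBeat labeling itself follows directly from the trim branch above.

from mugen.audio.Audio import Audio  # assumed import path
from mugen.events import WeakBeat    # assumed import path

audio = Audio('song.wav')
beats = audio.beats(trim=True)

# Weak leading/trailing beats come back as WeakBeat events, so they can be
# filtered out before the beats are used as cut locations.
strong_beats = [event for event in beats if not isinstance(event, WeakBeat)]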
Example 2: test_event_group_list__group_by_types_resulting_groups
def test_event_group_list__group_by_types_resulting_groups(events):
    event_groups = events.group_by_type(['Silence'])
    assert event_groups.selected_groups == EventGroupList([EventList([Silence(6)]),
                                                           EventList([Silence(30)])])
    assert event_groups.unselected_groups == EventGroupList([EventList([Beat(12),
                                                                        Beat(18),
                                                                        Beat(24)])])

    # Not specifying type selects all groups
    event_groups = events.group_by_type()
    assert event_groups.selected_groups == event_groups
Example 3: generate_from_events
    def generate_from_events(self,
                             events: Union[EventList, List[TIME_FORMAT]],
                             progress_bar: bool = True,
                             force_video_split: bool = False,
                             force_video_split_locs: list = []) -> MusicVideo:
        """
        Generates a MusicVideo from a list of events
        
        Parameters
        ----------
        events
            Events corresponding to cuts which occur in the music video.
            Either a list of events or event locations.

        progress_bar
            Whether to output progress information to stdout
        """
        if not isinstance(events, EventList):
            events = EventList(events, end=self.duration)

        # Get segment durations from cut locations
        segment_durations = events.segment_durations

        music_video_segments = self._generate_music_video_segments(
            segment_durations,
            progress_bar=progress_bar,
            force_video_split=force_video_split,
            force_video_split_locs=force_video_split_locs)

        # Assemble music video from music video segments and audio
        music_video = MusicVideo(music_video_segments,
                                 self.audio.file if self.audio else None)

        return music_video
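
A sketch of how this method is typically reached end to end. The MusicVideoGenerator import and constructor arguments are assumptions inferred from the snippet (it owns self.audio and the video sources to split); write_to_video_file is the writer method used on a MusicVideo in Example 4, though the output_path keyword here is an assumption.

from mugen import MusicVideoGenerator  # assumed import path

generator = MusicVideoGenerator('song.wav', ['footage_a.mp4', 'footage_b.mp4'])

# Cut on detected beats (Example 1); a plain list of locations also works,
# since generate_from_events wraps non-EventList input in an EventList.
beats = generator.audio.beats()
music_video = generator.generate_from_events(beats)

music_video.write_to_video_file(output_path='music_video.mkv')  # keyword assumed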
Example 4: preview_events
    def preview_events(self,
                       events: Union[EventList, List[TIME_FORMAT]],
                       output_path: Opt[str] = None,
                       mode: str = PreviewMode.VISUAL,
                       progress_bar: bool = True,
                       **kwargs):
        """
        Creates a preview of the given events: a new audio file with audible
        bleeps at event locations, or a video that cuts between black and
        white frames at those locations

        Parameters
        ----------
        events
            Events to mark in the audio file.

        output_path
            Path to save the output .wav or .mkv file

        mode
            Method of previewing. Visual by default.
            See :class:`~mugen.audio.Audio.PreviewMode` for supported values.

        progress_bar
            Whether to output progress information to stdout
        """
        if not isinstance(events, EventList):
            events = EventList(events, end=self.duration)

        if mode == PreviewMode.AUDIO:
            a_util.create_marked_audio_file(
                events.locations,
                output_path,
                audio_file=self.audio.file if self.audio else None,
                duration=self.duration)
        elif mode == PreviewMode.VISUAL:
            temp_marked_audio_file = a_util.create_marked_audio_file(
                events.locations,
                audio_file=self.audio.file if self.audio else None,
                duration=self.duration)

            composite_segments = []
            for index, duration in enumerate(events.segment_durations):
                # Alternate black & white
                color = 'black' if index % 2 == 0 else 'white'
                composite_segments.append(
                    ColorSegment(color, duration, size=(600, 300)))

            preview = MusicVideo(composite_segments, temp_marked_audio_file)
            preview.writer.preset = 'ultrafast'

            temp_output_path = preview.write_to_video_file(
                audio=True,
                add_auxiliary_tracks=False,
                progress_bar=progress_bar,
                **kwargs)
            self._add_preview_auxiliary_tracks(temp_output_path, events,
                                               output_path)

        return output_path
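
A usage sketch for both preview modes, reusing the generator and beats from the Example 3 sketch. The PreviewMode import follows the docstring's reference to mugen.audio.Audio.PreviewMode; the file names are illustrative.

from mugen.audio.Audio import PreviewMode

# Audio-only preview: a .wav file with audible bleeps at each beat
generator.preview_events(beats, 'beats_preview.wav', mode=PreviewMode.AUDIO)

# Visual preview: a video cutting between black and white at each beat
generator.preview_events(beats, 'beats_preview.mkv', mode=PreviewMode.VISUAL)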
Example 5: onsets
    def onsets(self, backtrack: bool = False) -> EventList:
        """
        Gets onset events
        
        Parameters
        ----------
        backtrack
            Shift onset events back to the nearest local minimum of energy

        Returns
        -------
        Detected onset events from the audio
        """
        if not backtrack:
            onsets = self._onsets()
        else:
            onsets = self._onsets(backtrack=True)

        onsets = EventList([Onset(onset) for onset in onsets], end=self.duration)

        return onsets
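
As with beats, a minimal sketch, reusing the audio object assumed in the Example 1 sketch:

onsets = audio.onsets(backtrack=True)

# Onset times shifted back to the nearest local energy minimum, which is
# useful when cuts should land just before an attack rather than on it.
print(onsets.locations)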
Example 6: prepare_events
def prepare_events(generator: MusicVideoGenerator, args) -> EventList:
    audio = generator.audio

    audio_events_mode = args.audio_events_mode
    beats_mode = args.beats_mode
    onsets_mode = args.onsets_mode
    event_locations = args.event_locations
    events_offset = args.events_offset
    events_speed = args.events_speed
    events_speed_offset = args.events_speed_offset
    group_events_by_slices = args.group_events_by_slices
    group_events_by_type = args.group_events_by_type
    target_groups = args.target_groups
    group_speeds = args.group_speeds
    group_speed_offsets = args.group_speed_offsets

    if audio_events_mode:
        message("Analyzing audio...")

        if audio_events_mode == AudioEventsMode.BEATS:
            if beats_mode == BeatsMode.BEATS:
                events = audio.beats()
            elif beats_mode == BeatsMode.WEAK_BEATS:
                events = audio.beats(trim=True)
            else:
                raise ParameterError(f"Unsupported beats mode {beats_mode}.")
        elif audio_events_mode == AudioEventsMode.ONSETS:
            if onsets_mode == OnsetsMode.ONSETS:
                events = audio.onsets()
            elif onsets_mode == OnsetsMode.BACKTRACK:
                events = audio.onsets(backtrack=True)
            else:
                raise ParameterError(f"Unsupported onsets mode {onsets_mode}.")
        else:
            raise ParameterError(
                f"Unsupported audio events mode {audio_events_mode}.")

        if events_speed:
            events.speed_multiply(events_speed, events_speed_offset)

        if group_events_by_type is not None or group_events_by_slices:
            if group_events_by_type is not None:
                event_groups = events.group_by_type(
                    select_types=group_events_by_type)
            else:
                event_groups = events.group_by_slices(
                    slices=group_events_by_slices)

            if target_groups == TargetGroups.ALL:
                event_groups.speed_multiply(group_speeds, group_speed_offsets)
            elif target_groups == TargetGroups.SELECTED:
                event_groups.selected_groups.speed_multiply(
                    group_speeds, group_speed_offsets)
            elif target_groups == TargetGroups.UNSELECTED:
                event_groups.unselected_groups.speed_multiply(
                    group_speeds, group_speed_offsets)

            events = event_groups.flatten()
        else:
            event_groups = EventGroupList([events])

        message(f"Events:\n{event_groups}")

        if event_locations:
            events.add_events(event_locations)
    elif event_locations:
        events = EventList(event_locations, end=generator.duration)
    else:
        raise ParameterError(
            "Must provide either audio events mode or event locations.")

    if events_offset:
        events.offset(events_offset)

    return events
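
The speed and grouping branches above lean on EventList.speed_multiply and group_by_type, whose behavior is pinned down by the tests in Examples 2 and 16. A condensed sketch of what those branches do to a beat list; the commented result is inferred from the 1/3x and generic cases in Example 16, where speed is applied within each run of same-typed events.

events = EventList([Silence(6), Beat(12), Beat(18), Beat(24), Silence(30)])

# --events-speed 1/2: keep every other event within each same-type run
events.speed_multiply(1 / 2, None)  # -> Silence(6), Beat(12), Beat(24), Silence(30)

# --group-events-by-type Silence: the Silence groups become "selected", the
# Beat run between them "unselected" (Example 2); flatten merges back in order
event_groups = events.group_by_type(select_types=['Silence'])
events = event_groups.flatten()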
Example 7: cuts
    def cuts(self) -> EventList:
        """
        Returns cut events at the boundaries between consecutive segments
        """
        durations = [segment.duration for segment in self.segments]
        locations = loc_util.locations_from_intervals(durations)
        return EventList([Cut(location) for location in locations[:-1]],
                         end=locations[-1])
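
For intuition, a small illustration. That locations_from_intervals returns the cumulative sums of the given durations is an assumption (only its call site is shown); under it, three segments of 2, 3, and 5 seconds yield:

# Cut and EventList as imported by the surrounding module
durations = [2, 3, 5]
locations = [2, 5, 10]  # assumed output of loc_util.locations_from_intervals

# Cut(2) and Cut(5) mark the two boundaries between segments, while the
# final location, 10, becomes the end of the EventList rather than a cut.
cuts = EventList([Cut(location) for location in locations[:-1]], end=locations[-1])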
Example 8: test_event_list__initializes_non_uniform_inputs_successfully
def test_event_list__initializes_non_uniform_inputs_successfully():
    EventList([1, 2, 3, Beat(4)])
Example 9: events_speed_multiplied_1_3x
def events_speed_multiplied_1_3x() -> EventList:
    return EventList([Silence(6),
                      Beat(12),
                      Silence(30)])
Example 10: events_speed_multiplied_1_2x_offset_1
def events_speed_multiplied_1_2x_offset_1() -> EventList:
    return EventList([Silence(6),
                      Beat(18),
                      Silence(30)])
Example 11: events_speed_multiplied_2x
def events_speed_multiplied_2x() -> EventList:
    return EventList([Silence(6),
                      Beat(12), Beat(15),
                      Beat(18), Beat(21),
                      Beat(24),
                      Silence(30)])
Example 12: events_grouped_by_type
def events_grouped_by_type() -> EventGroupList:
    return EventGroupList([EventList([Silence(6)]),
                           EventList([Beat(12), Beat(18), Beat(24)]),
                           EventList([Silence(30)])])
Example 13: test_event_group_list__flatten
def test_event_group_list__flatten():
    assert EventGroupList([EventList([1, 2, 3], end=50), EventList([4, 5, 6], end=50)]).flatten() == \
           EventList([1, 2, 3, 4, 5, 6], end=50)
Example 14: events
def events() -> EventList:
    return EventList([Silence(6),
                      Beat(12),
                      Beat(18),
                      Beat(24),
                      Silence(30)])
Example 15: test_event_group_list__group_by_slices_resulting_groups
def test_event_group_list__group_by_slices_resulting_groups():
    events = EventList([1, 2, 3, 4, 5, 6, 7, 8])
    event_groups = events.group_by_slices([(1, 3), (5, 7)])
    assert event_groups.selected_groups == EventGroupList([[2, 3], [6, 7]])
    assert event_groups.unselected_groups == EventGroupList([[1], [4, 5], [8]])
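
A follow-up sketch combining group_by_slices with the per-group speed_multiply that prepare_events uses in Example 6. The list-of-speeds argument shape mirrors prepare_events' group_speeds; passing None for the group offsets is an assumption.

events = EventList([1, 2, 3, 4, 5, 6, 7, 8])
event_groups = events.group_by_slices([(2, 6)])

# Thin out only the selected middle slice, then merge the groups back in order
event_groups.selected_groups.speed_multiply([1 / 2], None)
events = event_groups.flatten()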
Example 16: test_speed_multiply_events


@pytest.fixture
def events_speed_multiplied_1_3x() -> EventList:
    return EventList([Silence(6),
                      Beat(12),
                      Silence(30)])


def test_event_list__initializes_non_uniform_inputs_successfully():
    EventList([1, 2, 3, Beat(4)])


@pytest.mark.parametrize("events, speed, offset, expected_events", [
    (EventList([]), 5, None, EventList([])),
    (EventList([6]), 0, None, EventList([])),
    (EventList([6]), 3, None, EventList([6])),
    (EventList([6, 12]), 1 / 3, None, EventList([6])),
    (EventList([1, 2, 3, 4, 5, 6, 7, 8]), 1 / 2, None, EventList([1, 3, 5, 7])),
    (events(), 1, None, events()),
    (events(), 1 / 2, None, events_speed_multiplied_1_2x()),
    (events(), 1 / 2, 1, events_speed_multiplied_1_2x_offset_1()),
    (events(), 1 / 3, None, events_speed_multiplied_1_3x()),
    (events(), 2, None, events_speed_multiplied_2x())
])
def test_speed_multiply_events(events, speed, offset, expected_events):
    events.speed_multiply(speed, offset)
    assert events == expected_events