Example #1
    def _convert_array2song(self, array):
        """ Create a new song from a numpy array
        A note will be created for each non empty case of the array. The song will contain a single track, and use the
        default beats_per_tick as midi resolution
        For now, the changes of tempo are ignored. Only 4/4 is supported.
        Warning: All note have the same duration, the default value defined in music.Note
        Args:
            np.array: the numpy array (Warning: could be a array of int or float containing the prediction before the sigmoid)
        Return:
            song (Song): The song to convert
        """

        new_song = music.Song()
        main_track = music.Track()

        scale = self._get_scale(new_song)

        for index, x in np.ndenumerate(array):  # Add some notes
            if x > 1e-12:  # Note added (TODO: What should be the condition, =1 ? sigmoid>0.5 ?)
                new_note = music.Note()

                new_note.set_relative_note(index[0])
                new_note.tick = index[1] * scale  # Absolute time in tick from the beginning

                main_track.notes.append(new_note)

        new_song.tracks.append(main_track)

        return new_song
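For reference, here is a minimal, self-contained sketch of the same piano-roll traversal outside the class. The array shape, the threshold and the scale of 96 ticks per step are assumptions for the illustration; in _convert_array2song the scale comes from self._get_scale.
```
import numpy as np

# Hypothetical piano-roll: rows index the relative pitch, columns index the time step
piano_roll = np.zeros((4, 8))
piano_roll[0, 0] = 0.9  # note at pitch index 0, time step 0
piano_roll[2, 4] = 0.7  # note at pitch index 2, time step 4

scale = 96  # assumed ticks per time step (the real value comes from self._get_scale)

for (pitch_idx, time_step), value in np.ndenumerate(piano_roll):
    if value > 1e-12:  # same threshold as in _convert_array2song
        tick = time_step * scale  # absolute time in ticks from the beginning
        print('pitch index {} starts at tick {}'.format(pitch_idx, tick))
```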
Example #2
    def get_batches_test_old(
        self
    ):  # TODO: This is the old version. Ideally should use the version above
        """ Return the batches which initiate the RNN when generating
        The initial batches are loaded from a json file containing the first notes of the song. The note values
        are the standard midi ones. Here is an examples of an initiator file:
        ```
        {"initiator":[
            {"name":"Simple_C4",
             "seq":[
                {"notes":[60]}
            ]},
            {"name":"some_chords",
             "seq":[
                {"notes":[60,64]}
                {"notes":[66,68,71]}
                {"notes":[60,64]}
            ]}
        ]}
        ```
        Return:
            List[Batch], List[str]: The generated batches with the associated names
        """
        assert self.args.batch_size == 1

        batches = []
        names = []

        with open(self.TEST_INIT_FILE) as init_file:
            initiators = json.load(init_file)

        for initiator in initiators['initiator']:
            raw_song = music.Song()
            main_track = music.Track()

            current_tick = 0
            for seq in initiator['seq']:  # We add a few notes
                for note_pitch in seq['notes']:
                    new_note = music.Note()
                    new_note.note = note_pitch
                    new_note.tick = current_tick
                    main_track.notes.append(new_note)
                current_tick += 1

            raw_song.tracks.append(main_track)
            raw_song.normalize(inverse=True)

            batch = self.batch_builder.process_batch(raw_song)

            names.append(initiator['name'])
            batches.append(batch)

        return batches, names
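As a complement, a small standalone sketch of how an initiator file in the format above can be parsed; the JSON string is inlined here instead of reading self.TEST_INIT_FILE, and note creation is replaced by a print for brevity.
```
import json

raw = '''
{"initiator": [
    {"name": "Simple_C4",
     "seq": [
        {"notes": [60]}
    ]},
    {"name": "some_chords",
     "seq": [
        {"notes": [60, 64]},
        {"notes": [66, 68, 71]},
        {"notes": [60, 64]}
    ]}
]}
'''

initiators = json.loads(raw)
for initiator in initiators['initiator']:
    current_tick = 0
    for seq in initiator['seq']:  # Each step of the sequence lasts one tick
        print(initiator['name'], 'tick', current_tick, 'notes', seq['notes'])
        current_tick += 1
```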
Example #3
    def reconstruct_song(self, rel_song):
        """ Reconstruct the original raw song from the preprocessed data
        See parent class for details

        Some information is lost compared to the original song:
            * Only one track left
            * Original tempo lost
        Args:
            rel_song (RelativeSong): the song to reconstruct
        Return:
            Song: the reconstructed song
        """
        raw_song = music.Song()
        main_track = music.Track()

        prev_note = rel_song.first_note
        main_track.notes.append(rel_song.first_note)
        current_tick = rel_song.first_note.tick
        for next_note in rel_song.notes:
            # Case of separator
            if next_note.pitch_class is None:
                current_tick += 1
                continue

            # Adding the new note
            new_note = music.Note()
            # * Note
            if Relative.NOTE_ABSOLUTE:
                new_note.note = Relative.BASELINE_OFFSET + next_note.pitch_class
            else:
                new_note.note = Relative.BASELINE_OFFSET + (
                    (prev_note.note - Relative.BASELINE_OFFSET) +
                    next_note.pitch_class) % Relative.NB_NOTES_SCALE
            # * Tick
            if Relative.HAS_EMPTY:
                new_note.tick = current_tick
            else:
                new_note.tick = prev_note.tick + next_note.prev_tick
            # * Scale
            # ...
            main_track.notes.append(new_note)
            prev_note = new_note

        raw_song.tracks.append(main_track)
        raw_song.normalize(inverse=True)
        return raw_song
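To make the relative-pitch branch above concrete, here is a minimal sketch of the modular reconstruction; BASELINE_OFFSET=60 and NB_NOTES_SCALE=12 are placeholder values, the real constants being defined on the Relative class.
```
BASELINE_OFFSET = 60  # assumed reference pitch (middle C)
NB_NOTES_SCALE = 12   # assumed scale size (one octave)

def next_absolute_pitch(prev_pitch, pitch_class):
    """ Shift the previous pitch by pitch_class, wrapped inside one octave above the baseline """
    return BASELINE_OFFSET + ((prev_pitch - BASELINE_OFFSET) + pitch_class) % NB_NOTES_SCALE

print(next_absolute_pitch(60, 4))  # 64
print(next_absolute_pitch(71, 3))  # 62 (wraps back inside the octave)
```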
Example #4
    def load_file(filename):
        """ Extract data from midi file
        Args:
            filename (str): a valid midi file
        Return:
            Song: a song object containing the tracks and melody
        """
        # Load in the MIDI data using the midi module
        midi_data = mido.MidiFile(filename)

        # Get header values

        # 3 midi types:
        # * type 0 (single track): all messages are saved in one multi-channel track
        # * type 1 (synchronous): all tracks start at the same time
        # * type 2 (asynchronous): each track is independent of the others

        # Division (ticks per beat (quarter note) or SMPTE timecode)
        # If negative (first byte = 1), the mode is SMPTE timecode (unsupported)
        # 1 MIDI clock = 1 beat = 1 quarter note

        # Assert
        if midi_data.type != 1:
            raise MidiInvalidException('Only type 1 supported ({} given)'.format(midi_data.type))
        if not 0 < midi_data.ticks_per_beat < 128:
            raise MidiInvalidException('SMPTE timecode not supported ({} given)'.format(midi_data.ticks_per_beat))

        # TODO: Support at least for type 0

        # Get tracks messages

        # The tracks are a mix of meta messages, which determine the tempo and signature, and note messages, which
        # correspond to the melody.
        # Generally, the meta events are set at the beginning of each track. In format 1, these meta events should be
        # contained in the first track (known as the 'Tempo Map').

        # If not set, default parameters are:
        #  * time signature: 4/4
        #  * tempo: 120 beats per minute

        # Each event begins with a delta-time value, which corresponds to the number of ticks since the previous
        # event (0 for simultaneous events)

        tempo_map = midi_data.tracks[0]  # Will contain the tick scales
        # TODO: smpte_offset

        # Warning: The drums are filtered

        # Merge tracks ? < Not when creating the dataset
        #midi_data.tracks = [mido.merge_tracks(midi_data.tracks)] ??

        new_song = music.Song()

        new_song.ticks_per_beat = midi_data.ticks_per_beat

        # TODO: Normalize the ticks per beats (same for all songs)

        for message in tempo_map:
            # TODO: Check we are only 4/4 (and there is no tempo changes ?)
            if not isinstance(message, mido.MetaMessage):
                raise MidiInvalidException('Tempo map should not contain notes')
            if message.type in MidiConnector.META_INFO_TYPES:
                pass
            elif message.type == 'set_tempo':
                new_song.tempo_map.append(message)
            elif message.type in MidiConnector.META_TEMPO_TYPES:  # We ignore the key signature and time_signature ?
                pass
            elif message.type == 'smpte_offset':
                pass  # TODO
            else:
                err_msg = 'Header track contains unsupported meta-message type ({})'.format(message.type)
                raise MidiInvalidException(err_msg)

        for i, track in enumerate(midi_data.tracks[1:]):  # We ignore the tempo map
            i += 1  # Warning: We have skipped the track 0 so shift the track id
            #tqdm.write('Track {}: {}'.format(i, track.name))

            new_track = music.Track()

            buffer_notes = []  # Store the current notes (pressed but not released)
            abs_tick = 0  # Absolute nb of ticks from the beginning of the track
            for message in track:
                abs_tick += message.time
                if isinstance(message, mido.MetaMessage):  # Lyrics, track name and other meta info
                    if message.type in MidiConnector.META_INFO_TYPES:
                        pass
                    elif message.type in MidiConnector.META_TEMPO_TYPES:
                        # TODO: Could be just a warning
                        raise MidiInvalidException('Track {} should not contain {}'.format(i, message.type))
                    else:
                        err_msg = 'Track {} contains unsupported meta-message type ({})'.format(i, message.type)
                        raise MidiInvalidException(err_msg)
                    # What about 'sequence_number', cue_marker ???
                else:  # Note event
                    if message.type == 'note_on' and message.velocity != 0:  # Note added
                        new_note = music.Note()
                        new_note.tick = abs_tick
                        new_note.note = message.note
                        if message.channel+1 != i and message.channel+1 != MidiConnector.MIDI_CHANNEL_DRUMS:  # Warning: Mido shifts the channels (they start at 0) # TODO: Channel management for type 0
                            raise MidiInvalidException('Note belongs to the wrong track ({} instead of {})'.format(i, message.channel))  # Warning: May not be an error (drums?), but probably is
                        buffer_notes.append(new_note)
                    elif message.type == 'note_off' or message.type == 'note_on':  # Note released
                        for note in buffer_notes:
                            if note.note == message.note:
                                note.duration = abs_tick - note.tick
                                buffer_notes.remove(note)
                                new_track.notes.append(note)
                                break  # Release only the oldest matching note (and stop iterating over the mutated list)
                    elif message.type == 'program_change':  # Instrument change
                        if not new_track.set_instrument(message):
                            # TODO: We should create another track with the new instrument
                            raise MidiInvalidException('Track {} already has a program defined'.format(i))
                    elif message.type == 'control_change':  # Damper pedal, mono/poly, channel volume,...
                        # Ignored
                        pass
                    elif message.type == 'aftertouch':  # Signal sent after a key has been pressed. What real effect?
                        # Ignored ?
                        pass
                    elif message.type == 'pitchwheel':  # Modulate the song
                        # Ignored
                        pass
                    else:
                        err_msg = 'Track {} contains unsupported message type ({})'.format(i, message)
                        raise MidiInvalidException(err_msg)
                # Message read
            # Track read

            # Assert
            if buffer_notes:  # All notes should have ended
                raise MidiInvalidException('Some notes ({}) did not end'.format(len(buffer_notes)))
            if len(new_track.notes) < MidiConnector.MINIMUM_TRACK_LENGTH:
                #tqdm.write('Track {} ignored (too short): {} notes'.format(i, len(new_track.notes)))
                continue
            if new_track.is_drum:
                #tqdm.write('Track {} ignored (is drum)'.format(i))
                continue

            new_song.tracks.append(new_track)
        # All track read

        if not new_song.tracks:
            raise MidiInvalidException('Empty song. No track added')

        return new_song
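For comparison, a minimal sketch of the same note-pairing idea using only the mido API; 'example.mid' is a placeholder path, and the tempo map handling, channel checks and track filtering of load_file are omitted.
```
import mido

midi_data = mido.MidiFile('example.mid')  # placeholder path
print('type:', midi_data.type, '- ticks per beat:', midi_data.ticks_per_beat)

for i, track in enumerate(midi_data.tracks):
    abs_tick = 0
    active = {}  # note number -> tick at which it was pressed
    for message in track:
        abs_tick += message.time  # accumulate delta times into an absolute tick
        if isinstance(message, mido.MetaMessage):
            continue
        if message.type == 'note_on' and message.velocity > 0:  # key pressed
            active[message.note] = abs_tick
        elif message.type in ('note_off', 'note_on'):  # key released (note_on with velocity 0 also releases)
            start = active.pop(message.note, None)
            if start is not None:
                print('track {}: note {} from tick {} to {}'.format(i, message.note, start, abs_tick))
```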