Example #1
    def _get_master_track(self):
        # Merge all tracks and keep only the note messages
        try:
            return [
                msg for msg in merge_tracks(self.midi.tracks)
                if msg.type in ('note_on', 'note_off')
            ]
        except TypeError:
            # Merging every track failed; retry without the first track
            return [
                msg for msg in merge_tracks(self.midi.tracks[1:])
                if msg.type in ('note_on', 'note_off')
            ]
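For context: mido.merge_tracks combines several tracks into one MidiTrack, sorting messages by absolute time and recomputing their relative (delta) times. A minimal sketch of the behavior the snippet above relies on, assuming a file 'song.mid' exists:

import mido

midi = mido.MidiFile('song.mid')
merged = mido.merge_tracks(midi.tracks)  # one MidiTrack, delta times recomputed
note_msgs = [msg for msg in merged if msg.type in ('note_on', 'note_off')]
print(len(note_msgs), 'note messages')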
Example #2
    def ai_text_from_file(self, midi_path):
        """
        Given a midi file, parse out the midi to a string we can store for
        later use, and later train the ai on
        """
        mid = mido.MidiFile(midi_path)

        # TODO should we grab the time signature?
        # Or just have everything at the same tempo?
        # Or a meta character for time signature?

        ai_text = ''
        notes_on = []
        # NOTE: assumes note-offs are encoded as note_on messages with
        # velocity 0; explicit note_off messages are dropped here
        note_events = [
            msg for msg in mido.merge_tracks(mid.tracks)
            if msg.type == 'note_on'
        ]
        for note_event in note_events:
            # Add the notes that are currently being played, for the time
            # they are currently played
            chord_chars = [self.char_keyboard[note]
                           for note in notes_on] + [' ']
            chord_chars *= note_event.time

            ai_text += ''.join(chord_chars)

            # TODO can we add a few levels of velocity?
            if note_event.velocity > 0:
                notes_on.append(note_event.note)
            else:
                notes_on.remove(note_event.note)

        assert notes_on == []

        return ai_text
Example #3
    def midi_to_data(self, midi: mido.MidiFile) -> tuple[np.ndarray, list]:
        ticks_per_beat = midi.ticks_per_beat
        simple_seq = [[-1, -1, -1]]
        vels = [0]
        offset = 0

        for i, msg in enumerate(mido.merge_tracks(midi.tracks)):
            if msg.type[:4] == "note":
                note = msg.note
                vel = msg.velocity
                time = msg.time / ticks_per_beat + offset

                if vel != 0:
                    time = round(time, 6)
                    simple_seq.append([note, time, 0])
                    vels.append(vel)
                    offset = 0

                else:
                    offset = time
                    ind = len(simple_seq) - 1
                    length = time
                    # Walk backwards through the list until the pending
                    # length of the note matching the current note value is set
                    change_ind = None
                    last_length = None

                    def update_length(last_length):
                        simple_seq[change_ind][2] = round(last_length, 5)

                    while ind >= 0:
                        if simple_seq[ind][0] == note:
                            if simple_seq[ind][2] == 0:
                                change_ind = ind
                                last_length = length
                            elif change_ind is not None:
                                update_length(last_length)
                                break

                        elif ind == 0 and change_ind is not None:
                            update_length(last_length)
                            break

                        time = simple_seq[ind][1]
                        length += time
                        ind -= 1

        matrix_seq = [[-1] * 128]
        for i, event in enumerate(simple_seq):
            note, time, length = event
            next_matrix = matrix_seq[-1][:]
            next_matrix = [
                x - time if x - time > 0 else 0 for x in next_matrix
            ]
            next_matrix[note] = max(next_matrix[note], length)
            if i > 0:
                matrix_seq.append(next_matrix)

        return np.array(matrix_seq), np.array(vels)
Example #4
def load_midi(filename):
    # Load using mido and convert to the event representation
    midi = mido.MidiFile(filename)
    events = rep_from_midi(midi.ticks_per_beat, mido.merge_tracks(midi.tracks))

    return events
Example #5
def to_point_set(path, chosen_tracks):
	"""Produce a point set representation of MIDI file in given path.

	Args:
		path: path to the MIDI file
		chosen_tracks: list of track numbers that the user wants to
		be included in the point set

	Returns:
		Point set representation of MIDI file in given path. The point
		set is a list of notes, and notes are tuples of note onset and
		MIDI pitch number
	"""
	point_set = set()
	mid = mido.MidiFile(path)
	mid = _filter_tracks(mid, chosen_tracks)
	mid = mido.merge_tracks(mid.tracks)

	note_onset = 0
	tempo = 1  # raw ticks are used until the first set_tempo message
	for msg in mid:
		if msg.type == 'set_tempo':
			tempo = msg.tempo
		note_onset += msg.time * tempo
		if msg.type == 'note_on' and msg.velocity != 0:
			point_set.add((note_onset, msg.note))

	point_set = sorted(point_set)
	point_set = _limit_point_set_length(point_set, 150)

	return point_set
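Hypothetical usage of to_point_set (the file path and track numbers are assumptions; _filter_tracks and _limit_point_set_length come from the same module):

points = to_point_set('song.mid', chosen_tracks=[0, 1])
for onset, pitch in points[:5]:
    print(onset, pitch)  # (note onset, MIDI pitch number) tuples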
Example #6
def convert_midi_to_grid(m, ticks_per_step):  # m is a MidiFile object
    # merge all tracks into one
    trk = merge_tracks(m.tracks)

    raw_notes = [
        [0] * 128
    ]  # grid indexed as [time step][pitch]; 1 while a note sounds, 0 otherwise
    timer = 0  # measured in steps, continues through the entire MIDI file
    no_offs = True
    # iterate through all midi messages in file and create grid of raw music
    for msg in trk:
        delta_t = msg.time  # in ticks
        delta_steps = (timer +
                       delta_t) // ticks_per_step - timer // ticks_per_step
        if delta_steps > 0:  # fill in array until next MIDI event then advance timer
            raw_notes += [raw_notes[-1][:] for _ in range(delta_steps)]

        timer += delta_t

        # NB fast notes will be clustered!
        if msg.type == 'note_on':
            raw_notes[timer // ticks_per_step][msg.note] = 1
        elif msg.type == 'note_off':
            raw_notes[timer // ticks_per_step][msg.note] = 0
            no_offs = False

    return np.asarray(raw_notes), no_offs
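A usage sketch; the file name and the 120-tick step size are assumptions:

import mido

grid, no_offs = convert_midi_to_grid(mido.MidiFile('song.mid'), ticks_per_step=120)
print(grid.shape)  # (time steps, 128)
print(no_offs)     # True if the file never used explicit note_off messages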
Example #7
    def parse_midi_file(self, midi_file: MidiFile) -> None:
        """
        Метод инициализации обертки из файла. Собирает все данные с файла и сливает их в один трек.
        Ударные не исключаются! После слияния разбивает трек на "минимальные заданные доли" и
        собирает набор звучащих нот
        Args:
            midi_file (MidiFile): Произвольный MIDI-файл
        Returns:
            None
        """
        merged_track = mido.merge_tracks(midi_file.tracks)
        ticks_per_division = int(midi_file.ticks_per_beat / self.division *
                                 self.numerator)
        global_time = 0

        sample = []
        current = [0] * 128  # one slot per MIDI note (0-127)

        for message in merged_track:
            if message.is_meta and message.type == 'time_signature':
                self.numerator = message.numerator
                self.denominator = message.denominator

            if message.time > 0:
                for i in range(int(message.time / ticks_per_division)):
                    sample.append(list(current))

            if message.type == 'note_on':
                current[message.note] = 1
            if message.type == 'note_off':
                current[message.note] = 0
            global_time += message.time
        self.divisions = sample
Example #8
def MergeMidiList(track_list):
    """Merge a list of MIDI tracks into a MidiFile with a single track."""
    new_midi = MidiFile()
    new_track = MidiTrack()
    for track in track_list:
        new_track = merge_tracks([new_track, track])
    new_midi.tracks.append(new_track)
    return new_midi
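A usage sketch; the file name is an assumption, and MergeMidiList itself assumes `from mido import MidiFile, MidiTrack, merge_tracks`:

from mido import MidiFile

combined = MergeMidiList(MidiFile('song.mid').tracks)  # MidiFile with one merged track
combined.save('merged.mid')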
Example #9
def getMidiNotes(mid):
    """Converts a MIDI message list into a list of chords (represented as
    arrays of notes). Channel 0 is represented in notes 0 to 200, channel 1
    in 200 to 400, and so on."""
    track = mido.merge_tracks(mid.tracks)
    notes = []
    result = []
    for msg in track:
        d = msg.dict()
        tm = d['time']
        try:
            current_note = d['note'] + 200 * d['channel']
            current_velocity = d['velocity']
            if current_velocity == 0:
                # velocity 0 acts as a note-off
                if current_note in notes:
                    notes.remove(current_note)
            elif current_note not in notes:
                notes.append(current_note)
            if tm != 0:
                result.append([list(notes), tm])
        except KeyError:
            # messages without note/velocity/channel (e.g. meta) pass through
            result.append(msg)
    return result
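A usage sketch ('song.mid' is an assumption); each entry is either a [sounding_notes, delta_time] pair or a message passed through unchanged:

import mido

for item in getMidiNotes(mido.MidiFile('song.mid'))[:10]:
    print(item)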
Example #10
    def midifile_to_dict(mid):
        '''Pull out midi messages and specifically get note_on and note_off messages'''
        notes = []
        other_messages = []
        time_so_far = 0
        for msg in mido.merge_tracks(mid.tracks):
            msg_dict = vars(msg).copy()
            time_so_far += msg_dict["time"]

            if (msg_dict["type"] == "note_on"
                    or msg_dict["type"] == "note_off"):
                note = {
                    "channel": msg_dict["channel"],
                    "type": msg_dict["type"],
                    "note": msg_dict["note"],
                    "velocity": msg_dict["velocity"],
                    "time": msg_dict["time"],
                    "time_so_far": time_so_far
                }
                notes.append(note)
            else:
                other_messages.append({
                    "time_so_far": time_so_far,
                    "message": msg_dict
                })
        return notes, other_messages
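A usage sketch, assuming the function is exposed as a plain function or staticmethod (it takes the MidiFile directly rather than self):

import mido

notes, other_messages = midifile_to_dict(mido.MidiFile('song.mid'))
print(notes[0])             # e.g. {'channel': 0, 'type': 'note_on', ...}
print(len(other_messages))  # meta and non-note messages with their timestamps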
Example #11
def vectorize_midi(midi, frame_dur=16, drum_mode=False, merge_tracks=False):
    if merge_tracks:
        return vectorize_track(mido.merge_tracks(midi.tracks),
                               midi.ticks_per_beat, frame_dur, drum_mode)
    else:
        vectorized_tracks = [
            vectorize_track(track, midi.ticks_per_beat, frame_dur, drum_mode)
            for track in midi.tracks
        ]
        return [track for track in vectorized_tracks if track is not None]
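A usage sketch; 'song.mid' is an assumption, and vectorize_track is defined elsewhere in the same source:

import mido

vectors = vectorize_midi(mido.MidiFile('song.mid'), frame_dur=16, merge_tracks=True)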
Example #12
    def midi_to_data(self, mid, vocabs) -> tuple[np.ndarray, list]:
        if not vocabs:
            vocabs = [["-1"]]
        vocab = vocabs[0]
        sequence = [[vocab.index("-1")] * 128]
        ticks_per_beat = mid.ticks_per_beat
        mult = self.ticks_per_beat / ticks_per_beat

        start = False
        # per-pitch state strings: "0" = silent, "2" = onset, "3" = release,
        # "1" = held over from a previous step (inferred from the re-encoding below)
        step_matrix = ["0"] * 128
        playing = [0] * 128
        prev_note = None
        for i, msg in enumerate(mido.merge_tracks(mid.tracks)):
            if msg.type[:4] == "note":
                note = msg.note
                vel = msg.velocity
                time = round(msg.time * mult)

                if time > 0 and start:
                    add = []
                    for n, xs in enumerate(step_matrix[:]):
                        if xs not in vocab:
                            vocab.append(xs)
                        add.append(vocab.index(xs))
                    sequence.append(add[:])

                    for j, value in enumerate(add):
                        if vocab[value][-1] == "2":
                            if "1" not in vocab: vocab.append("1")
                            add[j] = vocab.index("1")
                        else:
                            add[j] = value
                    sequence += [add[:]] * (time - 1)

                if vel != 0:
                    if time == 0 and prev_note == note:
                        step_matrix[note] += "2"
                    else:
                        step_matrix[note] = "2"
                    playing[note] += 1
                else:
                    if playing[note] > 0:
                        playing[note] -= 1

                    if time == 0 and prev_note == note:
                        step_matrix[note] += "3"
                    else:
                        step_matrix[note] = "3"

                start = True
                prev_note = note

        sequence.append([vocab.index(x) for x in step_matrix[:]])

        return np.array(sequence), vocabs
Example #13
    def __init__(self, filename, level=1):
        midi = MidiFile(filename)
        track = merge_tracks(midi.tracks)
        self.metamessages = []
        byte_list = []
        for message in track:
            if not isinstance(message, MetaMessage):
                byte_list.extend(message.bytes())
            else:
                self.metamessages.append(message)
        super().__init__(byte_list, level)
Example #14
    def action_deleteTrack(self):
        print("Delete Track")

        self.deleteWindow = DW.DeleteWindow(self, self.audioTrackNames,
                                            self.midiTrackNames,
                                            self.midiTracks, self.audioTracks)
        self.deleteWindow.exec_()
        updatedAudioList = self.deleteWindow.getAudioNames()
        updatedMidiList = self.deleteWindow.getMidiNames()
        self.midTrack = mido.MidiFile(type=1)
        mergeTrack = mido.merge_tracks(self.midiTracks)
        self.midTrack.tracks.append(mergeTrack)
        self.checkNumberOfTracks()
Example #15
    def parse(self, path):
        midi = mido.MidiFile(path)

        self.update_timing(midi)

        # note_on velocity=0, and note_off are equivalent

        last_velocity = 64

        events = []

        total_time = 0

        # Merge everything except the first track
        tracks = mido.merge_tracks(midi.tracks[1:])
        for msg in tracks:
            if msg.is_meta or not hasattr(msg, 'note'):
                continue

            # Read the delta time once so total_time stays correct even for
            # note messages that are neither note_on nor note_off
            time = msg.time

            if msg.type == 'note_on':
                note = msg.note
                velocity = msg.velocity

                self._add_time(time, events)

                if velocity != last_velocity and velocity != 0:
                    events.append(
                        (MidiParse.velocity, velocity // self.velocity_div))
                    last_velocity = velocity

                if velocity == 0:
                    events.append((MidiParse.note_off, note))
                else:
                    events.append((MidiParse.note_on, note))

            if msg.type == 'note_off':
                note = msg.note

                self._add_time(time, events)

                events.append((MidiParse.note_off, note))

            total_time += time

        # Skip files shorter than 30 seconds of material
        if total_time / self.ticks_per_second < 30:
            return

        out = np.argmax(
            np.array(list(map(lambda x: self._parse_vec(x), events))),
            1).astype(np.uint16)
        basename, _ = os.path.splitext(os.path.basename(path))
        out_path = os.path.join(self.data_dir, basename)
        np.save(out_path, out)
Example #16
    def __init__(self, path: str):
        self.path = self.absolute_path(path)
        super().__init__(self.path, clip=True)

        self.messages = None
        self.time_marks = None
        self.tempo = DEFAULT_TEMPO

        # merge tracks for simple playback if the file is not asynchronous
        if self.type != 2:
            self.messages, self.time_marks = self._calculate_time(
                merge_tracks(self.tracks))
Example #17
    def to_midi_tracks(self, max_length=1024):

        ticks = 100
        beat_tracks = [
            GateToMidi(MaxLength(Repeat(beat), max_length),
                       note=note,
                       ticks=ticks).to_midi_track(channel=GmDrum.Channel)
            for note, beat in self.beats.items()
        ]

        beat_track = mido.merge_tracks(beat_tracks)

        return [beat_track]
Example #18
def load_midi(fname):
    cache_path = os.path.join(CACHE_DIR, fname + '.npy')
    try:
        seq = np.load(cache_path)
    except Exception:
        # Cache miss: parse and convert the MIDI file
        mid = mido.MidiFile(fname)
        track = mido.merge_tracks(mid.tracks)
        seq = midi_to_seq(mid, track)

        # Perform caching
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)
        np.save(cache_path, seq)
    return seq
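A usage sketch; CACHE_DIR and midi_to_seq are defined elsewhere in the source, and 'song.mid' is an assumption:

seq = load_midi('song.mid')  # parses on the first call, then reads the .npy cache
print(seq.shape)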
Example #19
def midi_to_p_roll(mid, Nyquist_rate, sample_duration, pitch_range):
    '''
    Description:
    Converts a MIDI file into a piano roll of required time length and pitch range.
    
    **Algorithm was inspired by the method adopted by Jain et al. (http://cs229.stanford.edu/proj2019aut/data/assignment_308832_raw/26583519.pdf)
    '''

    piano_size = pitch_range[1] - pitch_range[0]
    p_roll = np.zeros([piano_size + 1, Nyquist_rate * sample_duration])
    track = mido.merge_tracks(mid.tracks)

    current_time = 0
    current_position = 0
    on_notes = np.zeros(piano_size + 1)
    tempo = 0  # stays 0 (so delta is 0) until the first set_tempo message

    for msg in track:

        if msg.time > 0:
            delta = mido.tick2second(msg.time, mid.ticks_per_beat, tempo)
        else:
            delta = 0
        if hasattr(msg, "note"):
            if msg.type == "note_on":
                if pitch_range[0] <= msg.note <= pitch_range[1]:
                    on_notes[msg.note - pitch_range[0]] = msg.velocity
            else:
                if pitch_range[0] <= msg.note <= pitch_range[1]:
                    on_notes[msg.note - pitch_range[0]] = 0
        current_time += delta

        if current_time > sample_duration:
            break

        new_position = np.floor(current_time * Nyquist_rate).astype(int)

        if new_position > current_position:
            # Hold the current note states for every column up to new_position
            block = np.tile(on_notes.reshape(piano_size + 1, 1),
                            new_position - current_position)
            p_roll[:, current_position:new_position] = block
            current_position = new_position

        if hasattr(msg, "tempo"):
            tempo = msg.tempo

    return p_roll
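A usage sketch with hypothetical parameters (16 samples per second, a 30-second window, and the 88-key piano range):

import mido

roll = midi_to_p_roll(mido.MidiFile('song.mid'), Nyquist_rate=16,
                      sample_duration=30, pitch_range=(21, 108))
print(roll.shape)  # (88, 480): 88 pitches by 16 * 30 time columns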
Example #20
    def midi_file_to_numpy(self, midifile: mido.MidiFile) -> np.ndarray:
        """
        Used to convert a single midifile.
        Transforms the given `midifile` into nparray songs. Returns the songs.
        """
        messages = mido.merge_tracks(midifile.tracks)
        ppq = midifile.ticks_per_beat
        time_signatures = self.get_time_signatures(midifile)

        songs = []
        song = np.zeros((NUM_MEASURES, NUM_TIMES, NUM_NOTES), dtype=bool)
        time = 0
        cur_time_signature = time_signatures.pop(0)
        cur_measure = 0
        time_of_prev_measure = 0
        measure_duration = cur_time_signature.numerator * ppq  # assumes quarter-note beats (denominator 4)
        for note in messages:
            time += note.time
            # Update time signature
            if len(time_signatures) and time > time_signatures[0].time:
                cur_time_signature = time_signatures.pop(0)

            # Update current measure
            if time >= time_of_prev_measure + measure_duration:
                time_since_prev = time - time_of_prev_measure
                cur_measure += time_since_prev // measure_duration
                # Update current song
                if cur_measure >= NUM_MEASURES:
                    cur_measure %= NUM_MEASURES
                    songs.append(song)
                    song = np.zeros((NUM_MEASURES, NUM_TIMES, NUM_NOTES),
                                    dtype=bool)
                time_of_prev_measure += measure_duration
                measure_duration = cur_time_signature.numerator * ppq

            # Update note
            if note.type == "note_on" and note.velocity != 0:
                tensor_time = self.midi_to_tensor_time(
                    time, ppq, cur_time_signature.numerator)
                while note.note >= 96:
                    note.note -= 12  # Transpose down an octave
                song[cur_measure, tensor_time, note.note] = True

        # There are notes in the last measure
        if (song[-1] == 1).any():
            songs.append(song)

        return np.asarray(songs, dtype=bool)
Example #21
    def parse_midi_file(path: str, division: int) -> ParsedMidi:
        midi = MidiFile(path)
        numerator = 4
        denominator = 4

        for track in midi.tracks:
            for message in track:
                if message.is_meta and message.type == 'time_signature':
                    numerator = message.numerator
                    denominator = message.denominator

        ticks_per_division = int(midi.ticks_per_beat /
                                 (division * numerator / denominator))
        global_time = 0

        result_interpretation = []
        merged = merge_tracks(midi.tracks)

        for track in [merged]:
            if len(track) < 128:
                continue

            track_interpretation = []
            current = [0] * 128  # one slot per MIDI note (0-127)
            add_to_parsing = True

            for message in track:
                if message.time > 0:
                    for i in range(int(message.time / ticks_per_division)):
                        track_interpretation.append(list(current))

                if message.type == 'note_on':
                    current[message.note] = 1
                if message.type == 'note_off':
                    current[message.note] = 0
                global_time += message.time

            if add_to_parsing and len(track_interpretation) > 64:
                result_interpretation.append(track_interpretation)

        result = ParsedMidi(name=path,
                            division=division,
                            numerator=numerator,
                            denominator=denominator,
                            track_count=len(result_interpretation),
                            divisions=result_interpretation)
        return result
Example #22
def parse_events(f):
    midi = MidiFile(f)
    events = []
    tracks = [midi.tracks[0]]
    tracks.extend(
        [track for track in midi.tracks if 'piano' in track.name.lower()])
    key_offset = 0
    key_set = False
    # The first key_signature message sets the transposition offset;
    # every other message is converted to an event
    for msg in merge_tracks(tracks):
        if not key_set and msg.type == 'key_signature':
            key_offset = key_to_semitone_offset(msg.key)
            key_set = True
        else:
            events.append(msg2event(msg, key_offset))
    return events
Example #23
def get_first_notes(midi, length=200):
    track = mido.merge_tracks(midi.tracks)
    notes = []
    count = 0
    ind = 0
    # Advance `ind` on every message; the original never moved past the
    # first note message, appending it repeatedly
    while count < length and ind < len(track):
        msg = track[ind]
        ind += 1

        if msg.type[:4] != "note":
            continue

        notes.append([msg.note, msg.velocity, msg.time])
        count += 1

    return notes
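A usage sketch; the file name and length are assumptions:

import mido

notes = get_first_notes(mido.MidiFile('song.mid'), length=50)
print(notes[:3])  # [note, velocity, delta_time] triples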
Example #24
def getNotes(f):
    notes = []
    global_tick = 0
    delta_tick = 0
    merged_tracks = mido.merge_tracks(f.tracks)
    for msg in merged_tracks:
        global_tick += msg.time
        if msg.is_meta:
            delta_tick += msg.time
            continue
        if msg.type == "note_on" and msg.velocity != 0:
            notes.append((msg.type, msg.note, numberToNote(msg.note),
                          msg.velocity, msg.time + delta_tick, global_tick))
            delta_tick = 0
            continue
        delta_tick += msg.time
    return notes
Example #25
def midi2data(filename, min_note_denom=32, duration_categories=8, volumes=1):
    """
    Convert a midi file into an array of 4-vectors, containing
    [pitch, octave, volume, duration]

    All note_off and note_on with velocity 0 messages are ignored.
    This means that notes cannot really be held and preserved through this function.
    """

    midifile = mido.MidiFile(filename)

    if min_note_denom % 4 != 0:
        raise ValueError("min_note_denom is not divisible by 4")

    if midifile.ticks_per_beat % (min_note_denom // 4) != 0:
        print("Warning: call to midi2data with min_note_denom "
              + str(min_note_denom)
              + ", but those notes do not have an integer delta time")
    delta_step = midifile.ticks_per_beat // (min_note_denom // 4)

    # create array with the following four elements in each row
    # [pitch, octave, volume, duration]
    messages = []
    time = 0
    for msg in mido.merge_tracks(midifile.tracks):
        time += msg.time  # accumulate time even for ignored messages
        if msg.is_meta or msg.type != 'note_on' or msg.velocity == 0:
            continue

        # convert message note to pitch and octave
        pitch, octave = note2pitch_octave(msg.note)

        # convert message velocity into volume.
        volume = velocity2volume(msg.velocity, volumes)

        # convert message time into several messages that add up to the total time, within delta_step
        ticks = time2ticks(time, delta_step)
        durations = ticks2durations(ticks, duration_categories)
        for duration in durations:
            messages.append([pitch, octave, volume, duration])

        time = 0  # reset time after adding a message

    # finally, turn it into a numpy array, and return it
    messages = np.array(messages)
    return messages
Example #26
def select_notes_from_file(midi_file):
    midi_file = MidiFile(midi_file)
    track = mido.merge_tracks(midi_file.tracks)
    ticks_per_beat = midi_file.ticks_per_beat

    notes = []
    play_time = 0
    for msg in track:
        m = msg.dict()
        play_time += m.get('time')

        # each beat is a quarter note; `sixteenth` is the position within
        # the beat measured in sixteenth notes
        sixteenth = (play_time % ticks_per_beat) / (ticks_per_beat / 4)

        if (m.get('type') == 'note_on' and m.get('velocity') != 0
                and (sixteenth % 2)):
            notes.append(chromatic_scale.get(m.get('note') % 12))
    return notes
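chromatic_scale is not shown in the snippet; presumably it maps pitch classes 0-11 to note names, along these lines:

chromatic_scale = {0: 'C', 1: 'C#', 2: 'D', 3: 'D#', 4: 'E', 5: 'F',
                   6: 'F#', 7: 'G', 8: 'G#', 9: 'A', 10: 'A#', 11: 'B'}

notes = select_notes_from_file('song.mid')  # the file name is an assumption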
Example #27
def melody(filename):

    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)

    midi = mido.MidiFile(filename, clip=True)

    # a = calcMelody
    a = 0  # channel to extract; 16 (an invalid channel) means "none"
    if a != 16:
        for msg in mido.merge_tracks(midi.tracks):
            # compare the channel attribute directly instead of parsing the
            # string form, which breaks for channel numbers >= 10
            if getattr(msg, 'channel', None) == a:
                track.append(msg.copy())
    mid.save('melody.mid')
    return track
Example #28
    def __iter__(self):
        # The tracks of type 2 files are not in sync, so they can
        # not be played back like this.
        if self.type == 2:
            raise TypeError("can't merge tracks in type 2 (asynchronous) file")

        tempo = DEFAULT_TEMPO
        time_signature = DEFAULT_TIME_SIGNATURE
        cum_delta = 0
        for msg in mido.merge_tracks(self.tracks):
            # Convert relative message time to desired unit
            if msg.time > 0:
                if self.unit.lower() in ('t', 'ticks'):
                    delta = msg.time
                elif self.unit.lower() in ('s', 'sec', 'seconds'):
                    delta = tick2second(msg.time, self.ticks_per_beat, tempo)
                elif self.unit.lower() in ('b', 'beats'):
                    delta = tick2beat(msg.time, self.ticks_per_beat,
                                      time_signature)
                else:
                    raise ValueError("`unit` must be either 'ticks', 't', "
                                     "'seconds', 's', 'beats', 'b', not %s." %
                                     self.unit)
            else:
                delta = 0
            # Convert relative time to absolute values if needed
            if self.timing.lower() in ('a', 'abs', 'absolute'):
                cum_delta += delta
            elif self.timing.lower() in ('r', 'rel', 'relative'):
                cum_delta = delta
            else:
                raise ValueError("`timing` must be either 'relative', 'rel', "
                                 "'r', or 'absolute', 'abs', 'a', not %s." %
                                 self.timing)

            yield msg.copy(time=cum_delta)

            if msg.type == 'set_tempo':
                tempo = msg.tempo
            elif msg.type == 'time_signature':
                time_signature = (msg.numerator, msg.denominator)
Example #29
    def __init__(self, filename, tempo, disable_vel, speed_mult):
        """
        Convert a .midi (or .mid) file into an MML code (mostly) usable by the Jukebox in 7 Deadly Sins: Grand Cross

        :param filename: Filepath to the midi
        :param tempo: Tempo in BPM
        :param disable_vel: Disables note velocity so all notes are the same volume.
        :param speed_mult: Use this to scale the lengths of all the notes. Greater than 1 speeds up
        the song and less than 1 slows it down. For example, if speed_mult = 0.5, all quarter notes
        become half notes, etc. If speed_mult is 2.0, all quarter notes become eighth notes. Since GC
        only supports a minimum of 16th notes, this may come in handy if your MIDI has a lot of 64th
        notes.
        """
        self.midi = MidiFile(filename)
        self.tempo = tempo
        self.track = Track(
            merge_tracks(self.midi.tracks),
            self.midi.ticks_per_beat,
            disable_vel,
            speed_mult
        )
Example #30
def load_midi(fname):
    print('load_midi: fname', fname)

    cache_path = os.path.join(CACHE_DIR, fname + '.npy')
    print('load_midi: cache_path=', cache_path)
    try:
        print('load_midi: before np.load')
        seq = np.load(cache_path)
        print('load_midi: after np.load')
    except Exception:
        # Cache miss: parse and convert the MIDI file
        mid = mido.MidiFile(fname)
        track = mido.merge_tracks(mid.tracks)
        seq = midi_to_seq(mid, track)
        # Perform caching
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)
        print('load_midi: after makedirs')
        np.save(cache_path, seq)
        print('load_midi: after save(cache_path, seq)')

    # Return outside the except block, so cache hits return the loaded
    # sequence instead of None
    return seq
Example #31
    def load_midi(self, song_path):
        if song_path in self.is_loaded_midi.keys():
            return

        self.is_loaded_midi.clear()
        self.is_loaded_midi[song_path] = True
        self.loading = 1  # 1 = Load..
        self.is_started_midi = False  # Stop current learning song
        self.t = threading.current_thread()

        try:
            # Load the midi file
            mid = mido.MidiFile('Songs/' + song_path)

            # Get tempo and Ticks per beat
            self.song_tempo = self.get_tempo(mid)
            self.ticks_per_beat = mid.ticks_per_beat

            # Assign Tracks to different channels before merging to know the message origin
            self.loading = 2  # 2 = Process
            if len(mid.tracks) == 2:  # check if the midi file has only 2 tracks
                offset = 1
            else:
                offset = 0
            for k in range(len(mid.tracks)):
                for msg in mid.tracks[k]:
                    if not msg.is_meta:
                        msg.channel = k + offset
                        if msg.type == 'note_off':
                            msg.velocity = 0

            # Merge tracks
            self.loading = 3  # 3 = Merge
            self.song_tracks = mido.merge_tracks(mid.tracks)
            fastColorWipe(self.ledstrip.strip, True, self.ledsettings)
            self.loading = 4  # 4 = Done
        except Exception:
            self.loading = 5  # 5 = Error!
            self.is_loaded_midi.clear()
Example #32
def midi_to_p_roll(mid, Nyquist_rate, sample_duration):

    p_roll = np.zeros([128, Nyquist_rate * sample_duration])
    track = mido.merge_tracks(mid.tracks)

    current_time = 0
    current_position = 0
    on_notes = np.zeros(128)
    tempo = 0  # stays 0 (so delta is 0) until the first set_tempo message

    for msg in track:

        if msg.time > 0:
            delta = mido.tick2second(msg.time, mid.ticks_per_beat, tempo)
        else:
            delta = 0
        if hasattr(msg, "note"):
            if msg.type == "note_on":
                on_notes[msg.note] = msg.velocity
            else:
                on_notes[msg.note] = 0
        current_time += delta

        if current_time > sample_duration:
            break

        new_position = np.floor(current_time * Nyquist_rate).astype(int)

        if new_position > current_position:
            # Hold the current note states for every column up to new_position
            block = np.tile(on_notes.reshape(128, 1),
                            new_position - current_position)
            p_roll[:, current_position:new_position] = block
            current_position = new_position

        if hasattr(msg, "tempo"):
            tempo = msg.tempo

    return p_roll