Example #1
import copy
from typing import List

import midi  # python-midi


def notes_from_file(filename: str) -> List[Note]:
    # Read the file twice: once with relative ticks (as stored in the file)
    # and once converted to absolute ticks, so each event carries its
    # absolute position while the original event data stays available.
    midifile_rel = midi.read_midifile(filename)
    midifile_abs = copy.deepcopy(midifile_rel)
    midifile_abs.make_ticks_abs()

    # Convert MIDI events to our music representation: a list of Note objects
    notes = []
    active_notes = {}  # pitch -> set of Notes that have started but not ended

    for ev_rel, ev_abs in zip(midifile_rel[-1], midifile_abs[-1]):
        # A NoteOn with non-zero velocity starts a note
        if isinstance(ev_rel, midi.NoteOnEvent) and ev_rel.data[1]:
            n = Note()
            n.resolution = midifile_rel.resolution
            n.tick_abs = ev_abs.tick
            n.pitch = ev_rel.data[0]
            n.velocity = ev_rel.data[1]
            if n.pitch not in active_notes:
                active_notes[n.pitch] = {n}
            else:
                active_notes[n.pitch].add(n)
        # A NoteOff, or a NoteOn with velocity 0, ends a note of that pitch
        elif isinstance(ev_rel, midi.NoteOffEvent) or (
                isinstance(ev_rel, midi.NoteOnEvent) and ev_rel.data[1] == 0):
            n = active_notes[ev_rel.data[0]].pop()
            n.duration = ev_abs.tick - n.tick_abs
            notes.append(n)
    assert not any(active_notes.values()), "Some notes were not released"
    return sorted(notes, key=lambda note: note.tick_abs)
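
All three examples rely on a project-specific Note class rather than a library type. Below is a minimal sketch of the shape they assume: the attribute names come from the examples themselves, while the default values and the derived properties are guesses added here for illustration.

class Note:
    """Sketch of the note type the examples assume; values are illustrative."""
    default_resolution = 960     # ticks per quarter note (Example #3 sets 960)
    meter = 4                    # beats per measure (assumed 4/4)
    ticks_quantisation_rate = 1      # assumed scaling factors, used in
    duration_quantisation_rate = 1   # Example #2 to dequantise rhythm values

    def __init__(self):
        self.resolution = Note.default_resolution
        self.tick_abs = 0   # absolute onset time, in ticks
        self.pitch = 0      # MIDI pitch (0-127)
        self.velocity = 0   # MIDI velocity (0-127)
        self.duration = 0   # length in ticks

    # Derived positions; the exact formulas are assumptions.
    @property
    def beat(self) -> int:
        return self.tick_abs // self.resolution

    @property
    def measure(self) -> int:
        return self.beat // Note.meter

    @property
    def ticks_since_beat(self) -> int:
        return self.tick_abs % self.resolution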
Example #2
import math
from typing import List, Optional, Union


def generate(past: List[Note], changes: ChordProgression,
             melody_generator: Union[MelodyGenerator, MelodyAndRhythmGenerator, UniversalGenerator],
             rhythm_generator: Optional[RhythmGenerator],
             measures: int) -> List[Note]:
    """ Improvise a melody using two models, one for the melody and one for the rhythm, over a chord progression

        :param past: the seed notes
        :param changes: the chord progression
        :param melody_generator: the model proposing pitches (and the rhythm too, if it is a UniversalGenerator)
        :param rhythm_generator: the model proposing the rhythm.
            It can be the same as the melody generator, or equivalently None.
        :param measures: the number of measures to generate
        """
    if rhythm_generator is None:
        rhythm_generator = melody_generator  # type: RhythmGenerator
    universal = isinstance(melody_generator, UniversalGenerator)
    melody = past  # generated notes are appended to the seed list in place
    beat = melody[-1].beat
    chord = changes[beat]
    melody_generator.start(beat)
    while beat < measures * Note.meter:
        n = Note()
        n.resolution = past[0].resolution
        rest = None
        if universal:
            # A universal generator predicts pitch and rhythm together;
            # in the LSTM case, rest[0] is the beat diff
            n.pitch, tsbq, dq, *rest = melody_generator.next()
        else:
            tsbq, dq = rhythm_generator.next_rhythm()
        # Dequantise ticks-since-beat and duration
        tsbq *= Note.ticks_quantisation_rate
        n.duration = dq * Note.duration_quantisation_rate
        # Advance the beat counter, either by the predicted beat diff or when
        # the new onset's within-beat offset wraps around, informing the
        # melody generator of every chord change along the way
        if melody and (rest or melody[-1].ticks_since_beat > tsbq):
            beat_diff = rest[0] if rest else 1 + melody[-1].duration // n.resolution
            for _ in range(beat_diff):
                beat += 1
                # If the chord changed, inform the melody generator
                newchord = changes[beat]
                if newchord != chord:
                    melody_generator.start(beat)
                    chord = newchord
        # Prevent overlapping notes
        n.tick_abs = tsbq + n.resolution * \
            (beat if rest else max(beat, math.floor((melody[-1].tick_abs + melody[-1].duration) / n.resolution)))
        if not universal:
            n.pitch = melody_generator.next_pitch()
        melody.append(n)
        if melody_generator == rhythm_generator:
            melody_generator.add_past(n)
    return melody
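
The generator parameters are project classes that don't appear in these snippets. The calls made by generate() imply roughly the following interface; this is a minimal sketch, with method names taken from the example and the signatures themselves assumed.

from abc import ABC, abstractmethod
from typing import Tuple


class RhythmGenerator(ABC):
    @abstractmethod
    def next_rhythm(self) -> Tuple[int, int]:
        """Return (ticks-since-beat, duration), both quantised."""


class MelodyGenerator(ABC):
    @abstractmethod
    def start(self, beat: int) -> None:
        """(Re)condition the model on the chord active at `beat`."""

    @abstractmethod
    def next_pitch(self) -> int:
        """Return the next MIDI pitch."""

    def add_past(self, n: "Note") -> None:
        """Feed a generated note back into the model's history."""


# A MelodyAndRhythmGenerator would presumably combine both interfaces.
class UniversalGenerator(ABC):
    @abstractmethod
    def start(self, beat: int) -> None: ...

    @abstractmethod
    def next(self) -> tuple:
        """Return (pitch, tsbq, dq) or (pitch, tsbq, dq, beat_diff)."""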
Example #3
def convert_song(song: SongMetadata):
    """ Combine the quantized and the unquantized MIDI files into one
    that aligns to measures but attempts to retain the original phrasing """
    from file_handlers import notes_to_file, notes_from_file
    Note.default_resolution = 960
    quantized = notes_from_file('weimardb/midi_from_db_quant/{}.mid'.format(song.name))
    original = notes_from_file('weimardb/midi_from_db/{}.mid'.format(song.name))
    lilypond = notes_from_file('weimardb/midi_from_ly/{}.mid'.format(song.name))
    # Find the measure offset between the LilyPond and the quantized file:
    # count distinct offsets and stop once a second one appears; the smaller
    # offset is taken as the starting measure.
    d = {}
    for n, m in zip(quantized, lilypond):
        if m.measure - n.measure not in d:
            d[m.measure - n.measure] = 1
        else:
            d[m.measure - n.measure] += 1
        if len(d) == 2:
            break
    meas_start = min(d.keys())  # type: int
    meas_size = Note.meter * Note.default_resolution
    meas_no = 0
    a, b, c = [], [], []  # quantized measure, original measure, combined output
    lp = []  # lilypond measure: only needed for durations

    def flush():
        # Emit the finished measure: linearly stretch the unquantized onsets
        # around the measure midpoints so they line up with the quantized grid.
        if len(a) > 1:
            r = (a[-1].tick_abs - a[0].tick_abs) / (b[-1].tick_abs - b[0].tick_abs)  # stretch ratio
            a_m = (meas_no + 0.5) * meas_size  # middle of quantized measure
            b_m = b[0].tick_abs + (a_m - a[0].tick_abs) / r  # estimated middle of unquantized measure
            for a_j, b_j, l_j in zip(a, b, lp):
                n = Note()
                n.pitch = b_j.pitch
                n.resolution = b_j.resolution
                n.velocity = b_j.velocity
                n.tick_abs = int(a_m + r * (b_j.tick_abs - b_m)) + (meas_start * meas_size)
                n.duration = int(r * b_j.duration) or a_j.duration or l_j.duration
                c.append(n)
        else:
            c.extend(a)

    for q, o, l in zip(quantized, original, lilypond):
        if q.measure != meas_no:
            flush()
            meas_no = q.measure
            a, b, lp = [], [], []
        a.append(q)
        b.append(o)
        lp.append(l)
    flush()  # the final measure would otherwise be dropped
    notes_to_file(sorted(c, key=lambda p: p.tick_abs), 'weimardb/midi_combined/{}.mid'.format(song.name))
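
The tick mapping in flush() is a linear time warp: r rescales the unquantized measure onto the quantized one, and b_m is chosen so that the warp is anchored at the measure midpoints. A small worked example with assumed numbers:

# Worked example of the per-measure time warp in convert_song, with assumed
# values: resolution 960 and 4/4 meter, so meas_size = 4 * 960 = 3840.
meas_no, meas_size, meas_start = 1, 3840, 0
a0, a_last = 3840, 5440   # first/last onsets in the quantized measure
b0, b_last = 3900, 7100   # first/last onsets in the unquantized measure

r = (a_last - a0) / (b_last - b0)   # stretch ratio: 1600 / 3200 = 0.5
a_m = (meas_no + 0.5) * meas_size   # 5760, middle of the quantized measure
b_m = b0 + (a_m - a0) / r           # 7740, estimated unquantized middle

# Any unquantized onset is mapped linearly around these midpoints:
b_j = 5000
print(int(a_m + r * (b_j - b_m)) + meas_start * meas_size)  # 4390

# Sanity check: the measure's first onset lands back on the quantized grid
print(int(a_m + r * (b0 - b_m)))  # 3840 == a0, by construction of b_m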