Esempio n. 1
0
 def scatter_quant():
     """ Print beat-position counts for each corpus variant, then scatter-plot
     quantised duration against quantised beat position for the combined corpus. """
     def load_dir(path):
         # Concatenate the notes of every MIDI file in `path`.
         return lsum(
             notes_from_file(os.path.join(path, file))
             for file in os.listdir(path))

     # The same load pattern was copy-pasted four times; use the helper.
     quant = load_dir('weimardb/midi_from_db_quant')
     db = load_dir('weimardb/midi_from_db')
     comb = load_dir('weimardb/midi_combined')
     ly = load_dir('weimardb/midi_from_ly')
     # Bring the LilyPond export to 960 ppq (tick values scaled by 2.5).
     for note in ly:
         note.tick_abs = int(note.tick_abs * 2.5)
         note.resolution = 960
     # How many notes fall exactly on a few common quantised beat positions.
     for lis in db, quant, ly, comb:
         for pos in 48, 64, 72:
             print(pos,
                   sum(n.ticks_since_beat_quantised == pos for n in lis))
     vec = np.array([[n.ticks_since_beat_quantised, n.duration_quantised]
                     for n in comb])
     plt.scatter(*vec.T)
     # NOTE(review): 'basey' was removed in matplotlib 3.5; newer versions need base=2.
     plt.yscale('log', basey=2)
     plt.show()
Esempio n. 2
0
    def weimar(model, song, choruses=3, order=5, epochs=None, callback=None):
        """
        Train a model on the Weimar database of transcriptions and then run it on the specified chord progression.

        :param epochs: number of training epochs, forwarded to the model's learn()
        :param callback: to monitor training
        :param model: The name of the model: 'neural' or 'lstm' (append 'stateless' for a stateless LSTM).
        :param song: The name of the chord progression to use for generation.
            Both the midi file and the text file containing the changes must exist with this name.
            The generation seed will be obtained from the beginning of the midi file.
        :param choruses: The number of choruses to generate
        :param order: model order, i.e. length of the generation seed
        :raises ValueError: if the model name is not recognised
        """
        from models import neural
        msg_callback = callback.set_text if callback else print
        changes = changes_from_file(song)
        model_name = model  # keep the name: `model` is rebound to an instance below
        if model == 'neural':
            model = neural.OneLayer(changes, order)
        elif model.startswith('lstm'):
            model = neural.LSTM(changes, stateful=not model.endswith('stateless'))
        else:
            # Fail fast: previously an unknown name slipped through and died
            # later with an opaque AttributeError on `model.order`.
            raise ValueError("Unknown model name: {!r}".format(model_name))
        seed = notes_from_file(r"input/{}.mid".format(song))[:model.order]
        Note.default_resolution = seed[0].resolution
        metadata = weimar.load_metadata()
        # Flatten (notes, changes) pairs for every song into one argument list.
        training_set = list(itertools.chain(*((notes_from_file('weimardb/midi_combined/{}.mid'.format(song.name)),
                                               song.changes)
                            for song in metadata)))
        if model.learn(*training_set, epochs=epochs, callback=callback):
            msg_callback("Generating notes...")
            model.add_past(*seed)
            melody = generate(seed, changes, model, None, choruses * changes.measures())
            notes_to_file(add_chords(melody, changes), 'output/weimar_{}.mid'.format(model_name), msg_callback)
Esempio n. 3
0
 def turing():
     """ Generate a Turing test: write alternating machine and human choruses to files. """
     from models import neural
     song = 'Eb_therewill'
     changes = changes_from_file(song)
     notes = notes_from_file("input/{}.mid".format(song))
     Note.default_resolution = notes[0].resolution
     generator = neural.LSTM(changes)
     train(notes, changes, generator)
     seed = notes[:generator.order]
     generator.add_past(*seed)
     machine_melody = add_chords(
         generate(seed, changes, generator, None, 6 * changes.measures()),
         changes)
     chorus_len = changes.measures()
     num_choruses = notes[-1].measure // chorus_len
     for i in range(5):
         # One chorus of machine improvisation...
         notes_to_file(
             extract_measures(machine_melody, (i + 1) * chorus_len, (i + 2) * chorus_len),
             'output/machine{}.mid'.format(i))
         # ...paired with one randomly chosen chorus of human improvisation.
         n = random.randint(2, num_choruses - 2)
         notes_to_file(
             add_chords(extract_measures(notes, n * chorus_len, (n + 1) * chorus_len), changes),
             'output/man{}.mid'.format(i))
Esempio n. 4
0
 def pca():
     """ Perform PCA on the MIDI file """
     notes = notes_from_file('input/Eb_therewill.mid')
     features = [[n.ticks_since_beat_quantised, n.duration_quantised, n.pitch]
                 for n in notes]
     data = np.array(features)
     # Centre each feature column on its mean, transposed for princomp.
     centred = (data - data.T.mean(axis=1)).T
     coeff, score, latent = princomp(centred)
     plt.plot(latent)
     plt.show()
Esempio n. 5
0
 def weimar():
     """ Check for correct relation of changes to MIDI.

     For a few hand-picked songs of the Weimar database, writes a MIDI file
     combining the transcription with its chord changes so the alignment
     can be audited by ear.
     """
     # NOTE(review): this function is itself named `weimar`; the call below
     # only works if `weimar` resolves to the metadata module at lookup time
     # (e.g. this def does not shadow an `import weimar` in the same scope)
     # — confirm, otherwise this raises AttributeError.
     Note.default_resolution = 960
     metadata = weimar.load_metadata()
     for song in metadata:
         snl = song.name.lower()
         # Spot-check only a few recognisable tunes.
         if 'therewill' in snl or 'singin' in snl or 'anthropo' in snl:
             notes = notes_from_file('weimardb/midi_combined/{}.mid'.format(song.name))
             notes_to_file(add_chords(notes, song.changes), 'output/weimartest_{}.mid'.format(song.name))
Esempio n. 6
0
def convert_song(song: SongMetadata):
    """ Combine the quantized and the unquantized MIDI files
    into one that aligns to measures but attempts to retain the original phrasing.

    For each measure of the quantized file, the corresponding unquantized
    notes are linearly stretched onto the quantized measure span, then
    shifted by the measure offset between the LilyPond and quantized exports.

    :param song: metadata record; the three source MIDI files are looked
        up by ``song.name``.
    """
    from file_handlers import notes_to_file
    from file_handlers import notes_from_file
    Note.default_resolution = 960
    quantized = notes_from_file('weimardb/midi_from_db_quant/{}.mid'.format(song.name))
    original = notes_from_file('weimardb/midi_from_db/{}.mid'.format(song.name))
    lilypond = notes_from_file('weimardb/midi_from_ly/{}.mid'.format(song.name))
    # Histogram of measure-number differences between paired notes of the
    # LilyPond and quantized exports; stop once two distinct values appear.
    d = {}
    for n, m in zip(quantized, lilypond):
        if m.measure - n.measure not in d:
            d[m.measure - n.measure] = 1
        else:
            d[m.measure - n.measure] += 1
        if len(d) == 2:
            break
    meas_start = min(d.keys())  # type: int
    meas_size = Note.meter * Note.default_resolution  # ticks per measure
    meas_no = 0
    a, b, c = [], [], []  # quantized measure, original measure, combined output
    lp = []  # lilypond measure: only needed for durations
    for q, o, l in zip(quantized, original, lilypond):
        if q.measure != meas_no:
            # A new measure begins: flush the previous measure into the output.
            if len(a) > 1:
                r = (a[-1].tick_abs - a[0].tick_abs) / (b[-1].tick_abs - b[0].tick_abs)  # stretch ratio
                # NOTE(review): divides by zero if the first and last
                # unquantized notes of a measure share a tick — confirm the
                # data rules that out.
                a_m = (meas_no + 0.5) * meas_size  # middle of quantized measure
                b_m = b[0].tick_abs + (a_m - a[0].tick_abs) / r  # estimated middle of unquantized measure
                for a_j, b_j, l_j in zip(a, b, lp):
                    n = Note()
                    n.pitch = b_j.pitch
                    n.resolution = b_j.resolution
                    n.velocity = b_j.velocity
                    # Stretch around the estimated measure middle, then shift
                    # by the measure offset computed above.
                    n.tick_abs = int(a_m + r * (b_j.tick_abs - b_m)) + (meas_start * meas_size)
                    # Prefer the stretched duration; fall back to quantized,
                    # then to the LilyPond duration when it comes out zero.
                    n.duration = int(r * b_j.duration) or a_j.duration or l_j.duration
                    c.append(n)
            else:
                # Zero or one note in the measure: nothing to stretch,
                # keep the quantized note(s) as-is.
                c += a
            meas_no = q.measure
            a, b, lp = [], [], []
        a.append(q)
        b.append(o)
        lp.append(l)
    # NOTE(review): the notes of the final measure are never flushed into c —
    # looks like an off-by-one; confirm whether that is intended.
    notes_to_file(sorted(c, key=lambda p: p.tick_abs), 'weimardb/midi_combined/{}.mid'.format(song.name))
Esempio n. 7
0
 def scatter_quant():
     """ Print beat-position counts for each corpus variant, then scatter-plot
     quantised duration against quantised beat position for the combined corpus. """
     def notes_in(directory):
         # Concatenate the notes of every MIDI file found in `directory`.
         return lsum(notes_from_file(os.path.join(directory, file)) for file in os.listdir(directory))

     # The load pattern was copy-pasted four times; use the helper instead.
     quant = notes_in('weimardb/midi_from_db_quant')
     db = notes_in('weimardb/midi_from_db')
     comb = notes_in('weimardb/midi_combined')
     ly = notes_in('weimardb/midi_from_ly')
     # Bring the LilyPond export to 960 ppq (tick values scaled by 2.5).
     for note in ly:
         note.tick_abs = int(note.tick_abs * 2.5)
         note.resolution = 960
     for lis in db, quant, ly, comb:
         for pos in 48, 64, 72:
             print(pos, sum(n.ticks_since_beat_quantised == pos for n in lis))
     vec = np.array([[n.ticks_since_beat_quantised, n.duration_quantised] for n in comb])
     plt.scatter(*vec.T)
     plt.yscale('log', basey=2)  # NOTE(review): matplotlib >= 3.5 needs base=2 instead of basey
     plt.show()
Esempio n. 8
0
 def pca():
     """ Perform PCA on the MIDI file.

     Plots the variance carried by each principal component of the
     (position-in-beat, duration, pitch) feature vectors of the notes.
     """
     notes = notes_from_file('input/Eb_therewill.mid')
     data = np.array(
         [[n.ticks_since_beat_quantised, n.duration_quantised, n.pitch]
          for n in notes])
     # Centre each feature column on its mean, transposed for princomp.
     cent = (data - data.T.mean(axis=1)).T
     c, s, l = princomp(cent)  # coefficients, scores, latent variances
     plt.plot(l)
     plt.show()
Esempio n. 9
0
 def total_recall():
     """ Test the code that drives generation by feeding it the training set as generated output """
     song = 'Eb_therewill'
     changes = changes_from_file(song)
     training_notes = notes_from_file(r"input/{}.mid".format(song))
     Note.default_resolution = training_notes[0].resolution
     model = TotalRecall()
     train(training_notes, changes, model)
     seed = training_notes[:model.order]
     replayed = generate(seed, changes, model, None, 4 * changes.measures())
     notes_to_file(add_chords(replayed, changes), 'output/test.mid')
Esempio n. 10
0
 def scatter():
     """ Create a scatter plot of durations vs position in beat """
     song = 'Eb_therewill'
     notes = notes_from_file(r"input/{}.mid".format(song))
     points = np.array([[note.ticks_since_beat_quantised, note.duration_quantised]
                        for note in notes])
     # Export nonzero durations only: zeros would break a logarithmic plot.
     with open('scatter.dat', 'w') as out:
         for tsbq, dq in points:
             if dq:
                 print(tsbq, dq, file=out)
     plt.scatter(*points.T)
     plt.show()
Esempio n. 11
0
 def total_recall():
     """ Test the code that drives generation by feeding it the training set as generated output """
     song = 'Eb_therewill'
     model = TotalRecall()
     changes = changes_from_file(song)
     notes = notes_from_file(r"input/{}.mid".format(song))
     # Keep output resolution consistent with the input file.
     Note.default_resolution = notes[0].resolution
     train(notes, changes, model)
     # Seed with the first `order` notes and generate four choruses.
     m = generate(notes[:model.order], changes, model, None,
                  4 * changes.measures())
     notes_to_file(add_chords(m, changes), 'output/test.mid')
Esempio n. 12
0
 def weimar():
     """ Check for correct relation of changes to MIDI.

     For a few hand-picked songs of the Weimar database, writes a MIDI file
     combining the transcription with its chord changes so the alignment
     can be audited by ear.
     """
     # NOTE(review): this function is itself named `weimar`; the call below
     # only works if `weimar` resolves to the metadata module at lookup time
     # — confirm, otherwise this raises AttributeError.
     Note.default_resolution = 960
     metadata = weimar.load_metadata()
     for song in metadata:
         snl = song.name.lower()
         # Spot-check only a few recognisable tunes.
         if 'therewill' in snl or 'singin' in snl or 'anthropo' in snl:
             notes = notes_from_file('weimardb/midi_combined/{}.mid'.format(
                 song.name))
             notes_to_file(add_chords(notes, song.changes),
                           'output/weimartest_{}.mid'.format(song.name))
Esempio n. 13
0
 def scatter():
     """ Create a scatter plot of durations vs position in beat """
     song = 'Eb_therewill'
     notes = notes_from_file(r"input/{}.mid".format(song))
     # One (position-in-beat, duration) row per note, both quantised.
     vec = np.array([[n.ticks_since_beat_quantised, n.duration_quantised]
                     for n in notes])
     with open('scatter.dat', 'w') as kf:
         for tsbq, dq in vec:
             if dq:  # don't want 0 values exported for logarithmic plot
                 print(tsbq, dq, file=kf)
     plt.scatter(*vec.T)
     plt.show()
Esempio n. 14
0
    def single(model, song, choruses=3, order=5, epochs=None, callback=None):
        """
        Train and run a model on the same chord progression.

        :param model: The name of the model: 'markov', 'neural', 'lstm'
            (append 'stateless' for a stateless LSTM) or 'lasagne'.
        :param song: The name of the song.
            Both the midi file and the text file containing the changes must exist with this name.
        :param choruses: The number of choruses to generate
        :param order: model order for the Markov / one-layer generators
        :param epochs: number of training epochs, forwarded to train()
        :param callback: to monitor training
        :raises ValueError: if the model name is not recognised
        """
        from models import neural
        msg_callback = callback.set_text if callback else print
        # Read the chord changes from a text file
        changes = changes_from_file(song)
        # Read the training set from a MIDI file
        notes = notes_from_file(r"input/{}.mid".format(song))
        Note.default_resolution = notes[0].resolution
        # Learn and generate
        rhythm_generator = None
        if model == 'markov':
            melody_generator = markov.ChordAgnosticMelody(order)
            rhythm_generator = markov.Rhythm()
        elif model == 'neural':
            melody_generator = neural.OneLayer(changes, order)
        elif model.startswith('lstm'):
            melody_generator = neural.LSTM(
                changes, stateful=not model.endswith('stateless'))
        elif model == 'lasagne':
            melody_generator = neural.lasagne.OneLayer(changes, order)
        else:
            # Fail fast: an unknown name used to leave melody_generator as
            # None and crash obscurely inside train().
            raise ValueError("Unknown model name: {!r}".format(model))
        if train(notes,
                 changes,
                 melody_generator,
                 rhythm_generator,
                 callback=callback,
                 epochs=epochs):
            if model != 'markov':
                melody_generator.add_past(*notes[:melody_generator.order])
            msg_callback("Generating notes...")
            if rhythm_generator is None:
                # No dedicated rhythm model: the melody model provides both.
                rhythm_generator = melody_generator
            melody = generate(
                notes[:max(melody_generator.order, rhythm_generator.order)],
                changes, melody_generator, rhythm_generator,
                choruses * changes.measures())
            # Write output file
            notes_to_file(add_chords(melody, changes),
                          'output/{}.mid'.format(model), msg_callback)
Esempio n. 15
0
    def weimar(model, song, choruses=3, order=5, epochs=None, callback=None):
        """
        Train a model on the Weimar database of transcriptions and then run it on the specified chord progression.

        :param epochs: number of training epochs, forwarded to the model's learn()
        :param callback: to monitor training
        :param model: The name of the model: 'neural' or 'lstm' (append 'stateless' for a stateless LSTM).
        :param song: The name of the chord progression to use for generation.
            Both the midi file and the text file containing the changes must exist with this name.
            The generation seed will be obtained from the beginning of the midi file.
        :param choruses: The number of choruses to generate
        :param order: model order, i.e. length of the generation seed
        :raises ValueError: if the model name is not recognised
        """
        from models import neural
        msg_callback = callback.set_text if callback else print
        changes = changes_from_file(song)
        model_name = model  # keep the name: `model` is rebound to an instance below
        if model == 'neural':
            model = neural.OneLayer(changes, order)
        elif model.startswith('lstm'):
            model = neural.LSTM(changes,
                                stateful=not model.endswith('stateless'))
        else:
            # Fail fast: previously an unknown name slipped through and died
            # later with an opaque AttributeError on `model.order`.
            raise ValueError("Unknown model name: {!r}".format(model_name))
        seed = notes_from_file(r"input/{}.mid".format(song))[:model.order]
        Note.default_resolution = seed[0].resolution
        metadata = weimar.load_metadata()
        # Flatten (notes, changes) pairs for every song into one argument list.
        training_set = list(
            itertools.chain(*((notes_from_file(
                'weimardb/midi_combined/{}.mid'.format(song.name)),
                               song.changes) for song in metadata)))
        if model.learn(*training_set, epochs=epochs, callback=callback):
            msg_callback("Generating notes...")
            model.add_past(*seed)
            melody = generate(seed, changes, model, None,
                              choruses * changes.measures())
            notes_to_file(add_chords(melody, changes),
                          'output/weimar_{}.mid'.format(model_name),
                          msg_callback)
Esempio n. 16
0
 def fast_and_furious(stateful=True):
     """ Keras provides a choice of 3 implementations for recurrent layers. Find out which is the fastest. """
     from models import neural
     song = 'Eb_therewill'
     changes = changes_from_file(song)
     notes = notes_from_file(r"input/{}.mid".format(song))
     Note.default_resolution = notes[0].resolution
     model = neural.LSTM(changes, stateful=stateful)
     timings = {}
     for impl in range(3):
         print('implementation', impl)
         started = time.time()
         model._implementation = impl  # switch implementation before the timed epoch
         model.learn(notes, changes, epochs=1)
         timings[impl] = time.time() - started
     print(timings)
Esempio n. 17
0
 def fast_and_furious(stateful=True):
     """ Keras provides a choice of 3 implementations for recurrent layers. Find out which is the fastest.

     :param stateful: whether to build the LSTM in stateful mode
     """
     from models import neural
     song = 'Eb_therewill'
     changes = changes_from_file(song)
     notes = notes_from_file(r"input/{}.mid".format(song))
     Note.default_resolution = notes[0].resolution
     model = neural.LSTM(changes, stateful=stateful)
     d = {}  # implementation index -> wall-clock seconds for one epoch
     for i in [0, 1, 2]:
         print('implementation', i)
         t = time.time()
         # NOTE(review): pokes a private attribute; presumably selects the
         # Keras recurrent-layer implementation — confirm against the Keras
         # version in use.
         model._implementation = i
         model.learn(notes, changes, epochs=1)
         d[i] = time.time() - t
     print(d)
Esempio n. 18
0
    def single(model, song, choruses=3, order=5, epochs=None, callback=None):
        """
        Train and run a model on the same chord progression.

        :param model: The name of the model: 'markov', 'neural', 'lstm'
            (append 'stateless' for a stateless LSTM) or 'lasagne'.
        :param song: The name of the song.
            Both the midi file and the text file containing the changes must exist with this name.
        :param choruses: The number of choruses to generate
        :param order: model order for the Markov / one-layer generators
        :param epochs: number of training epochs, forwarded to train()
        :param callback: to monitor training
        :raises ValueError: if the model name is not recognised
        """
        from models import neural
        msg_callback = callback.set_text if callback else print
        # Read the chord changes from a text file
        changes = changes_from_file(song)
        # Read the training set from a MIDI file
        notes = notes_from_file(r"input/{}.mid".format(song))
        Note.default_resolution = notes[0].resolution
        # Learn and generate
        rhythm_generator = None
        if model == 'markov':
            melody_generator = markov.ChordAgnosticMelody(order)
            rhythm_generator = markov.Rhythm()
        elif model == 'neural':
            melody_generator = neural.OneLayer(changes, order)
        elif model.startswith('lstm'):
            melody_generator = neural.LSTM(changes, stateful=not model.endswith('stateless'))
        elif model == 'lasagne':
            melody_generator = neural.lasagne.OneLayer(changes, order)
        else:
            # Fail fast: an unknown name used to leave melody_generator as
            # None and crash obscurely inside train().
            raise ValueError("Unknown model name: {!r}".format(model))
        if train(notes, changes, melody_generator, rhythm_generator, callback=callback, epochs=epochs):
            if model != 'markov':
                melody_generator.add_past(*notes[:melody_generator.order])
            msg_callback("Generating notes...")
            if rhythm_generator is None:
                # No dedicated rhythm model: the melody model provides both.
                rhythm_generator = melody_generator
            melody = generate(notes[:max(melody_generator.order, rhythm_generator.order)],
                              changes, melody_generator, rhythm_generator, choruses * changes.measures())
            # Write output file
            notes_to_file(add_chords(melody, changes), 'output/{}.mid'.format(model), msg_callback)
Esempio n. 19
0
 def turing():
     """ Generate a Turing test.

     Trains an LSTM on the song, generates six choruses, then writes five
     machine choruses and five randomly chosen human choruses to separate
     MIDI files for blind comparison.
     """
     from models import neural
     song = 'Eb_therewill'
     changes = changes_from_file(song)
     filename = "input/{}.mid".format(song)
     notes = notes_from_file(filename)
     Note.default_resolution = notes[0].resolution
     melody_generator = neural.LSTM(changes)
     train(notes, changes, melody_generator)
     # Seed generation with the first `order` notes of the human performance.
     melody_generator.add_past(*notes[:melody_generator.order])
     melody = generate(notes[:melody_generator.order], changes, melody_generator, None, 6 * changes.measures())
     melody = add_chords(melody, changes)
     # How many full choruses the human recording contains.
     num_choruses = notes[-1].measure // changes.measures()
     for i in range(5):
         # Write a chorus of human improvisation and a chorus of machine improvisation to file
         start = (i + 1) * changes.measures()
         end = (i + 2) * changes.measures()
         notes_to_file(extract_measures(melody, start, end), 'output/machine{}.mid'.format(i))
         # Pick a random human chorus away from the recording's edges.
         n = random.randint(2, num_choruses - 2)
         start = n * changes.measures()
         end = (n + 1) * changes.measures()
         notes_to_file(add_chords(extract_measures(notes, start, end), changes), 'output/man{}.mid'.format(i))
Esempio n. 20
0
 def back_to_the_future():
     """ Find out how common it is that a later note has a smaller tick_abs """
     notes = notes_from_file(r"input/{}.mid".format('Eb_therewill'))
     inversions = sum(earlier.tick_abs > later.tick_abs
                      for earlier, later in nwise(notes, 2))
     print(inversions)
Esempio n. 21
0
 def back_to_the_future():
     """ Find out how common it is that a later note has a smaller tick_abs """
     song = 'Eb_therewill'
     notes = notes_from_file(r"input/{}.mid".format(song))
     # Count adjacent pairs (n, m) where the earlier note n starts after m.
     print(sum(n.tick_abs > m.tick_abs for n, m in nwise(notes, 2)))