Example #1
0
def generate(data_fn, out_fn, N_epochs):
    """Train an LSTM on the music in ``data_fn`` and write a generated
    improvisation to the MIDI file ``out_fn``.

    For each chord group extracted from the input, a grammar string is
    generated from the trained model, pruned and unparsed into notes, and
    the notes/chords are appended to one music21 stream, which is finally
    saved as a MIDI file.

    Args:
        data_fn: path to the input music data file (read by get_musical_data).
        out_fn: path of the output MIDI file (opened in "wb" mode).
        N_epochs: number of training epochs forwarded to lstm.build_model.
    """
    # model settings
    max_len = 20  # sequence window length forwarded to the model and generator
    max_tries = 1000  # retry budget forwarded to __generate_grammar
    diversity = 0.5  # sampling diversity forwarded to __generate_grammar

    # musical settings
    bpm = 130  # tempo, written into the output stream as a MetronomeMark below

    # get data
    chords, abstract_grammars = get_musical_data(data_fn)
    corpus, values, val_indices, indices_val = get_corpus_data(
        abstract_grammars)
    print("corpus length:", len(corpus))
    print("total # of values:", len(values))

    # build model
    model = lstm.build_model(corpus=corpus,
                             val_indices=val_indices,
                             max_len=max_len,
                             N_epochs=N_epochs)

    # set up audio stream
    out_stream = stream.Stream()

    # generation loop: one pass per chord group, each occupying 4 beats
    curr_offset = 0.0  # running write position (in beats) within out_stream
    loopEnd = len(chords)
    # NOTE(review): range starts at 1, so chords[0] is never rendered — this
    # matches the upstream assignment code; confirm it is intentional.
    for loopIndex in range(1, loopEnd):
        # get chords from file; offsets are folded into a single 4-beat
        # measure (curr_offset advances by 4.0 per iteration below)
        curr_chords = stream.Voice()
        for j in chords[loopIndex]:
            curr_chords.insert((j.offset % 4), j)

        # generate grammar
        curr_grammar = __generate_grammar(
            model=model,
            corpus=corpus,
            abstract_grammars=abstract_grammars,
            values=values,
            val_indices=val_indices,
            indices_val=indices_val,
            max_len=max_len,
            max_tries=max_tries,
            diversity=diversity,
        )

        # map " A" / " X" tokens to " C" — presumably normalizing
        # rest/unknown markers to a playable value; TODO confirm semantics
        curr_grammar = curr_grammar.replace(" A", " C").replace(" X", " C")

        # Pruning #1: smoothing measure
        curr_grammar = prune_grammar(curr_grammar)

        # Get notes from grammar and chords
        curr_notes = unparse_grammar(curr_grammar, curr_chords)

        # Pruning #2: removing repeated and too close together notes
        curr_notes = prune_notes(curr_notes)

        # quality assurance: clean up notes
        curr_notes = clean_up_notes(curr_notes)

        # print # of notes in curr_notes
        print("After pruning: %s notes" %
              (len([i for i in curr_notes if isinstance(i, note.Note)])))

        # insert into the output stream, shifted to this measure's offset
        for m in curr_notes:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0  # advance to the next 4-beat measure

    out_stream.insert(0.0, tempo.MetronomeMark(number=bpm))

    # Play the final stream through output (see 'play' lambda function above)
    # play = lambda x: midi.realtime.StreamPlayer(x).play()
    # play(out_stream)

    # save stream as a MIDI file at out_fn
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open(out_fn, "wb")
    mf.write()
    mf.close()
"""Modified from the original data_utils.py file from the Deep Learning Specialization assignment"""
from grammar import unparse_grammar
from music import data_processing
from music21 import *
from preprocess import get_corpus_data, get_musical_data
from qa import clean_up_notes, prune_grammar, prune_notes

# Module-level data shared by load_data() and generate_music():
# chords / abstract_grammars are extracted from the source MIDI file, and
# the corpus plus the tone<->index lookup tables are derived from the
# abstract grammars.
chords, abstract_grammars = get_musical_data('data/original_metheny.mid')
corpus, tones, tones_indices, indices_tones = get_corpus_data(
    abstract_grammars)


def load_data():
    """Build the training dataset from the module-level corpus.

    Returns:
        tuple: ``(X, Y, N_tones, indices_tones)`` where ``X``/``Y`` are the
        training inputs/targets produced by ``data_processing`` and
        ``N_tones`` is the number of distinct musical values it reports.
    """
    # data_processing already returns N_tones, so the previous
    # `N_tones = len(set(corpus))` pre-computation was dead code
    # (its value was immediately overwritten) and has been removed.
    X, Y, N_tones = data_processing(corpus, tones_indices, 60, 30)

    return (X, Y, N_tones, indices_tones)


def generate_music(sampling_fn):
    """
    Generates music using the given sampling function that returns the indices of musical values.
    Creates an audio stream to save the music.
    """

    # Set up audio stream
    out_stream = stream.Stream()

    # Initialize chord variables
    curr_offset = 0.0  # variable used to write sounds to the Stream.
    num_chords = int(len(chords) / 3)  # number of different set of chords