Example #1
def train_network():
    """ Train a Neural Network to generate music """
    notes = get_notes()

    # persist the extracted notes so generate() can reuse them
    with open("data/notes.json", "w") as filename:
        json.dump(notes, filename)

    notes_df = pd.DataFrame(notes, columns=['pitch', 'duration'])

    pitches = notes_df['pitch']
    durations = notes_df['duration']

    pitch_vocab = sorted(set(item for item in pitches))
    duration_vocab = sorted(set(item for item in durations))

    with open("data/pitch_vocab.json", "w") as filename:
        json.dump(pitch_vocab, filename)

    with open("data/duration_vocab.json", "w") as filename:
        json.dump(duration_vocab, filename)

    # print("notes_df:")
    # print(notes_df)

    look_back = 4

    in_pitches, in_durations, out_pitches, out_durations = prepare_sequences(
        notes_df, look_back)

    model = create_network(timesteps=look_back,
                           pitch_vocab_size=len(pitch_vocab),
                           duration_vocab_size=len(duration_vocab))
    model.summary()

    train(model, in_pitches, in_durations, out_pitches, out_durations)
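Examples 1, 2 and 5 call create_network(timesteps, pitch_vocab_size, duration_vocab_size) without showing its body. The sketch below is only an illustration of a Keras model that matches that signature; the embedding widths, the single LSTM layer, the two softmax heads and the loss/optimizer choices are assumptions, not the project's actual implementation.

from tensorflow.keras.layers import Concatenate, Dense, Embedding, Input, LSTM
from tensorflow.keras.models import Model

def create_network(timesteps, pitch_vocab_size, duration_vocab_size):
    # one window of pitch indices and one window of duration indices per sample
    pitch_in = Input(shape=(timesteps,), name='pitch_in')
    duration_in = Input(shape=(timesteps,), name='duration_in')

    # embed both token streams, concatenate them and feed a recurrent layer
    pitch_emb = Embedding(pitch_vocab_size, 32)(pitch_in)
    duration_emb = Embedding(duration_vocab_size, 16)(duration_in)
    hidden = LSTM(256)(Concatenate()([pitch_emb, duration_emb]))

    # one softmax head per vocabulary: the next pitch and the next duration
    pitch_out = Dense(pitch_vocab_size, activation='softmax', name='pitch')(hidden)
    duration_out = Dense(duration_vocab_size, activation='softmax', name='duration')(hidden)

    model = Model([pitch_in, duration_in], [pitch_out, duration_out])
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    return model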
Example #2
def generate():
    """ Generate a piano midi file """
    # load the notes used to train the model
    with open('data/notes.json', 'r') as filename:
        notes = json.load(filename)

    with open("data/pitch_vocab.json", "r") as filename:
        pitch_vocab = json.load(filename)

    with open("data/duration_vocab.json", "r") as filename:
        duration_vocab = json.load(filename)

    look_back = 4

    model = create_network(timesteps=look_back,
                           pitch_vocab_size=len(pitch_vocab),
                           duration_vocab_size=len(duration_vocab))

    model.load_weights('weights/weights.hdf5')

    model.summary()

    notes_df = pd.DataFrame(notes, columns=['pitch', 'duration'])
    pitch_samples, duration_samples = prepare_samples(notes_df, pitch_vocab, duration_vocab, look_back)

    print("\npitch_samples:")
    print(pitch_samples)

    print("\nduration_samples:")
    print(duration_samples)

    pitches, durations = generate_notes(model, pitch_samples, duration_samples, pitch_vocab, duration_vocab, count=16)
Example #3
def generate(song_path, sequence_length):
    """ Generate a piano midi file """
    # load the notes used to train the model
    with open(args.dataset + '/notes', 'rb') as filepath:
        notes = pickle.load(filepath)

    # Get all pitch names
    pitchnames = sorted(set(item for item in notes))
    # Get the number of distinct pitches (vocabulary size)
    n_vocab = len(set(notes))
    network_input, normalized_input = prepare_sequences_predict(
        notes, pitchnames, n_vocab)
    model = create_network(
        normalized_input, n_vocab, args.mode,
        "weights/" + args.mode + "_" + args.dataset + ".hdf5")
    song_name = song_path.split("/")[-1]

    # If the song name is 'random', generate from a random seed sequence instead
    if song_name != "random.mid":
        # Get notes of input song
        song_notes = get_input_notes(song_path)

        # Create a processed midi of the song we want to predict
        create_midi(song_notes, outputdir + "full_" + song_name)

        # Take the notes that follow the first 100 (up to sequence_length of them)
        if sequence_length > len(song_notes):
            end = None
        else:
            end = 100 + sequence_length
        expected_song = song_notes[100:end]

        # Create a midi of the expected
        create_midi(expected_song, outputdir + "expected_" + song_name)
        song_input, _ = prepare_sequences_predict(song_notes, pitchnames,
                                                  n_vocab)
        prediction_output = generate_notes(model, song_input, pitchnames,
                                           n_vocab, sequence_length, False)
    else:
        prediction_output = generate_notes(model, network_input, pitchnames,
                                           n_vocab, sequence_length, True)

    create_midi(prediction_output, outputdir + "prediction_" + song_name)
Example #4
def train_network():
    """ Train a Neural Network to generate music """
    
    # check if the notes file already exists
    if os.path.isfile(args.dataset + "/notes"):
        with open(args.dataset + '/notes', 'rb') as filepath:
            notes = pickle.load(filepath)
    else:
        # if not, create the notes and store them in the dataset folder
        notes = get_notes()

    # get the number of distinct pitch names (vocabulary size)
    n_vocab = len(set(notes))

    network_input, network_output = prepare_sequences(notes, n_vocab)

    model = create_network(network_input, n_vocab, args.mode)

    train(model, network_input, network_output)
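Examples 3 and 4 rely on a different signature, create_network(network_input, n_vocab, mode[, weights_file]), where the first argument only supplies the input shape and a weights file is passed when generating. The sketch below is a plausible stand-in under those assumptions; the layer sizes and the reading of mode as the recurrent-cell type are illustrative guesses, not the original implementation.

from tensorflow.keras.layers import Dense, Dropout, GRU, LSTM
from tensorflow.keras.models import Sequential

def create_network(network_input, n_vocab, mode, weights_file=None):
    # network_input is only used for its shape: (samples, timesteps, features)
    rnn = GRU if mode == 'gru' else LSTM
    model = Sequential([
        rnn(256, input_shape=(network_input.shape[1], network_input.shape[2]),
            return_sequences=True),
        Dropout(0.3),
        rnn(256),
        Dense(n_vocab, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # the generation scripts pass previously trained weights; training starts fresh
    if weights_file is not None:
        model.load_weights(weights_file)
    return model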
Example #5
def generate():
    """ Generate a piano midi file """
    # load the notes used to train the model
    with open('data/notes.json', 'r') as filename:
        notes = json.load(filename)

    with open("data/pitch_vocab.json", "r") as filename:
        pitch_vocab = json.load(filename)

    with open("data/duration_vocab.json", "r") as filename:
        duration_vocab = json.load(filename)

    look_back = 4

    model = create_network(timesteps=look_back,
                           pitch_vocab_size=len(pitch_vocab),
                           duration_vocab_size=len(duration_vocab))

    model.load_weights('weights/weights.hdf5')

    model.summary()

    notes_df = pd.DataFrame(notes, columns=['pitch', 'duration'])
    pitch_samples, duration_samples = prepare_samples(notes_df, pitch_vocab,
                                                      duration_vocab,
                                                      look_back)

    pitches, durations = generate_notes(model,
                                        pitch_samples,
                                        duration_samples,
                                        pitch_vocab,
                                        duration_vocab,
                                        count=64)

    filename = 'output/' + datetime.datetime.today().strftime(
        "%d.%m.%Y %H.%M") + '.mid'
    create_midi(pitches, durations, filename)
Example #6
    # make sure the log file handles are defined even when no log folder is set
    training_loss_file = None
    evaluate_stat_file = None
    if config['LOG_FOLDER'] != '':
        if not os.path.exists(config['LOG_FOLDER']):
            os.makedirs(config['LOG_FOLDER'])
        training_loss_file = open(config['LOG_FOLDER'] + '/training_loss.txt',
                                  'w')
        evaluate_stat_file = open(config['LOG_FOLDER'] + '/evaluate_stat.txt',
                                  'w')
    log(config, training_loss_file)
    log(config, evaluate_stat_file)

    # Step 2. Create dataset loader
    train_dataloader, test_dataloader, val_dataloader = create_dataset_loader(
        config)

    # Step 3. Create cnn
    cnn = create_network(config)

    if config['CKPT_PATH'] != '':
        print('Loading checkpoint from %s' % config['CKPT_PATH'])
        cnn.load_state_dict(torch.load(config['CKPT_PATH']))

    # Step 4. Create optimizer
    optimizer = None
    if 'train' in config['OPERATION']:
        if config['OPTIMIZER'] == 'adam':
            optimizer = torch.optim.Adam(cnn.parameters(),
                                         lr=config['LEARNING_RATE'],
                                         betas=(0.9, 0.999))
        else:
            raise Exception('Optimizer not implemented!')
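Example 6 expects create_network(config) to return a torch.nn.Module whose parameters go to the optimizer and whose state dict can be loaded from CKPT_PATH. A minimal sketch under those assumptions follows; the convolutional layout and the hypothetical 'NUM_CLASSES' config key are illustrative only.

import torch.nn as nn

def create_network(config):
    # a small convolutional classifier; a real project would size every layer
    # from the config instead of hard-coding the channel counts used here
    # ('NUM_CLASSES' is a hypothetical key used only for this sketch)
    return nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(2),
        nn.Conv2d(32, 64, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),
        nn.Linear(64, config.get('NUM_CLASSES', 10)),
    )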