def generate_double(fn, gn):
    """Generate an AI continuation for the MIDI file at *fn* and write it to *gn*.

    fn: path to an input .mid file used as the primer.
    gn: output path for the generated .mid file.
    """
    test_piano_roll = midiToPianoroll(fn)
    # The network consumes a batch of rolls, windowed to the configured
    # input sequence length.
    test_input = createSeqTestNetInputs([test_piano_roll],
                                        cfg.MODEL_PARAMS.X_SEQ_LENGTH)
    net_output = model.predict(test_input[0])
    net_roll = seqNetOutToPianoroll(net_output)
    # NOTE(review): the original also built an unused
    # total_roll = np.concatenate((test_piano_roll, net_roll)); the function
    # name suggests primer+continuation may have been the intended output —
    # confirm before restoring. Only the generated part is written here.
    pianorollToMidi(net_roll, gn)
def test():
    """Smoke-test generation: run the model on a fixed primer MIDI file and
    write the generated continuation into cfg.DATA.GENERATED_DIR.
    """
    # A random primer could be picked from DATA_DIR instead of the fixed file:
    #   midi_files = glob.glob(DATA_DIR + '/*.mid')
    #   midi_file = midi_files[random.randint(0, len(midi_files) - 1)]
    midi_file = 'uploads/mozk175b_trimmed.mid'
    print('Generating from %s' % midi_file)
    test_piano_roll = midiToPianoroll(midi_file)
    test_input = createSeqTestNetInputs([test_piano_roll],
                                        cfg.MODEL_PARAMS.X_SEQ_LENGTH)
    test_data = test_input[0]

    # BUG FIX: strftime("%s") is a non-portable glibc extension (seconds since
    # epoch; invalid on Windows). "%S" (seconds, zero-padded) was intended.
    generated_file = 'AI_generated_%s.mid' % (
        time.strftime("%Y_%m_%d_%H_%M_%S"))
    generated_path = '%s/%s' % (cfg.DATA.GENERATED_DIR, generated_file)

    net_output = model.predict(test_data)
    print("net_output:", np.array(net_output.shape))
    net_roll = seqNetOutToPianoroll(net_output)
    print("net_roll:", net_roll.shape)
    pianorollToMidi(net_roll, generated_path)
def generate():
    """Flask handler: accept an uploaded MIDI file, run the model on it, and
    return the generated MIDI's path as JSON.

    Returns a JSON body {'generated': <path>} on success, or a plain error
    string when the uploaded file's extension is not allowed.
    """
    file = flask.request.files['midifile']
    if file.filename.split('.')[-1] in ALLOWED_EXTENSIONS:
        print(model.summary())
        # One timestamp shared by both paths so the input/output filenames
        # always match even across a minute boundary (the original called
        # strftime twice). BUG FIX: "%s" is a non-portable glibc
        # epoch-seconds extension; "%S" (seconds) was intended.
        stamp = time.strftime("%Y_%m_%d_%H_%M_%S")
        fn = 'uploads/input_file_%s.mid' % stamp
        file.save(fn)
        gn = 'static/AI_generated_%s.mid' % stamp

        test_piano_roll = midiToPianoroll(fn)
        test_input = createSeqTestNetInputs([test_piano_roll],
                                            cfg.MODEL_PARAMS.X_SEQ_LENGTH)
        net_output = model.predict(test_input[0])
        net_roll = seqNetOutToPianoroll(net_output)
        # NOTE(review): an unused np.concatenate((test_piano_roll, net_roll))
        # was dropped here; restore only if primer+continuation output is wanted.
        pianorollToMidi(net_roll, gn)

        return flask.jsonify({'generated': gn})
    else:
        return "You have uploaded invalid file"
# ---- Example #4 (scrape separator; original text: "示例#4" / "0") ----
    # NOTE(review): fragment — the enclosing def (presumably an init()) is not
    # visible in this chunk; the lines below are its body.
    # Download the serialized model architecture and weights from the Azure
    # blob container 'musicmodels' to the local paths configured in cfg.DATA.
    block_blob_service.get_blob_to_path('musicmodels', cfg.DATA.MODEL_FILE,
                                        cfg.DATA.MODEL_PATH)
    block_blob_service.get_blob_to_path('musicmodels', cfg.DATA.WEIGHTS_FILE,
                                        cfg.DATA.WEIGHTS_PATH)

    # Rebuild the Keras model from its JSON architecture, load the trained
    # weights, and compile with the configured loss/optimizer so predict()
    # can be called. NOTE(review): the open() handle is never closed and
    # `model` is presumably a module-level global — confirm against callers.
    model = model_from_json(open(cfg.DATA.MODEL_PATH).read())
    model.load_weights(cfg.DATA.WEIGHTS_PATH)
    model.compile(loss=cfg.MODEL_PARAMS.LOSS_FUNCTION,
                  optimizer=cfg.MODEL_PARAMS.OPTIMIZER)


if __name__ == '__main__':
    # Script entry point: load the model, pick a random primer MIDI from the
    # downloaded dataset, and hand the prepared network input to run().
    # Import the logger only for Workbench runs
    logger = get_azureml_logger()

    # Presumably downloads/loads and compiles the global model — confirm
    # against init()'s definition.
    init()

    #PRIMER
    dataset_folder = download_grocery_data()
    midi_files = glob.glob(dataset_folder + '/*.mid')

    #choose a random file as a primer
    # NOTE(review): raises ValueError if no .mid files were found
    # (randint(0, -1)).
    file_idx = random.randint(0, len(midi_files) - 1)
    primer = midi_files[file_idx]
    test_piano_roll = midiToPianoroll(primer)
    test_data = [test_piano_roll]
    # Window the roll into model-input sequences of the configured length.
    test_input = createSeqTestNetInputs(test_data,
                                        cfg.MODEL_PARAMS.X_SEQ_LENGTH)

    run(test_input)
def test_identity_fn():
    """Round-trip check: MIDI -> piano roll -> MIDI, with no model in between.

    If the piano-roll conversion is lossless, the written 'same.mid' should
    match the input file musically.
    """
    source = 'uploads/mozk175b_trimmed.mid'
    target = '%s/%s' % (cfg.DATA.GENERATED_DIR, 'same.mid')
    roll = midiToPianoroll(source)
    pianorollToMidi(roll, target)
# ---- Example #6 (scrape separator; original text: "示例#6" / "0") ----
                        default=0)
    # NOTE(review): fragment — the argparse setup and the enclosing scope's
    # header are outside this chunk; the dangling "default=0)" above closes an
    # add_argument call started before the visible lines.
    args = parser.parse_args()

    # Guard clauses: the three CLI integers must all be positive.
    if args.origin_length <= 0:
        print("invalid origin length")
        exit()

    if args.target_length <= 0:
        print("invalid target length")
        exit()

    if args.load_epoch <= 0:
        print("invalid load epoch")
        exit()

    # Convert the MIDI at `path` (defined outside this fragment) to a 2-D
    # piano-roll array; indexing below implies (time_steps, pitches) —
    # confirm against midiToPianoroll.
    piano_data = midiToPianoroll(path, debug=True)
    print("shape of data ", piano_data.shape)

    # Primer: first origin_length frames, with a singleton batch axis inserted
    # at dim 1 -> shape (origin_length, 1, n_pitches), as float32 for the RNNs.
    input_datax = torch.from_numpy(
        piano_data[0:args.origin_length, :]).unsqueeze(1).float()

    encoder1 = EncoderRNN(input_dim, hidden_dim).to(device)
    decoder1 = DecoderRNN(input_dim, hidden_dim).to(device)

    # Restore the seq2seq weights saved at the requested epoch; the filename
    # suffix suggests they were trained with Adam at lr=1e-3.
    encoder1.load_state_dict(
        torch.load('../models/encoder_baseline_' + str(args.load_epoch) +
                   '_Adam1e-3'))
    decoder1.load_state_dict(
        torch.load('../models/decoder_baseline_' + str(args.load_epoch) +
                   '_Adam1e-3'))