def generate_double(fn, gn):
    # Convert the input MIDI file to a piano-roll matrix and wrap it as a batch of one.
    test_piano_roll = midiToPianoroll(fn)
    test_data = [test_piano_roll]
    test_input = createSeqTestNetInputs(test_data, cfg.MODEL_PARAMS.X_SEQ_LENGTH)
    test_data = test_input[0]
    # Run the model and decode its output back into a piano roll.
    net_output = model.predict(test_data)
    net_roll = seqNetOutToPianoroll(net_output)
    # Full roll (input followed by the generated continuation); note that only the
    # generated part is written to disk below.
    total_roll = np.concatenate((test_piano_roll, net_roll))
    pianorollToMidi(net_roll, gn)
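# Hypothetical usage sketch (not from the original source): generate_double assumes a
# module-level `model` and the pianoroll helpers are already loaded; the paths below
# are placeholders. Kept commented out so it does not run on import.
#
# generate_double('uploads/example_input.mid', 'static/example_generated.mid')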
def run(test_data):
    generated_file = 'LSTM_gen_%s.mid' % (time.strftime("%Y%m%d_%H_%M"))
    generated_path = '%s%s' % (cfg.DATA.GENERATED_DIR, generated_file)
    for i, song in enumerate(test_data):
        # Predict a continuation for each song and decode it into a piano roll.
        net_output = model.predict(song)
        print("net_output:", net_output.shape)
        net_roll = seqNetOutToPianoroll(net_output)
        print("net_roll:", net_roll.shape)
        pianorollToMidi(net_roll, generated_path)
        # Upload the generated MIDI file to Azure Blob Storage.
        block_blob_service.create_blob_from_path('musicmodels', generated_file, generated_path)
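# The upload above relies on a module-level `block_blob_service`. A minimal sketch of
# how such a client is constructed with the legacy azure-storage-blob SDK (pre-12);
# the account name and key are placeholders, not values from the original source.
#
# from azure.storage.blob import BlockBlobService
# block_blob_service = BlockBlobService(account_name='your_account_name',
#                                       account_key='your_account_key')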
def test():
    # Pick a MIDI file from the data directory (random selection is bypassed for now
    # in favor of a fixed test file).
    midi_files = glob.glob(DATA_DIR + '/*.mid')
    file_idx = random.randint(0, len(midi_files) - 1)
    # midi_file = midi_files[file_idx]
    midi_file = 'uploads/mozk175b_trimmed.mid'
    print('Generating from %s' % midi_file)
    # Convert the MIDI file to a piano roll and build the network input sequence.
    test_piano_roll = midiToPianoroll(midi_file)
    test_data = [test_piano_roll]
    test_input = createSeqTestNetInputs(test_data, cfg.MODEL_PARAMS.X_SEQ_LENGTH)
    test_data = test_input[0]
    generated_file = 'AI_generated_%s.mid' % (time.strftime("%Y_%m_%d_%H_%M_%s"))
    generated_path = '%s/%s' % (cfg.DATA.GENERATED_DIR, generated_file)
    # Predict, decode the output to a piano roll, and write the result as MIDI.
    net_output = model.predict(test_data)
    print("net_output:", net_output.shape)
    net_roll = seqNetOutToPianoroll(net_output)
    print("net_roll:", net_roll.shape)
    pianorollToMidi(net_roll, generated_path)
def generate():
    # Flask handler: accept an uploaded MIDI file and return a generated continuation.
    file = flask.request.files['midifile']
    if file.filename.split('.')[-1] in ALLOWED_EXTENSIONS:
        print(model.summary())
        fn = 'uploads/input_file_%s.mid' % (time.strftime("%Y_%m_%d_%H_%M_%s"))
        file.save(fn)
        gn = 'static/AI_generated_%s.mid' % (time.strftime("%Y_%m_%d_%H_%M_%s"))
        # zen.generate_double(fn, gn)
        test_piano_roll = midiToPianoroll(fn)
        test_data = [test_piano_roll]
        test_input = createSeqTestNetInputs(test_data, cfg.MODEL_PARAMS.X_SEQ_LENGTH)
        test_data = test_input[0]
        net_output = model.predict(test_data)
        net_roll = seqNetOutToPianoroll(net_output)
        # Full roll (input + generated continuation); only the generated part is saved.
        total_roll = np.concatenate((test_piano_roll, net_roll))
        pianorollToMidi(net_roll, gn)
        return flask.jsonify({'generated': gn})
    else:
        return "You have uploaded an invalid file"
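# Hedged usage sketch: assuming this handler is registered on a POST route such as
# '/generate' (the route decorator is not shown, so the URL is an assumption), a
# client could call it with the `requests` library; the form field name 'midifile'
# matches the handler above.
#
# import requests
# with open('uploads/mozk175b_trimmed.mid', 'rb') as f:
#     resp = requests.post('http://localhost:5000/generate',
#                          files={'midifile': ('input.mid', f, 'audio/midi')})
# print(resp.json())  # e.g. {'generated': 'static/AI_generated_<timestamp>.mid'}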
print("invalid origin length") exit() if args.target_length <= 0: print("invalid target length") exit() if args.load_epoch <= 0: print("invalid load epoch") exit() piano_data = midiToPianoroll(path, debug=True) print("shape of data ", piano_data.shape) input_datax = torch.from_numpy( piano_data[0:args.origin_length, :]).unsqueeze(1).float() encoder1 = EncoderRNN(input_dim, hidden_dim).to(device) decoder1 = DecoderRNN(input_dim, hidden_dim).to(device) encoder1.load_state_dict( torch.load('../models/encoder_baseline_' + str(args.load_epoch) + '_Adam1e-3')) decoder1.load_state_dict( torch.load('../models/decoder_baseline_' + str(args.load_epoch) + '_Adam1e-3')) output = generate(input_datax, encoder1, decoder1, args.target_length) piano_roll = seqNetOutToPianoroll(output) pianorollToMidi(piano_roll, "../output/test.mid")