def current_model_update():
    """Switch the active generation models to the name posted by the client.

    Reads ``model_name`` from the request form, updates the module-level
    ``model_name`` and ``models`` globals, and returns a confirmation
    message (or an empty string when the client sent the sentinel
    ``'undefined'``).
    """
    global model_name, models
    requested = request.form['model_name']
    model_name = requested
    # todo to remove this statement
    if requested == 'undefined':
        return ''
    # NOTE(review): num_voices is a module-level global set elsewhere in the file.
    models = load_models(model_base_name=requested, num_voices=num_voices)
    return 'Model ' + requested + ' loaded'
def main():
    """Train or load DeepBach-style models on the Bach chorale dataset, then
    run a Gibbs-sampling generation conditioned on a target chorale excerpt.

    Command-line driven: parses all hyper-parameters from argv, builds the
    pickled dataset on first run, creates/trains/loads the per-voice models,
    and finally displays both the target excerpt and the generated chorale
    as music21 scores.
    """
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--timesteps',
                        help="model's range (default: %(default)s)",
                        type=int, default=16)
    parser.add_argument('-b', '--batch_size_train',
                        help='batch size used during training phase (default: %(default)s)',
                        type=int, default=128)
    parser.add_argument('-s', '--samples_per_epoch',
                        help='number of samples per epoch (default: %(default)s)',
                        type=int, default=12800 * 7)
    parser.add_argument('--num_val_samples',
                        help='number of validation samples (default: %(default)s)',
                        type=int, default=1280)
    parser.add_argument('-u', '--num_units_lstm', nargs='+',
                        help='number of lstm units (default: %(default)s)',
                        type=int, default=[200, 200])
    parser.add_argument('-d', '--num_dense',
                        help='size of non recurrent hidden layers (default: %(default)s)',
                        type=int, default=200)
    parser.add_argument('-n', '--name',
                        help='model name (default: %(default)s)',
                        choices=['deepbach', 'skip', 'norelu'],
                        type=str, default='skip')
    parser.add_argument('-i', '--num_iterations',
                        help='number of gibbs iterations (default: %(default)s)',
                        type=int, default=20000)
    # nargs='?' + const: `-t` alone trains for 15 epochs, absence means 0 (no training)
    parser.add_argument('-t', '--train', nargs='?',
                        help='train models for N epochs (default: 15)',
                        default=0, const=15, type=int)
    parser.add_argument('-p', '--parallel', nargs='?',
                        help='number of parallel updates (default: 16)',
                        type=int, const=16, default=1)
    parser.add_argument('--overwrite',
                        help='overwrite previously computed models',
                        action='store_true')
    parser.add_argument('-m', '--midi_file', nargs='?',
                        help='relative path to midi file',
                        type=str,
                        const='datasets/god_save_the_queen.mid')
    parser.add_argument('-l', '--length',
                        help='length of unconstrained generation',
                        type=int, default=160)
    parser.add_argument('--ext',
                        help='extension of model name',
                        type=str, default='')
    parser.add_argument('-o', '--output_file', nargs='?',
                        help='path to output file',
                        type=str, default='',
                        const='generated_examples/example.mid')
    parser.add_argument('--dataset', nargs='?',
                        help='path to dataset folder',
                        type=str, default='')
    parser.add_argument('-r', '--reharmonization', nargs='?',
                        help='reharmonization of a melody from the corpus identified by its id',
                        type=int)
    args = parser.parse_args()
    print(args)

    # model-name suffix derived from --ext (empty when not given)
    if args.ext:
        ext = '_' + args.ext
    else:
        ext = ''

    # this variant always uses the bundled Bach dataset (ignores --dataset)
    dataset_path = None
    pickled_dataset = BACH_DATASET

    # metadatas = [TickMetadatas(SUBDIVISION), FermataMetadatas(), KeyMetadatas(window_size=1)]
    metadatas = [TickMetadatas(SUBDIVISION), FermataMetadatas()]

    # unpack CLI hyper-parameters into locals
    timesteps = args.timesteps
    batch_size = args.batch_size_train
    samples_per_epoch = args.samples_per_epoch
    nb_val_samples = args.num_val_samples
    num_units_lstm = args.num_units_lstm
    model_name = args.name.lower() + ext
    sequence_length = args.length
    batch_size_per_voice = args.parallel
    num_units_lstm = args.num_units_lstm  # NOTE(review): duplicate assignment, harmless
    num_dense = args.num_dense
    if args.output_file:
        output_file = args.output_file
    else:
        output_file = None
    parallel = batch_size_per_voice > 1
    train = args.train > 0
    num_epochs = args.train
    overwrite = args.overwrite

    # Create pickled dataset
    if not os.path.exists(pickled_dataset):
        initialization(dataset_path,
                       metadatas=metadatas,
                       voice_ids=[SOP_INDEX],
                       BACH_DATASET=BACH_DATASET)

    # load dataset
    # NOTE(review): file handle opened here is never closed explicitly
    X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(
        open(pickled_dataset, 'rb'))

    # dataset dependant variables
    NUM_VOICES = len(voice_ids)
    num_voices = NUM_VOICES
    num_pitches = list(map(len, index2notes))
    # budget is split across parallel updates and voices
    num_iterations = args.num_iterations // batch_size_per_voice // num_voices

    # Create, train load models
    # model files are created only when the last voice's YAML is missing
    if not os.path.exists('models/' + model_name + '_' +
                          str(NUM_VOICES - 1) + '.yaml'):
        create_models(model_name, create_new=overwrite,
                      num_units_lstm=num_units_lstm, num_dense=num_dense,
                      pickled_dataset=pickled_dataset, num_voices=num_voices,
                      metadatas=metadatas, timesteps=timesteps)
    if train:
        models = train_models(model_name=model_name,
                              samples_per_epoch=samples_per_epoch,
                              num_epochs=num_epochs,
                              nb_val_samples=nb_val_samples,
                              timesteps=timesteps,
                              pickled_dataset=pickled_dataset,
                              num_voices=NUM_VOICES,
                              metadatas=metadatas,
                              batch_size=batch_size)
    else:
        models = load_models(model_name, num_voices=NUM_VOICES)

    # todo to remove
    # model_name = 'skip_large'
    # timesteps = 32
    #
    # test_autoencoder(model_name='models/' + model_name + '_0',
    #                  timesteps=timesteps,
    #                  pickled_dataset=pickled_dataset)

    # sequence-to-sequence distance model; its layer-1 activations serve as a
    # hidden representation of a chorale fragment
    distance_model = load_model('models/seq2seq_masking')
    # distance_model.compile(optimizer='adam', loss='categorical_crossentropy',
    #                        metrics=['accuracy'])
    # NOTE(review): `input=`/`output=` is the Keras 1 API; Keras 2 renamed
    # them to `inputs=`/`outputs=` — confirm the pinned Keras version
    hidden_repr_model = Model(input=distance_model.input,
                              output=distance_model.layers[1].output)
    hidden_repr_model.compile(optimizer='adam',
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])

    # create target
    # hard-coded chorale index 21, time index 16 * 4, window of 32 timesteps
    left_features, _, _, _ = all_features(np.transpose(X[21], axes=(1, 0)),
                                          voice_index=0,
                                          time_index=16 * 4,
                                          timesteps=32,
                                          num_pitches=num_pitches,
                                          num_voices=num_voices)
    left_metas, central_metas, _ = all_metadatas(X_metadatas[21],
                                                 time_index=16 * 4,
                                                 timesteps=32,
                                                 metadatas=metadatas)
    inputs_target_chorale = {
        'left_features': np.array([left_features]),
        'left_metas': np.array([left_metas]),
        'central_metas': np.array([central_metas])
    }

    # show target
    score = indexed_chorale_to_score(X[21][:, 16 * 4 - 32:16 * 4],
                                     pickled_dataset=pickled_dataset)
    score.show()

    # Gibbs-sample a chorale steered towards the target's hidden representation
    generated_chorale = gibbs(generation_models=models,
                              hidden_repr_model=hidden_repr_model,
                              inputs_target_chorale=inputs_target_chorale,
                              chorale_metas=X_metadatas[12][:150],
                              num_iterations=200,
                              pickled_dataset=pickled_dataset,
                              timesteps=timesteps)

    # convert
    score = indexed_chorale_to_score(np.transpose(generated_chorale,
                                                  axes=(1, 0)),
                                     pickled_dataset=pickled_dataset)
    score.show()
def main():
    """Train or load DeepBach models and generate (or reharmonize) a chorale.

    Command-line driven: parses hyper-parameters from argv, builds the pickled
    dataset on first run, creates/trains/loads the per-voice models, then runs
    pseudo-Gibbs generation — either unconstrained, constrained to a melody
    from a MIDI file (``--midi_file``), or reharmonizing a corpus melody
    (``--reharmonization``).

    NOTE(review): this redefines ``main`` — an earlier ``def main():`` appears
    above in this file and is shadowed by this one; confirm which is intended.
    """
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--timesteps',
                        help="model's range (default: %(default)s)",
                        type=int, default=16)
    parser.add_argument('-b', '--batch_size_train',
                        help='batch size used during training phase (default: %(default)s)',
                        type=int, default=128)
    parser.add_argument('-s', '--steps_per_epoch',
                        help='number of steps per epoch (default: %(default)s)',
                        type=int, default=500)
    parser.add_argument('--validation_steps',
                        help='number of validation steps (default: %(default)s)',
                        type=int, default=20)
    parser.add_argument('-u', '--num_units_lstm', nargs='+',
                        help='number of lstm units (default: %(default)s)',
                        type=int, default=[200, 200])
    parser.add_argument('-d', '--num_dense',
                        help='size of non recurrent hidden layers (default: %(default)s)',
                        type=int, default=200)
    parser.add_argument('-n', '--name',
                        help='model name (default: %(default)s)',
                        choices=['deepbach', 'skip'],
                        type=str, default='deepbach')
    parser.add_argument('-i', '--num_iterations',
                        help='number of gibbs iterations (default: %(default)s)',
                        type=int, default=20000)
    # nargs='?' + const: `-t` alone trains for 15 epochs, absence means 0 (no training)
    parser.add_argument('-t', '--train', nargs='?',
                        help='train models for N epochs (default: 15)',
                        default=0, const=15, type=int)
    parser.add_argument('-p', '--parallel', nargs='?',
                        help='number of parallel updates (default: 16)',
                        type=int, const=16, default=1)
    parser.add_argument('--overwrite',
                        help='overwrite previously computed models',
                        action='store_true')
    parser.add_argument('-m', '--midi_file', nargs='?',
                        help='relative path to midi file',
                        type=str,
                        const='datasets/god_save_the_queen.mid')
    parser.add_argument('-l', '--length',
                        help='length of unconstrained generation',
                        type=int, default=160)
    parser.add_argument('--ext',
                        help='extension of model name',
                        type=str, default='')
    parser.add_argument('-o', '--output_file', nargs='?',
                        help='path to output file',
                        type=str, default='',
                        const='generated_examples/example.mid')
    parser.add_argument('--dataset', nargs='?',
                        help='path to dataset folder',
                        type=str, default='')
    parser.add_argument('-r', '--reharmonization', nargs='?',
                        help='reharmonization of a melody from the corpus identified by its id',
                        type=int)
    args = parser.parse_args()
    print(args)

    # fixed set of metadatas to use when CREATING the dataset
    # Available metadatas:
    # metadatas = [FermataMetadatas(), KeyMetadatas(window_size=1),
    #              TickMetadatas(SUBDIVISION), ModeMetadatas()]
    metadatas = [
        TickMetadatas(SUBDIVISION),
        FermataMetadatas(),
        KeyMetadatas(window_size=1)
    ]

    # model-name suffix derived from --ext (empty when not given)
    if args.ext:
        ext = '_' + args.ext
    else:
        ext = ''

    # datasets
    # set pickled_dataset argument
    if args.dataset:
        dataset_path = args.dataset
        pickled_dataset = pickled_dataset_path(dataset_path)
        print('pickled_dataset', pickled_dataset)
    else:
        dataset_path = None
        pickled_dataset = BACH_DATASET
    # build the pickled dataset on first run (four-voice chorales)
    if not os.path.exists(pickled_dataset):
        initialization(dataset_path,
                       metadatas=metadatas,
                       voice_ids=[0, 1, 2, 3])

    # load dataset
    # NOTE(review): file handle opened here is never closed explicitly
    X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(
        open(pickled_dataset, 'rb'))
    NUM_VOICES = len(voice_ids)
    num_pitches = list(map(len, index2notes))

    # unpack CLI hyper-parameters into locals
    timesteps = args.timesteps
    batch_size = args.batch_size_train
    steps_per_epoch = args.steps_per_epoch
    validation_steps = args.validation_steps
    num_units_lstm = args.num_units_lstm
    model_name = args.name.lower() + ext
    sequence_length = args.length
    batch_size_per_voice = args.parallel
    num_units_lstm = args.num_units_lstm  # NOTE(review): duplicate assignment, harmless
    num_dense = args.num_dense
    if args.output_file:
        output_file = args.output_file
    else:
        output_file = None

    # when reharmonization
    if args.midi_file:
        # constrain generation to a melody parsed from a MIDI file
        melody = converter.parse(args.midi_file)
        melody = part_to_inputs(melody.parts[0],
                                index2note=index2notes[0],
                                note2index=note2indexes[0])
        num_voices = NUM_VOICES - 1  # soprano is fixed; generate the others
        sequence_length = len(melody)
        # todo find a way to specify metadatas when reharmonizing a given melody
        chorale_metas = [
            metas.generate(sequence_length) for metas in metadatas
        ]
    elif args.reharmonization:
        # reharmonize a melody taken from the corpus by index
        # NOTE(review): id 0 is falsy and thus unreachable through this branch
        melody = X[args.reharmonization][0, :]
        num_voices = NUM_VOICES - 1
        chorale_metas = X_metadatas[args.reharmonization]
    else:
        # unconstrained generation of all voices
        num_voices = NUM_VOICES
        melody = None
        # todo find a better way to set metadatas
        # chorale_metas = [metas[:sequence_length] for metas in X_metadatas[11]]
        chorale_metas = [
            metas.generate(sequence_length) for metas in metadatas
        ]

    # budget is split across parallel updates and voices
    num_iterations = args.num_iterations // batch_size_per_voice // num_voices
    parallel = batch_size_per_voice > 1
    train = args.train > 0
    num_epochs = args.train
    overwrite = args.overwrite

    # model files are created only when the last voice's YAML is missing
    if not os.path.exists('models/' + model_name + '_' +
                          str(NUM_VOICES - 1) + '.yaml'):
        create_models(model_name, create_new=overwrite,
                      num_units_lstm=num_units_lstm, num_dense=num_dense,
                      pickled_dataset=pickled_dataset, num_voices=num_voices,
                      metadatas=metadatas, timesteps=timesteps)
    if train:
        models = train_models(model_name=model_name,
                              steps_per_epoch=steps_per_epoch,
                              num_epochs=num_epochs,
                              validation_steps=validation_steps,
                              timesteps=timesteps,
                              pickled_dataset=pickled_dataset,
                              num_voices=NUM_VOICES,
                              metadatas=metadatas,
                              batch_size=batch_size)
    else:
        models = load_models(model_name, num_voices=NUM_VOICES)

    temperature = 1.
    # recover the models' actual receptive field from the loaded architecture
    # NOTE(review): _keras_shape is a private Keras 1.x attribute
    timesteps = int(models[0].input[0]._keras_shape[1])
    seq = generation(model_base_name=model_name,
                     models=models,
                     timesteps=timesteps,
                     melody=melody,
                     initial_seq=None,
                     temperature=temperature,
                     chorale_metas=chorale_metas,
                     parallel=parallel,
                     batch_size_per_voice=batch_size_per_voice,
                     num_iterations=num_iterations,
                     sequence_length=sequence_length,
                     output_file=output_file,
                     pickled_dataset=pickled_dataset)
# get model names present in folder models/ models_list = glob('models/*.yaml') models_list = list( set( map(lambda name: '_'.join(name.split('_')[:-1]).split('/')[-1], models_list))) # model_name = 'deepbach' model_name = 'skip_new' assert os.path.exists('models/' + model_name + '_' + str(num_voices - 1) + '.yaml') # load models models = load_models(model_name, num_voices=num_voices) temperature = 1. timesteps = int(models[0].input[0]._keras_shape[1]) @app.route('/compose', methods=['POST']) def compose(): # global models # --- Parse request--- with tempfile.NamedTemporaryFile(mode='w', suffix='.xml') as file: print(file.name) # file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename) xml_string = request.form['xml_string'] file.write(xml_string)
open(BACH_DATASET, 'rb')) NUM_VOICES = len(voice_ids) model_name = 'deepbach' sequence_length = args.length batch_size_per_voice = args.parallel midi_out = 'tmp/out.midi' wav_out = 'tmp/out.wav' iteration = 0 reharmonization = 0 num_iterations = args.num_iterations // batch_size_per_voice // ( NUM_VOICES - 1) parallel = batch_size_per_voice > 1 models = load_models(model_name, num_voices=NUM_VOICES) temperature = 1. timesteps = int(models[0].input[0]._keras_shape[1]) while True: print("-- ITERATION %d --" % iteration) print("-- REHARMONIZATION %d --" % reharmonization) melody = X[reharmonization][0, :] chorale_metas = X_metadatas[reharmonization] reharmonization = (reharmonization + 1) % len(X) generation(model_base_name=model_name, models=models, timesteps=timesteps, melody=melody,