def main(args):
    trainer, predictor = models.MODELS[args.model_name]()
    time_dim = trainer.inputs['data'].shape[2]

    if args.init_param_file:
        print("Loading parameters: %s" % args.init_param_file)
        trainer.load_param_values(args.init_param_file)

    print("Opening %s" % args.training_file)
    stash = biggie.Stash(args.training_file, cache=True)
    stream = S.minibatch(
        D.create_target_stream(
            stash, time_dim, max_pitch_shift=0, bins_per_pitch=1,
            mapper=D.FX.note_numbers_to_pitch,
            sample_func=D.slice_note_entity),
        batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.output_directory)

    hyperparams = dict(learning_rate=LEARNING_RATE, dropout=DROPOUT)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    sim_margin = -RADIUS * args.margin
    trainer, predictor, zerofilter = models.iX_c3f2_oY(20, 3, 'xlarge')
    time_dim = trainer.inputs['cqt'].shape[2]

    if args.init_param_file:
        print("Loading parameters: {0}".format(args.init_param_file))
        trainer.load_param_values(args.init_param_file)

    print("Opening {0}".format(args.training_file))
    stash = biggie.Stash(args.training_file, cache=True)
    stream = S.minibatch(
        D.create_pairwise_stream(stash, time_dim,
                                 working_size=100, threshold=0.05),
        batch_size=BATCH_SIZE)
    stream = D.batch_filter(
        stream, zerofilter, threshold=2.0**-16, min_batch=1,
        max_consecutive_skips=100, sim_margin=sim_margin, diff_margin=RADIUS)

    print("Starting '{0}'".format(args.trial_name))
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=futil.create_directory(args.output_directory))

    hyperparams = dict(
        learning_rate=LEARNING_RATE, sim_margin=sim_margin, diff_margin=RADIUS)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
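# A hypothetical numpy illustration of the pairwise hinge implied by the two
# margins above (sim_margin = -RADIUS * args.margin, diff_margin = RADIUS).
# The real cost lives inside the optimus trainer graph; the function name and
# signature here are assumptions, not part of models.iX_c3f2_oY.
import numpy as np

def pairwise_hinge(dist, is_same, sim_margin, diff_margin):
    """dist: (N,) embedding distances; is_same: (N,) binary pair labels."""
    # Same-class pairs are penalized once their distance exceeds |sim_margin|;
    # different-class pairs are penalized until they clear diff_margin.
    sim_cost = is_same * np.maximum(0.0, dist + sim_margin)
    diff_cost = (1 - is_same) * np.maximum(0.0, diff_margin - dist)
    return float(np.mean(sim_cost + diff_cost))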
def main(args):
    trainer, predictor = models.MODELS[args.model_name]()
    time_dim = trainer.inputs['data'].shape[2]

    if args.init_param_file:
        print("Loading parameters: %s" % args.init_param_file)
        trainer.load_param_values(args.init_param_file)

    print("Opening %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = D.create_chord_index_stream(
        stash, time_dim, VOCAB,
        sample_func=D.slice_chroma_entity, working_size=25)
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.output_directory)

    hyperparams = dict(learning_rate=LEARNING_RATE, dropout=DROPOUT)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    trainer, predictor = models.MODELS[args.model_name]()
    time_dim = trainer.inputs['cqt'].shape[2]

    if args.init_param_file:
        print("Loading parameters: %s" % args.init_param_file)
        trainer.load_param_values(args.init_param_file)

    print("Opening %s" % args.training_file)
    stash = biggie.Stash(args.training_file, cache=True)
    stream = D.create_uniform_chord_stream(
        stash, time_dim, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.output_directory)

    hyperparams = dict(learning_rate=LEARNING_RATE)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    trainer, predictor = models.MODELS[args.model_name]()
    time_dim = trainer.inputs['data'].shape[2]

    if args.init_param_file:
        print("Loading parameters: %s" % args.init_param_file)
        trainer.load_param_values(args.init_param_file)

    print("Opening %s" % args.training_file)
    stash = biggie.Stash(args.training_file, cache=True)
    stream = D.create_chord_index_stream(
        stash, time_dim, max_pitch_shift=0, lexicon=VOCAB)

    # Load the class prior and scale the model's 'prior' node by its inverse,
    # so rare classes are up-weighted.
    stat_file = "%s.json" % path.splitext(args.training_file)[0]
    prior = np.array(json.load(open(stat_file))['prior'], dtype=float)
    trainer.nodes['prior'].weight.value = 1.0 / prior.reshape(1, -1)

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.output_directory)

    hyperparams = dict(learning_rate=LEARNING_RATE, dropout=DROPOUT)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
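# The inverse-prior trick above, in isolation: classes that are rare in the
# training data receive proportionally larger output weights. A minimal
# sketch; the file name is hypothetical, while the 'prior' key and the
# "<stash>.json" convention follow main() above.
import json

import numpy as np

with open("train_stash.json") as fp:  # hypothetical stat file
    prior = np.array(json.load(fp)['prior'], dtype=float)
scale = 1.0 / prior.reshape(1, -1)  # broadcastable against (batch, vocab) posteriors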
def build_model():
    x_in = optimus.Input(name="x", shape=(None, 2))
    class_idx = optimus.Input(name="y", shape=(None,), dtype='int32')
    learning_rate = optimus.Input(name='learning_rate', shape=None)

    layer0 = optimus.Affine(
        name='layer0', input_shape=x_in.shape,
        output_shape=(None, 100), act_type='relu')
    layer1 = optimus.Affine(
        name='layer1', input_shape=layer0.output.shape,
        output_shape=(None, 100), act_type='relu')
    classifier = optimus.Softmax(
        name='classifier', input_shape=layer1.output.shape,
        n_out=N_CLASSES, act_type='linear')

    nll = optimus.NegativeLogLikelihood(name="nll")
    posterior = optimus.Output(name='posterior')

    trainer_edges = optimus.ConnectionManager([
        (x_in, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, classifier.input),
        (classifier.output, nll.likelihood),
        (class_idx, nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, classifier.weights),
        (learning_rate, classifier.bias)])

    trainer = optimus.Graph(
        name='trainer',
        inputs=[x_in, class_idx, learning_rate],
        nodes=[layer0, layer1, classifier],
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[nll],
        updates=update_manager.connections)

    optimus.random_init(layer0.weights)
    optimus.random_init(layer1.weights)
    optimus.random_init(classifier.weights)

    predictor_edges = optimus.ConnectionManager([
        (x_in, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, classifier.input),
        (classifier.output, posterior)])

    predictor = optimus.Graph(
        name='predictor',
        inputs=[x_in],
        nodes=[layer0, layer1, classifier],
        connections=predictor_edges.connections,
        outputs=[posterior])

    driver = optimus.Driver(graph=trainer, name='test')
    return driver, predictor
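# Smoke-test sketch for build_model() on toy 2-D data. It assumes Driver.fit
# consumes an iterable of dicts keyed by the trainer's Input names ('x', 'y')
# and accepts a max_iter kwarg; both are assumptions about the optimus API,
# not documented behavior.
import numpy as np

def toy_stream(batch_size=32):
    while True:
        x = np.random.randn(batch_size, 2).astype('float32')
        y = (x[:, 0] > 0).astype('int32')  # linearly separable labels
        yield dict(x=x, y=y)

driver, predictor = build_model()
driver.fit(toy_stream(), hyperparams=dict(learning_rate=0.02), max_iter=100)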
def fit(trial_name, output_dir, model_params, hyperparams, train_params,
        data_params, param_file=''):
    """Fit a model given the parameters.

    Parameters
    ----------
    ...

    Returns
    -------
    artifacts : dict
        Contains data resulting from fitting the model.
    """
    output_dir = os.path.join(output_dir, trial_name)
    utils.safe_makedirs(output_dir)
    trainer, predictor = M.create(**model_params)
    data_params.update(
        window_length=model_params['n_in'],
        dataset=pd.read_json(data_params.pop('dataset')))

    # TODO: Migrate this into the config object... but where?!
    source = D.awgn(D.create_stream(**data_params), 0.1, 0.01)
    print("Starting '{0}'".format(trial_name))

    param_file = os.path.join(output_dir, param_file)
    log_file = os.path.join(output_dir, 'train_stats.csv')
    driver = optimus.Driver(
        graph=trainer, name=trial_name,
        output_directory=output_dir, log_file=log_file)

    model_file = os.path.join(
        output_dir, "{}-predictor.json".format(trial_name))
    optimus.save(predictor, model_file)
    driver.fit(source, hyperparams=hyperparams, **train_params)
    return dict(log_file=log_file, model_file=model_file)
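# Hypothetical invocation of fit(); the keys inside model_params,
# train_params, and data_params are illustrative guesses that depend on the
# signatures of M.create, Driver.fit, and D.create_stream respectively. Only
# 'n_in' and 'dataset' are implied by the body of fit() above.
artifacts = fit(
    trial_name='deleteme',
    output_dir='/tmp/models',
    model_params=dict(n_in=20),
    hyperparams=dict(learning_rate=0.02),
    train_params=dict(max_iter=500),
    data_params=dict(dataset='training_data.json'),
    param_file='params.npz')
print(artifacts['model_file'])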
def main(args):
    trainer, predictor = build_model()
    if args.init_param_file:
        print("Loading parameters: %s" % args.init_param_file)
        trainer.load_param_values(args.init_param_file)
        # Re-initialize the top layer after warm-starting.
        optimus.random_init(trainer.params['layer3'].weights)
        optimus.random_init(trainer.params['layer3'].bias)

    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = D.create_stash_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, pool_size=25)

    if args.secondary_source:
        stash2 = biggie.Stash(args.secondary_source)
        stream2 = D.create_uniform_chord_stream(
            stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
        stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    hyperparams = dict(learning_rate=LEARNING_RATE)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    is_chord = optimus.Input(
        name='is_chord', shape=(None,))
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 512), act_type='relu')
    chord_estimator = optimus.Affine(
        name='chord_estimator', input_shape=layer3.output.shape,
        output_shape=(None, VOCAB), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # 1.3 Create Losses
    chord_mse = optimus.SparseMeanSquaredError(
        # chord_mse = optimus.SparseCrossEntropy(
        name="chord_mse")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, chord_mse.prediction),
        (chord_idx, chord_mse.index),
        (is_chord, chord_mse.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)])

    print("Building trainer")
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, is_chord, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse],
        updates=update_manager.connections)

    optimus.random_init(chord_estimator.weights)

    print("Building validator")
    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, is_chord],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)])

    print("Building predictor")
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print("Opening Data")
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_contrastive_quality_stream(stash, TIME_DIM, vocab_dim=VOCAB),
        batch_size=50)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    print("...aaand we're off!")
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 512), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    print("Building trainer")
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    optimus.random_init(chord_classifier.weights)

    print("Building validator")
    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    print("Building predictor")
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print("Opening Data")
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_quality_stream(stash, TIME_DIM),
        batch_size=50,
        functions=[FX.pitch_shift(), FX.map_to_chord_index(VOCAB)])

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    print("...aaand we're off!")
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
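# Shape-level sketch of what a CQT pitch-shift augmenter (cf. FX.pitch_shift
# above) might do: roll the frequency axis by a random number of bins. The
# real transform also remaps the chord label; that step is omitted here
# because it depends on the vocabulary layout.
import numpy as np

def pitch_shift_patch(cqt, max_bins=6):
    """cqt: array shaped (..., time, freq); returns a randomly shifted copy."""
    shift = np.random.randint(-max_bins, max_bins + 1)
    return np.roll(cqt, shift, axis=-1)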
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, PITCH_DIM))
    target = optimus.Input(
        name='target', shape=(None, VOCAB))
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19), pool_shape=(2, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')
    chord_classifier = optimus.Affine(
        name='chord_classifier', input_shape=layer3.output.shape,
        output_shape=(None, 6), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_mse = optimus.MeanSquaredError(name="chord_mse")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_mse.prediction),
        (target, chord_mse.target)])

    # Only the top layers are updated; the convolutional front-end is frozen.
    update_manager = optimus.ConnectionManager([
        # (learning_rate, layer0.weights),
        # (learning_rate, layer0.bias),
        # (learning_rate, layer1.weights),
        # (learning_rate, layer1.bias),
        # (learning_rate, layer2.weights),
        # (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights, 0, 0.01)
        optimus.random_init(n.bias, 0, 0.01)

    if args.init_param_file:
        param_values = dict(np.load(args.init_param_file))
        # Drop the output layers so they are learned from scratch.
        for key in list(param_values.keys()):
            if chord_classifier.name in key or layer3.name in key:
                print("skipping %s" % key)
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10)
    stream = S.minibatch(
        FX.chord_index_to_tonnetz(stream, vocab_dim=VOCAB),
        batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
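# Hedged sketch of a 6-D Tonnetz (tonal centroid) mapping in the spirit of
# FX.chord_index_to_tonnetz: project a 12-bin chroma vector onto the circles
# of fifths, minor thirds, and major thirds, after Harte et al. (2006). The
# radii and the missing chord-index-to-chroma step are assumptions.
import numpy as np

def chroma_to_tonnetz(chroma, radii=(1.0, 1.0, 0.5)):
    pcs = np.arange(12)
    phi = np.vstack([
        radii[0] * np.sin(pcs * 7 * np.pi / 6),
        radii[0] * np.cos(pcs * 7 * np.pi / 6),
        radii[1] * np.sin(pcs * 3 * np.pi / 2),
        radii[1] * np.cos(pcs * 3 * np.pi / 2),
        radii[2] * np.sin(pcs * 2 * np.pi / 3),
        radii[2] * np.cos(pcs * 2 * np.pi / 3)])
    return phi.dot(chroma) / max(chroma.sum(), 1e-8)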
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)
    margin = optimus.Input(
        name='margin', shape=None)
    margin_weight = optimus.Input(
        name='margin_weight', shape=None)
    nll_weight = optimus.Input(
        name='nll_weight', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 512), act_type='relu')
    chord_classifier = optimus.Affine(
        name='chord_classifier', input_shape=layer3.output.shape,
        output_shape=(None, VOCAB), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    nll = optimus.NegativeLogLikelihood(name="nll", weighted=True)
    likelihood_margin = optimus.LikelihoodMargin(
        name="likelihood_margin", mode='l1', weighted=True)
    # likelihood_margin = optimus.NLLMargin(
    #     name="likelihood_margin",
    #     mode='l2',
    #     weighted=True)

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, likelihood_margin.likelihood),
        (chord_idx, likelihood_margin.target_idx),
        (margin, likelihood_margin.margin),
        (margin_weight, likelihood_margin.weight),
        (chord_classifier.output, nll.likelihood),
        (chord_idx, nll.target_idx),
        (nll_weight, nll.weight)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, margin, learning_rate,
                margin_weight, nll_weight],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[nll, likelihood_margin],
        updates=update_manager.connections)

    optimus.random_init(chord_classifier.weights)
    optimus.random_init(chord_classifier.bias)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, margin, margin_weight, nll_weight],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[nll, likelihood_margin])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
    # 3. Create Data
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_quality_stream(stash, TIME_DIM, vocab_dim=VOCAB),
        batch_size=50)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {
        learning_rate.name: LEARNING_RATE,
        margin_weight.name: MARGIN_WEIGHT,
        nll_weight.name: NLL_WEIGHT,
        margin.name: MARGIN}

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 6, TIME_DIM, 40))
    target = optimus.Input(
        name='target', shape=(None, VOCAB))
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5), pool_shape=(2, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')
    chord_estimator = optimus.Affine(
        name='chord_estimator', input_shape=layer3.output.shape,
        output_shape=(None, VOCAB), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # 1.3 Create Losses
    chord_xentropy = optimus.CrossEntropy(name="chord_xentropy")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, chord_xentropy.prediction),
        (target, chord_xentropy.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_xentropy],
        updates=update_manager.connections,
        momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights, 0, 0.01)
        optimus.random_init(n.bias, 0, 0.01)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_xentropy])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    # partition_labels = json.load(
    #     open("/home/ejhumphrey/Dropbox/tmp/train0_v2_merged_partition.json"))
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB, working_size=5)
    stream = S.minibatch(
        FX.chord_index_to_affinity_vectors(
            FX.wrap_cqt(stream, length=40, stride=36), VOCAB),
        batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 5, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 5, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 2, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 512), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    optimus.random_init(chord_classifier.weights)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    source = optimus.Queue(
        optimus.File(args.training_file),
        transformers=[
            T.chord_sample(input_data.shape[2]),
            T.pitch_shift(8),
            T.map_to_index(VOCAB)],
        **SOURCE_ARGS)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    driver.fit(source, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)
    dropout = optimus.Input(
        name='dropout', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(30, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(50, None, 7, 15), act_type='relu')
    layer2 = optimus.Affine(
        name='layer2', input_shape=layer1.output.shape,
        output_shape=(None, 1024), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')

    # Dropout is active only while training.
    for n in [layer2, layer3]:
        n.enable_dropout()

    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx),
        (dropout, layer2.dropout),
        (dropout, layer3.dropout)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, dropout],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    validator_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    # Turn dropout off before building the validator and predictor.
    for n in [layer2, layer3]:
        n.disable_dropout()

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=validator_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    s = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=6, vocab_dim=VOCAB, working_size=10)
    stream = S.minibatch(
        FX.drop_frames(FX.awgn(s, 0.05), 0.1),
        batch_size=BATCH_SIZE)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE, dropout.name: DROPOUT}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    print("Starting '%s'" % args.trial_name)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
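# Rough numpy analogue of the FX.awgn / FX.drop_frames chain above: additive
# Gaussian noise followed by randomly zeroed time frames. The real transforms
# operate on entity streams; this only conveys the tensor-level effect.
import numpy as np

def awgn(x, sigma=0.05):
    return x + np.random.normal(0.0, sigma, x.shape).astype(x.dtype)

def drop_frames(x, p=0.1):
    """x: array shaped (..., time, freq); zeroes each frame with prob p."""
    keep = (np.random.rand(x.shape[-2]) >= p).astype(x.dtype)
    return x * keep[:, np.newaxis]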
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(30, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(50, None, 7, 15), act_type='relu')
    layer2 = optimus.Affine(
        name='layer2', input_shape=layer1.output.shape,
        output_shape=(None, 1024), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections,
        momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    # Warm-start from a previously trained classifier.
    trainer.load_param_values(
        "/media/attic/dl4mir/chord_estimation/models/nll_chord_uniform_2big/"
        "synth_data_01/0/classifier-V157-synth_data_01-041750-"
        "2014-08-25_21h59m56s.npz")

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB, working_size=3)
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    target_chroma = optimus.Input(
        name='target_chroma', shape=(None, 12))
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 512), act_type='relu')
    layer4 = optimus.Affine(
        name='layer4', input_shape=layer3.output.shape,
        output_shape=(None, 12), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, layer4]

    # 1.3 Create Losses
    chroma_xentropy = optimus.CrossEntropy(name="chroma_xentropy")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, layer4.input),
        (layer4.output, chroma_xentropy.prediction),
        (target_chroma, chroma_xentropy.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, layer4.weights),
        (learning_rate, layer4.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy],
        updates=update_manager.connections)

    optimus.random_init(layer0.weights)
    optimus.random_init(layer1.weights)
    optimus.random_init(layer2.weights)
    optimus.random_init(layer3.weights)
    optimus.random_init(layer4.weights)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy])

    chroma_out = optimus.Output(name='chroma')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, layer4.input),
        (layer4.output, chroma_out)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[chroma_out])

    # 3. Create Data
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.uniform_quality_chroma_stream(stash, TIME_DIM),
        batch_size=50)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
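# Illustrative helper for the 12-D chroma targets consumed by chroma_xentropy:
# a binary pitch-class template for a triad. A sketch only; the actual targets
# come from D.uniform_quality_chroma_stream.
import numpy as np

def triad_chroma(root, intervals=(0, 4, 7)):
    chroma = np.zeros(12, dtype='float32')
    chroma[[(root + i) % 12 for i in intervals]] = 1.0
    return chroma

# triad_chroma(0) -> ones at pitch classes {0, 4, 7}, i.e. a C major triad.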
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)
    limiter_weight = optimus.Input(
        name='limiter_weight', shape=None)
    likelihood_threshold = optimus.Input(
        name='likelihood_threshold', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 512), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")
    max_likelihood = optimus.Max(name="max_likelihood")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx),
        (chord_classifier.output, max_likelihood.input),
        (limiter_weight, max_likelihood.weight),
        (likelihood_threshold, max_likelihood.threshold)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    print("Trainer")
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate,
                limiter_weight, likelihood_threshold],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll, max_likelihood],
        updates=update_manager.connections)

    optimus.random_init(chord_classifier.weights)

    print("Validator")
    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, limiter_weight, likelihood_threshold],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll, max_likelihood])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    print("Predictor")
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
    # 3. Create Data
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_quality_stream(stash, TIME_DIM, vocab_dim=VOCAB),
        batch_size=50)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE,
                   likelihood_threshold.name: LIKELIHOOD_THRESHOLD,
                   limiter_weight.name: LIMITER_WEIGHT}

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, PITCH_DIM))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19), pool_shape=(2, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_mce = optimus.ClassificationError(name="chord_mce")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_mce.prediction),
        (chord_idx, chord_mce.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mce],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        trainer.load_param_values(args.init_param_file)

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    synth_stash = biggie.Stash(args.secondary_source)
    stream = D.muxed_uniform_chord_stream(
        stash, synth_stash, TIME_DIM, pitch_shift=0,
        vocab_dim=VOCAB, working_size=10)
    # stream = D.create_uniform_chord_stream(
    #     stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10)
    # if args.secondary_source:
    #     print("Loading %s" % args.secondary_source)
    #     stash2 = biggie.Stash(args.secondary_source)
    #     stream2 = D.create_uniform_chord_stream(
    #         stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
    #     stream = S.mux([stream, stream2], [0.5, 0.5])
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, 252))
    target_chroma = optimus.Input(
        name='target_chroma', shape=(None, 12))
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(12, 1, 3, 19), pool_shape=(1, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(16, None, 3, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(20, None, 1, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 12), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3]

    # 1.3 Create Losses
    chroma_xentropy = optimus.CrossEntropy(name="chroma_xentropy")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chroma_xentropy.prediction),
        (target_chroma, chroma_xentropy.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy],
        updates=update_manager.connections)

    optimus.random_init(layer0.weights)
    optimus.random_init(layer1.weights)
    optimus.random_init(layer2.weights)
    optimus.random_init(layer3.weights)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy])

    chroma_out = optimus.Output(name='chroma')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chroma_out)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[chroma_out])

    # 3. Create Data
    source = optimus.Queue(
        optimus.File(args.training_file),
        transformers=[
            T.chord_sample(input_data.shape[2]),
            T.pitch_shift(8),
            T.map_to_chroma],
        **SOURCE_ARGS)

    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    driver.fit(source, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, 1, TIME_DIM, PITCH_DIM))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)

    # 1.2 Create Nodes
    input_scalar = optimus.Normalize(
        name='input_scalar', mode='l2', scale_factor=50.0)
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19), pool_shape=(2, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        n_out=VOCAB, act_type='linear')

    all_nodes = [input_scalar, layer0, layer1, layer2, layer3,
                 chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input),
        (input_scalar.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    # Skip the parameter-free Normalize node when initializing.
    for n in all_nodes[1:]:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        param_values = dict(np.load(args.init_param_file))
        # Drop the output layers so they are learned from scratch.
        for key in list(param_values.keys()):
            if chord_classifier.name in key or layer3.name in key:
                print("skipping %s" % key)
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input),
        (input_scalar.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_chord_stream(
            stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10),
        batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer, name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt', shape=(None, OCTAVE_DIM, TIME_DIM, PITCH_DIM))
    chord_idx = optimus.Input(
        name='chord_idx', shape=(None,), dtype='int32')
    learning_rate = optimus.Input(
        name='learning_rate', shape=None)
    margin = optimus.Input(
        name='margin', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0', input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5), pool_shape=(2, 3), act_type='relu')
    layer1 = optimus.Conv3D(
        name='layer1', input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7), act_type='relu')
    layer2 = optimus.Conv3D(
        name='layer2', input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6), act_type='relu')
    layer3 = optimus.Affine(
        name='layer3', input_shape=layer2.output.shape,
        output_shape=(None, 1024), act_type='relu')
    chord_classifier = optimus.Softmax(
        name='chord_classifier', input_shape=layer3.output.shape,
        output_shape=(None, VOCAB), act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # Nodes that assemble the negative log-likelihood by hand:
    # -log(p[chord_idx]), averaged over the batch.
    log = optimus.Log(name='log')
    neg = optimus.Gain(name='gain')
    neg.weight = np.array(-1)
    energy = optimus.SelectIndex(name='selector')
    loss = optimus.Mean(name='total_loss')
    loss_nodes = [log, neg, energy, loss]

    # 1.3 Create Losses
    # NOTE: chord_margin and the margin input are currently unused below.
    chord_margin = optimus.Margin(name="chord_margin", mode='max')

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, log.input),
        (log.output, neg.input),
        (neg.output, energy.input),
        (chord_idx, energy.index),
        (energy.output, loss.input)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, margin],
        nodes=all_nodes + loss_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        loss=[loss.output],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        print("Loading parameters: %s" % args.init_param_file)
        trainer.load_param_values(args.init_param_file)
        # Re-initialize the last two layers after warm-starting.
        for n in all_nodes[-2:]:
            optimus.random_init(n.weights)
            optimus.random_init(n.bias)

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])
Create Data print "Loading %s" % args.training_file stash = biggie.Stash(args.training_file) stream = D.create_stash_stream( stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, pool_size=25) if args.secondary_source: stash2 = biggie.Stash(args.secondary_source) stream2 = D.create_uniform_chord_stream( stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5) stream = S.mux([stream, stream2], [0.5, 0.5]) stream = S.minibatch(stream, batch_size=BATCH_SIZE) print "Starting '%s'" % args.trial_name driver = optimus.Driver( graph=trainer, name=args.trial_name, output_directory=args.model_directory) hyperparams = {learning_rate.name: LEARNING_RATE, margin.name: MARGIN} predictor_file = path.join(driver.output_directory, args.predictor_file) optimus.save(predictor, def_file=predictor_file) driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
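# The Log -> Gain(-1) -> SelectIndex -> Mean chain in the trainer above
# computes a negative log-likelihood by hand: take the log of the class
# posteriors, negate, pick out the entry at each example's target index,
# and average over the batch. A NumPy sketch of the same computation
# (the array values are made up for illustration):

import numpy as np

posteriors = np.array([[0.7, 0.2, 0.1],    # batch of class likelihoods
                       [0.1, 0.8, 0.1]])
chord_idx = np.array([0, 1])               # target index per example

energy = -np.log(posteriors)                             # Log, then Gain(-1)
selected = energy[np.arange(len(chord_idx)), chord_idx]  # SelectIndex
total_loss = selected.mean()                             # Mean
# total_loss == mean(-log p(correct class)), i.e. the batch NLL.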
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    fret_bitmap = optimus.Input(
        name='fret_bitmap',
        shape=(None, 6, FRET_DIM))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(12, 1, 3, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(16, None, 3, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(20, None, 1, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 512),
        act_type='relu')

    fretboard = optimus.MultiSoftmax(
        name='fretboard',
        input_shape=layer3.output.shape,
        output_shape=(None, 6, FRET_DIM),
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, fretboard]

    # 1.3 Create Losses
    mse = optimus.MeanSquaredError(name="mean_squared_error")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, fretboard.input),
        (fretboard.output, mse.prediction),
        (fret_bitmap, mse.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, fretboard.weights),
        (learning_rate, fretboard.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, fret_bitmap, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[mse],
        updates=update_manager.connections)

    optimus.random_init(fretboard.weights)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, fret_bitmap],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[mse])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, fretboard.input),
        (fretboard.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    source = optimus.Queue(
        optimus.File(args.training_file),
        transformers=[
            T.cqt_sample(input_data.shape[2]),
            T.pitch_shift(MAX_FRETS, bins_per_pitch=3),
            T.fret_indexes_to_bitmap(FRET_DIM)],
        **SOURCE_ARGS)

    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    driver.fit(source, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
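# The `T.fret_indexes_to_bitmap(FRET_DIM)` transformer above presumably
# expands six per-string fret indexes into the (6, FRET_DIM) one-hot
# "bitmap" that the MultiSoftmax output is regressed against with mean
# squared error. A hedged sketch of that encoding; the function body and
# its exact semantics are assumptions, not taken from the source:

import numpy as np

def fret_indexes_to_bitmap(fret_idx, fret_dim):
    """Map 6 fret indexes to a (6, fret_dim) one-hot bitmap."""
    bitmap = np.zeros([len(fret_idx), fret_dim])
    bitmap[np.arange(len(fret_idx)), fret_idx] = 1.0
    return bitmap

# E.g. an open E-major shape, strings low E to high e:
print fret_indexes_to_bitmap([0, 2, 2, 1, 0, 0], 9)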
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(30, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(50, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Affine(
        name='layer2',
        input_shape=layer1.output.shape,
        output_shape=(None, 1024),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    stash = biggie.Stash(args.training_file)
    hyperparams = {learning_rate.name: LEARNING_RATE}

    # Train incrementally, widening the set of valid chord qualities by
    # one each pass and saving per-stage graph definitions.
    valid_idx = [0]
    for q in range(1, 13):
        valid_idx.append(q)
        stream = S.minibatch(
            D.create_uniform_quality_stream(
                stash, TIME_DIM, vocab_dim=VOCAB, valid_idx=valid_idx),
            batch_size=BATCH_SIZE)
        driver = optimus.Driver(
            graph=trainer,
            name=args.trial_name + "_c%02d" % q,
            output_directory=args.model_directory)
        driver.fit(stream, hyperparams=hyperparams, max_iter=20000,
                   **DRIVER_ARGS)

        validator_file = path.join(driver.output_directory,
                                   args.validator_file)
        optimus.save(validator, def_file=validator_file)

        predictor_file = path.join(driver.output_directory,
                                   args.predictor_file)
        optimus.save(predictor, def_file=predictor_file)
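# Each of these scripts exposes only main(args). For completeness, a
# plausible command-line harness wired to the attribute names the scripts
# actually read; the flag layout and help strings are assumptions:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description="Train an optimus graph.")
    parser.add_argument("training_file",
                        help="Path to a biggie Stash of training data.")
    parser.add_argument("trial_name",
                        help="Unique name for this training trial.")
    parser.add_argument("model_directory",
                        help="Directory for checkpoints and graph defs.")
    parser.add_argument("predictor_file",
                        help="Filename for the saved predictor graph.")
    parser.add_argument("validator_file",
                        help="Filename for the saved validator graph.")
    parser.add_argument("--init_param_file", default="",
                        help="Optional parameter archive to warm-start from.")
    main(parser.parse_args())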