def main(args):
    trainer, predictor = models.MODELS[args.model_name]()
    time_dim = trainer.inputs['cqt'].shape[2]

    if args.init_param_file:
        print "Loading parameters: %s" % args.init_param_file
        trainer.load_param_values(args.init_param_file)

    print "Opening %s" % args.training_file
    stash = biggie.Stash(args.training_file, cache=True)
    stream = D.create_uniform_chord_stream(
        stash, time_dim, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.output_directory)

    hyperparams = dict(learning_rate=LEARNING_RATE)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
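# ---------------------------------------------------------------------------
# The drivers in this file all lean on a shared module-level preamble that is
# not shown. Below is a minimal sketch of that preamble, inferred from how the
# names are used at the call sites: the import aliases (D, S, FX, models) are
# taken from usage, but the exact module paths and constant values are
# assumptions, not the published configuration (and they likely varied across
# experiments).
# ---------------------------------------------------------------------------
from os import path

import numpy as np

import biggie
import optimus
import dl4mir.chords.data as D        # assumed home of the create_*_stream helpers
import dl4mir.common.streams as S     # assumed home of minibatch / mux
import dl4mir.chords.pipefxs as FX    # assumed home of the FX transforms
from dl4mir.chords import models      # assumed registry exposing MODELS

GRAPH_NAME = "chord_classifier"
TIME_DIM = 20         # frames per input tile; illustrative
PITCH_DIM = 252       # CQT bins; some drivers hardcode 252 or use 40
OCTAVE_DIM = 6        # channels for octave-folded inputs
VOCAB = 157           # chord classes (cf. the V157 checkpoint name below)
BATCH_SIZE = 50       # illustrative
LEARNING_RATE = 0.02  # illustrative
DROPOUT = 0.5         # illustrative
MARGIN = 1.0          # illustrative
DRIVER_ARGS = dict(print_freq=100, save_freq=250)  # illustrative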
def main(args):
    trainer, predictor = build_model()

    if args.init_param_file:
        print "Loading parameters: %s" % args.init_param_file
        trainer.load_param_values(args.init_param_file)
        # Re-initialize the top affine layer after loading.
        optimus.random_init(trainer.params['layer3'].weights)
        optimus.random_init(trainer.params['layer3'].bias)

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_stash_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, pool_size=25)

    if args.secondary_source:
        stash2 = biggie.Stash(args.secondary_source)
        stream2 = D.create_uniform_chord_stream(
            stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
        stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    hyperparams = dict(learning_rate=LEARNING_RATE)
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, PITCH_DIM))

    target = optimus.Input(
        name='target',
        shape=(None, 6))  # 6-D tonal-centroid (tonnetz) targets

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Affine(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        output_shape=(None, 6,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_mse = optimus.MeanSquaredError(
        name="chord_mse")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_mse.prediction),
        (target, chord_mse.target)])

    # Only the top two layers receive updates; the rest stay frozen.
    update_manager = optimus.ConnectionManager([
        # (learning_rate, layer0.weights),
        # (learning_rate, layer0.bias),
        # (learning_rate, layer1.weights),
        # (learning_rate, layer1.bias),
        # (learning_rate, layer2.weights),
        # (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights, 0, 0.01)
        optimus.random_init(n.bias, 0, 0.01)

    if args.init_param_file:
        # Load pretrained parameters, skipping the layers being retrained.
        param_values = dict(np.load(args.init_param_file))
        keys = param_values.keys()
        for key in keys:
            if chord_classifier.name in key or layer3.name in key:
                print "skipping %s" % key
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10)
    stream = S.minibatch(
        FX.chord_index_to_tonnetz(stream, vocab_dim=VOCAB),
        batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
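# ---------------------------------------------------------------------------
# FX.chord_index_to_tonnetz is used above but not defined here. As a point of
# reference only, the sketch below shows the underlying idea: map a chord
# index to the 6-D tonal-centroid ("tonnetz") space of Harte et al. (2006) by
# averaging the centroids of the chord's pitch classes. The helper name, the
# quality-template table, and the `quality * 12 + root` index convention are
# hypothetical reconstructions, not the repo's implementation.
# ---------------------------------------------------------------------------
import numpy as np

QUALITY_TEMPLATES = {
    0: (0, 4, 7),   # maj triad
    1: (0, 3, 7),   # min triad
    # ... remaining qualities omitted
}


def pitch_class_centroid(pitch_class):
    """6-D tonal centroid of one pitch class (radii r1=r2=1, r3=0.5)."""
    p = float(pitch_class)
    return np.array([
        np.sin(p * 7 * np.pi / 6.0), np.cos(p * 7 * np.pi / 6.0),    # fifths
        np.sin(p * 3 * np.pi / 2.0), np.cos(p * 3 * np.pi / 2.0),    # minor thirds
        0.5 * np.sin(p * 2 * np.pi / 3.0),
        0.5 * np.cos(p * 2 * np.pi / 3.0)])                          # major thirds


def chord_index_to_tonnetz(index, no_chord_index=156):
    """Average the tonal centroids of a chord's pitch classes."""
    if index == no_chord_index:
        return np.zeros(6)
    quality, root = index // 12, index % 12
    notes = [(root + i) % 12 for i in QUALITY_TEMPLATES[quality]]
    return np.mean([pitch_class_centroid(n) for n in notes], axis=0)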
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    input_scalar = optimus.Normalize(
        name='input_scalar',
        mode='l2',
        scale_factor=50.0)

    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    layer4 = optimus.Affine(
        name='bottleneck',
        input_shape=layer3.output.shape,
        output_shape=(None, 3,),
        act_type='linear')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer4.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [input_scalar, layer0, layer1, layer2, layer3, layer4,
                 chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input),
        (input_scalar.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, layer4.input),
        (layer4.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, layer4.weights),
        (learning_rate, layer4.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    # Skip the parameter-free Normalize node when initializing weights.
    for n in all_nodes[1:]:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        # Load pretrained parameters, skipping the layers being retrained.
        param_values = dict(np.load(args.init_param_file))
        keys = param_values.keys()
        for key in keys:
            if chord_classifier.name in key or layer3.name in key:
                print "skipping %s" % key
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(
        name='posterior')

    embedding = optimus.Output(
        name='embedding')

    predictor_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input),
        (input_scalar.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, layer4.input),
        (layer4.output, chord_classifier.input),
        (layer4.output, embedding),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior, embedding])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_chord_stream(
            stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB,
            working_size=10),
        batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, OCTAVE_DIM, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        print "Loading parameters: %s" % args.init_param_file
        trainer.load_param_values(args.init_param_file)

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    # Restrict sampling to chord-class indices 0-59 plus 156.
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10,
        valid_idx=range(60) + [156])

    if args.secondary_source:
        stash2 = biggie.Stash(args.secondary_source)
        stream2 = D.create_uniform_chord_stream(
            stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
        stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    dropout = optimus.Input(
        name='dropout',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(30, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(50, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Affine(
        name='layer2',
        input_shape=layer1.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    for n in [layer2, layer3]:
        n.enable_dropout()

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx),
        (dropout, layer2.dropout),
        (dropout, layer3.dropout)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, dropout],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    # The validator shares the trainer's nodes, but with dropout disabled.
    validator_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    for n in [layer2, layer3]:
        n.disable_dropout()

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=validator_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    s = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=6, vocab_dim=VOCAB, working_size=10)
    stream = S.minibatch(
        FX.drop_frames(FX.awgn(s, 0.05), 0.1),
        batch_size=BATCH_SIZE)

    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE,
                   dropout.name: DROPOUT}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    print "Starting '%s'" % args.trial_name
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
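# ---------------------------------------------------------------------------
# FX.awgn and FX.drop_frames are applied to the sample stream above but not
# defined here. The generators below are plausible numpy-only stand-ins for
# the two augmentations -- additive white Gaussian noise and random frame
# dropout -- assuming dict-like entities carrying a 'cqt' array of shape
# (channels, time, pitch). Names, signatures, and conventions are guesses.
# ---------------------------------------------------------------------------
import numpy as np


def awgn(stream, sigma=0.05):
    """Add zero-mean Gaussian noise to each observation's CQT."""
    for entity in stream:
        cqt = np.asarray(entity['cqt'], dtype=float)
        entity['cqt'] = cqt + np.random.normal(0.0, sigma, size=cqt.shape)
        yield entity


def drop_frames(stream, ratio=0.1):
    """Zero out a random subset of time frames in each observation."""
    for entity in stream:
        cqt = np.asarray(entity['cqt'], dtype=float)
        num_frames = cqt.shape[1]
        num_drop = int(ratio * num_frames)
        if num_drop:
            idx = np.random.choice(num_frames, size=num_drop, replace=False)
            cqt[:, idx, :] = 0.0
        entity['cqt'] = cqt
        yield entity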
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, OCTAVE_DIM, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    margin = optimus.Input(
        name='margin',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Affine(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        output_shape=(None, VOCAB,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_margin = optimus.Margin(
        name="chord_margin",
        mode='max')

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_margin.prediction),
        (chord_idx, chord_margin.target_idx),
        (margin, chord_margin.margin)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, margin],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_margin],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        print "Loading parameters: %s" % args.init_param_file
        trainer.load_param_values(args.init_param_file)
        # Re-initialize the top two layers after loading.
        for n in all_nodes[-2:]:
            optimus.random_init(n.weights)
            optimus.random_init(n.bias)

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_stash_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, pool_size=25)

    if args.secondary_source:
        stash2 = biggie.Stash(args.secondary_source)
        stream2 = D.create_uniform_chord_stream(
            stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
        stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE, margin.name: MARGIN}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 6, TIME_DIM, 40))

    target = optimus.Input(
        name='target',
        shape=(None, VOCAB))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_estimator = optimus.Affine(
        name='chord_estimator',
        input_shape=layer3.output.shape,
        output_shape=(None, VOCAB,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # 1.3 Create Losses
    chord_xentropy = optimus.CrossEntropy(name="chord_xentropy")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, chord_xentropy.prediction),
        (target, chord_xentropy.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_xentropy],
        updates=update_manager.connections,
        momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights, 0, 0.01)
        optimus.random_init(n.bias, 0, 0.01)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_xentropy])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    # partition_labels = json.load(
    #     open("/home/ejhumphrey/Dropbox/tmp/train0_v2_merged_partition.json"))
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB, working_size=5)
    stream = S.minibatch(
        FX.chord_index_to_affinity_vectors(
            FX.wrap_cqt(stream, length=40, stride=36), VOCAB),
        batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(30, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(50, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Affine(
        name='layer2',
        input_shape=layer1.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections,
        momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    # Warm-start from a previously trained checkpoint.
    trainer.load_param_values(
        "/media/attic/dl4mir/chord_estimation/models/nll_chord_uniform_2big/"
        "synth_data_01/0/"
        "classifier-V157-synth_data_01-041750-2014-08-25_21h59m56s.npz")

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB, working_size=3)
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(30, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(50, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Affine(
        name='layer2',
        input_shape=layer1.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    stash = biggie.Stash(args.training_file)
    partition_labels = D.util.partition(stash, D.chord_map)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    valid_idx = []
    # One training stage per chord-quality block.
    for q in range(14):
        if q < 13:
            valid_idx.extend([q * 12 + r for r in range(12)])
        else:
            valid_idx.append(q)

        stream = S.minibatch(
            D.create_uniform_chord_stream(
                stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB,
                working_size=3, partition_labels=partition_labels),
            batch_size=BATCH_SIZE)

        driver = optimus.Driver(
            graph=trainer,
            name=args.trial_name + "_c%02d" % q,
            output_directory=args.model_directory)

        driver.fit(stream, hyperparams=hyperparams, max_iter=25000,
                   **DRIVER_ARGS)

        validator_file = path.join(driver.output_directory,
                                   args.validator_file)
        optimus.save(validator, def_file=validator_file)

        predictor_file = path.join(driver.output_directory,
                                   args.predictor_file)
        optimus.save(predictor, def_file=predictor_file)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 6, TIME_DIM, 40))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    # layer3 = optimus.Affine(
    #     name='layer3',
    #     input_shape=layer2.output.shape,
    #     output_shape=(None, 1024,),
    #     act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections,
        momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    # trainer.load_param_values(
    #     "/media/attic/dl4mir/chord_estimation/models/nll_chord_uniform_2big/"
    #     "synth_data_01/0/"
    #     "classifier-V157-synth_data_01-041750-2014-08-25_21h59m56s.npz")

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB, working_size=10)
    stream = S.minibatch(
        FX.wrap_cqt(stream, length=40, stride=36),
        batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        trainer.load_param_values(args.init_param_file)

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)

    # synth_stash = biggie.Stash(args.secondary_source)
    # stream = D.muxed_uniform_chord_stream(
    #     stash, synth_stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB,
    #     working_size=10)

    # Restrict sampling to chord-class indices 0-59 plus 156.
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10,
        valid_idx=range(60) + [156])

    # if args.secondary_source:
    #     print "Loading %s" % args.secondary_source
    #     stash2 = biggie.Stash(args.secondary_source)
    #     stream2 = D.create_uniform_chord_stream(
    #         stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB,
    #         working_size=5)
    #     stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 6, TIME_DIM, 40))

    target = optimus.Input(
        name='target',
        shape=(None, VOCAB))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_estimator = optimus.Affine(
        name='chord_estimator',
        input_shape=layer3.output.shape,
        output_shape=(None, VOCAB,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # 1.3 Create Losses
    chord_mse = optimus.MeanSquaredError(
        name="chord_mse")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, chord_mse.prediction),
        (target, chord_mse.target)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse],
        updates=update_manager.connections,
        momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights, 0, 0.01)
        optimus.random_init(n.bias, 0, 0.01)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse])

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    # partition_labels = json.load(
    #     open("/home/ejhumphrey/Dropbox/tmp/train0_v2_merged_partition.json"))
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=False, vocab_dim=VOCAB, working_size=5)
    stream = S.minibatch(
        FX.chord_index_to_tonnetz_distance(
            FX.wrap_cqt(stream, length=40, stride=36), VOCAB),
        batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    input_scalar = optimus.Normalize(
        name='input_scalar',
        mode='l2',
        scale_factor=50.0)

    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [input_scalar, layer0, layer1, layer2, layer3,
                 chord_classifier]

    # 1.3 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input),
        (input_scalar.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    # Skip the parameter-free Normalize node when initializing weights.
    for n in all_nodes[1:]:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        # Load pretrained parameters, skipping the layers being retrained.
        param_values = dict(np.load(args.init_param_file))
        keys = param_values.keys()
        for key in keys:
            if chord_classifier.name in key or layer3.name in key:
                print "skipping %s" % key
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input),
        (input_scalar.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_chord_stream(
            stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB,
            working_size=10),
        batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
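# ---------------------------------------------------------------------------
# The 'input_scalar' node above (optimus.Normalize, mode='l2',
# scale_factor=50.0) presumably rescales each observation to a fixed L2 norm
# before the first convolution. The numpy sketch below shows that assumed
# behavior for a batch shaped (batch, channels, time, pitch); it is a guess
# at the node's semantics, not its actual implementation.
# ---------------------------------------------------------------------------
import numpy as np


def l2_normalize(batch, scale_factor=50.0, eps=1e-12):
    """Scale each observation to unit L2 norm, times scale_factor."""
    flat = batch.reshape(len(batch), -1)
    norms = np.sqrt((flat ** 2).sum(axis=1)) + eps
    return scale_factor * batch / norms.reshape(-1, 1, 1, 1)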
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, OCTAVE_DIM, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    margin = optimus.Input(
        name='margin',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_estimator = optimus.Softmax(
        name='chord_estimator',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # Loss path: a negative log-likelihood assembled from primitive nodes.
    log = optimus.Log(name='log')
    neg = optimus.Gain(name='gain')
    neg.weight = np.array(-1)

    energy = optimus.SelectIndex(name='selector')
    loss = optimus.Mean(name='total_loss')

    # 1.3 Create Losses
    chord_margin = optimus.Margin(
        name="chord_margin",
        mode='max')

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, log.input),
        (log.output, neg.input),
        (neg.output, energy.input),
        (chord_idx, energy.index),
        (energy.output, loss.input)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, margin],
        nodes=all_nodes + [log, neg, energy, loss],
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[loss],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        print "Loading parameters: %s" % args.init_param_file
        trainer.load_param_values(args.init_param_file)
        # Re-initialize the top two layers after loading.
        for n in all_nodes[-2:]:
            optimus.random_init(n.weights)
            optimus.random_init(n.bias)

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_stash_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, pool_size=25)

    if args.secondary_source:
        stash2 = biggie.Stash(args.secondary_source)
        stream2 = D.create_uniform_chord_stream(
            stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
        stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE, margin.name: MARGIN}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
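# ---------------------------------------------------------------------------
# Each driver reads its configuration from an argparse namespace. A minimal
# entry point consistent with the attributes accessed above might look like
# the following; flag names are inferred from those attributes and defaults
# are illustrative. (Note the first driver reads args.output_directory where
# the others read args.model_directory.)
# ---------------------------------------------------------------------------
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Train a chord estimation model with optimus.")
    parser.add_argument("training_file",
                        help="Path to a biggie Stash of training data.")
    parser.add_argument("model_directory",
                        help="Directory in which to write model outputs.")
    parser.add_argument("trial_name",
                        help="Unique name for this training run.")
    parser.add_argument("predictor_file",
                        help="Filename for the saved predictor graph.")
    parser.add_argument("--validator_file", default="validator.json",
                        help="Filename for the saved validator graph.")
    parser.add_argument("--init_param_file", default="",
                        help="Optional parameter archive for initialization.")
    parser.add_argument("--secondary_source", default="",
                        help="Optional second Stash to mux into the stream.")
    parser.add_argument("--model_name", default="",
                        help="Key into models.MODELS, where applicable.")
    main(parser.parse_args())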