예제 #1
0
def build_model():
    """Assemble a small two-hidden-layer MLP classifier.

    Returns
    -------
    driver : optimus.Driver
        Driver wrapping the compiled training graph.
    predictor : optimus.Graph
        Forward-pass graph mapping `x` to class posteriors.
    """
    # Symbolic inputs: features, integer class targets, and the step size.
    x_in = optimus.Input(name="x", shape=(None, 2))
    class_idx = optimus.Input(name="y", shape=(None, ), dtype='int32')
    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # Two ReLU affine layers feeding a linear-softmax classifier.
    layer0 = optimus.Affine(name='layer0',
                            input_shape=x_in.shape,
                            output_shape=(None, 100),
                            act_type='relu')
    layer1 = optimus.Affine(name='layer1',
                            input_shape=layer0.output.shape,
                            output_shape=(None, 100),
                            act_type='relu')
    classifier = optimus.Softmax(name='classifier',
                                 input_shape=layer1.output.shape,
                                 n_out=N_CLASSES,
                                 act_type='linear')
    param_nodes = [layer0, layer1, classifier]

    nll = optimus.NegativeLogLikelihood(name="nll")
    posterior = optimus.Output(name='posterior')

    # Feed-forward path shared by the trainer and the predictor.
    base_edges = [(x_in, layer0.input),
                  (layer0.output, layer1.input),
                  (layer1.output, classifier.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [(classifier.output, nll.likelihood),
                      (class_idx, nll.target_idx)])

    # Route the learning rate to every trainable parameter.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, param) for node in param_nodes
         for param in (node.weights, node.bias)])

    trainer = optimus.Graph(name='trainer',
                            inputs=[x_in, class_idx, learning_rate],
                            nodes=param_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[nll],
                            updates=update_manager.connections)

    # Randomize weights only after the trainer has been compiled.
    for node in param_nodes:
        optimus.random_init(node.weights)

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(classifier.output, posterior)])

    predictor = optimus.Graph(name='predictor',
                              inputs=[x_in],
                              nodes=param_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    driver = optimus.Driver(graph=trainer, name='test')
    return driver, predictor
예제 #2
0
def main(args):
    """Train a five-layer chord classifier and save its graph definitions.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # --- Inputs -----------------------------------------------------------
    input_data = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')
    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # --- Nodes: three conv layers, one affine, softmax classifier ---------
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(12, 1, 5, 19),
                            pool_shape=(1, 3),
                            act_type='relu')
    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(16, None, 5, 15),
                            act_type='relu')
    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(20, None, 2, 15),
                            act_type='relu')
    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(None, 512),
                            act_type='relu')
    chord_classifier = optimus.Softmax(name='chord_classifier',
                                       input_shape=layer3.output.shape,
                                       n_out=VOCAB,
                                       act_type='linear')
    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # --- Loss -------------------------------------------------------------
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # --- Edges ------------------------------------------------------------
    # Feed-forward path shared by trainer, validator, and predictor.
    base_edges = [(input_data, layer0.input),
                  (layer0.output, layer1.input),
                  (layer1.output, layer2.input),
                  (layer2.output, layer3.input),
                  (layer3.output, chord_classifier.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [(chord_classifier.output, chord_nll.likelihood),
                      (chord_idx, chord_nll.target_idx)])

    # Route the learning rate to every trainable parameter.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, param) for node in all_nodes
         for param in (node.weights, node.bias)])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, chord_idx, learning_rate],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[chord_nll],
                            updates=update_manager.connections)

    # Randomize only the classifier head, after the trainer compiles.
    optimus.random_init(chord_classifier.weights)

    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data, chord_idx],
                              nodes=all_nodes,
                              connections=trainer_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[chord_nll])

    posterior = optimus.Output(name='posterior')
    predictor_edges = optimus.ConnectionManager(
        base_edges + [(chord_classifier.output, posterior)])
    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # --- Data source ------------------------------------------------------
    transformers = [T.chord_sample(input_data.shape[2]),
                    T.pitch_shift(8),
                    T.map_to_index(VOCAB)]
    source = optimus.Queue(optimus.File(args.training_file),
                           transformers=transformers,
                           **SOURCE_ARGS)

    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}
    driver.fit(source, hyperparams=hyperparams, **DRIVER_ARGS)

    # Persist the validator and predictor definitions next to the model.
    optimus.save(validator,
                 def_file=path.join(driver.output_directory,
                                    args.validator_file))
    optimus.save(predictor,
                 def_file=path.join(driver.output_directory,
                                    args.predictor_file))
예제 #3
0
def build_model():
    """Build fretboard-estimation trainer and predictor graphs.

    Six sigmoid heads (one per guitar string) are stacked and regressed
    against the target fret map with a mean squared error loss.

    Returns
    -------
    trainer : optimus.Graph
        Graph with MSE loss and parameter updates.
    predictor : optimus.Graph
        Forward graph emitting the stacked fretboard posterior.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt',
                               shape=(None, OCTAVE_DIM, TIME_DIM, PITCH_DIM))

    fret_map = optimus.Input(name='fret_map', shape=(None, 6, 9))

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(32, None, 5, 5),
                            pool_shape=(2, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(64, None, 5, 7),
                            act_type='relu')

    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(128, None, 3, 6),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(None, 1024),
                            act_type='relu')

    # One sigmoid head per guitar string, each over 9 fret positions.
    strings = [optimus.Affine(name='string_%d' % n,
                              input_shape=layer3.output.shape,
                              output_shape=(None, 9),
                              act_type='sigmoid')
               for n in range(6)]

    param_nodes = [layer0, layer1, layer2, layer3] + strings

    # 1.3 Create Loss
    stack = optimus.Stack('stack', axes=[1, 0, 2])
    error = optimus.SquaredEuclidean(name='squared_error')
    loss = optimus.Mean(name='mse')

    # 2. Define Edges
    # Shared feed-forward path, fanned out to each string head and stacked.
    base_edges = [(input_data, layer0.input),
                  (layer0.output, layer1.input),
                  (layer1.output, layer2.input),
                  (layer2.output, layer3.input)]
    for string_n in strings:
        base_edges += [(layer3.output, string_n.input),
                       (string_n.output, stack.input_list)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [(stack.output, error.input_a),
                      (fret_map, error.input_b),
                      (error.output, loss.input)])

    # BUG FIX: map() returns an iterator on Python 3, so the original
    # `map(...) + map(...)` raises TypeError there; these comprehensions
    # produce the identical lists on both Python 2 and 3.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, n.weights) for n in param_nodes] +
        [(learning_rate, n.bias) for n in param_nodes])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, fret_map, learning_rate],
                            nodes=param_nodes + [stack, error, loss],
                            connections=trainer_edges.connections,
                            outputs=[loss.output],
                            loss=loss.output,
                            updates=update_manager.connections,
                            verbose=True)

    # Randomize all parameters after the trainer has been compiled.
    for n in param_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    fret_posterior = optimus.Output(name='fret_posterior')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(stack.output, fret_posterior)])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=param_nodes + [stack],
                              connections=predictor_edges.connections,
                              outputs=[fret_posterior])
    return trainer, predictor
예제 #4
0
def main(args):
    """Train a chord model with a weighted NLL plus likelihood-margin loss.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # --- Inputs -----------------------------------------------------------
    input_data = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))
    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')
    learning_rate = optimus.Input(name='learning_rate', shape=None)
    margin = optimus.Input(name='margin', shape=None)
    margin_weight = optimus.Input(name='margin_weight', shape=None)
    nll_weight = optimus.Input(name='nll_weight', shape=None)

    # --- Nodes: three conv layers, one affine, sigmoid classifier ---------
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(12, 1, 9, 19),
                            pool_shape=(1, 3),
                            act_type='relu')
    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(16, None, 7, 15),
                            act_type='relu')
    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(20, None, 6, 15),
                            act_type='relu')
    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(None, 512),
                            act_type='relu')
    chord_classifier = optimus.Affine(name='chord_classifier',
                                      input_shape=layer3.output.shape,
                                      output_shape=(None, VOCAB),
                                      act_type='sigmoid')
    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # --- Losses: weighted NLL plus an L1 likelihood margin ----------------
    nll = optimus.NegativeLogLikelihood(name="nll", weighted=True)
    likelihood_margin = optimus.LikelihoodMargin(name="likelihood_margin",
                                                 mode='l1',
                                                 weighted=True)

    # --- Edges ------------------------------------------------------------
    # Feed-forward path shared by trainer, validator, and predictor.
    base_edges = [(input_data, layer0.input),
                  (layer0.output, layer1.input),
                  (layer1.output, layer2.input),
                  (layer2.output, layer3.input),
                  (layer3.output, chord_classifier.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [(chord_classifier.output, likelihood_margin.likelihood),
                      (chord_idx, likelihood_margin.target_idx),
                      (margin, likelihood_margin.margin),
                      (margin_weight, likelihood_margin.weight),
                      (chord_classifier.output, nll.likelihood),
                      (chord_idx, nll.target_idx),
                      (nll_weight, nll.weight)])

    # Route the learning rate to every trainable parameter.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, param) for node in all_nodes
         for param in (node.weights, node.bias)])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[
                                input_data, chord_idx, margin, learning_rate,
                                margin_weight, nll_weight
                            ],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[nll, likelihood_margin],
                            updates=update_manager.connections)

    # Randomize only the classifier head, after the trainer compiles.
    optimus.random_init(chord_classifier.weights)
    optimus.random_init(chord_classifier.bias)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, margin, margin_weight, nll_weight],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[nll, likelihood_margin])

    posterior = optimus.Output(name='posterior')
    predictor_edges = optimus.ConnectionManager(
        base_edges + [(chord_classifier.output, posterior)])
    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # --- Data -------------------------------------------------------------
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_quality_stream(stash, TIME_DIM, vocab_dim=VOCAB),
        batch_size=50)

    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {
        learning_rate.name: LEARNING_RATE,
        margin_weight.name: MARGIN_WEIGHT,
        nll_weight.name: NLL_WEIGHT,
        margin.name: MARGIN
    }
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    # Persist the validator and predictor definitions next to the model.
    optimus.save(validator,
                 def_file=path.join(driver.output_directory,
                                    args.validator_file))
    optimus.save(predictor,
                 def_file=path.join(driver.output_directory,
                                    args.predictor_file))
예제 #5
0
def main(args):
    """Train a chord-affinity estimator with a cross-entropy loss.

    The validator/predictor definitions are saved *before* fitting so they
    exist on disk even if training is interrupted.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt', shape=(None, 6, TIME_DIM, 40))

    target = optimus.Input(name='target', shape=(None, VOCAB))

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(32, None, 5, 5),
                            pool_shape=(2, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(64, None, 5, 7),
                            act_type='relu')

    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(128, None, 3, 6),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(None, 1024),
                            act_type='relu')

    chord_estimator = optimus.Affine(name='chord_estimator',
                                     input_shape=layer3.output.shape,
                                     output_shape=(None, VOCAB),
                                     act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # 1.3 Create Losses
    chord_xentropy = optimus.CrossEntropy(name="chord_xentropy")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, chord_xentropy.prediction),
        (target, chord_xentropy.target)
    ])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)
    ])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, target, learning_rate],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[chord_xentropy],
                            updates=update_manager.connections,
                            momentum=None)

    # Small random init for every layer, applied after the trainer compiles.
    for n in all_nodes:
        optimus.random_init(n.weights, 0, 0.01)
        optimus.random_init(n.bias, 0, 0.01)

    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data, target],
                              nodes=all_nodes,
                              connections=trainer_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[chord_xentropy])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)
    ])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    # BUG FIX: `print` statements are Python 2-only syntax; the call form
    # behaves identically on Python 2 and is valid on Python 3.
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(stash,
                                           TIME_DIM,
                                           pitch_shift=False,
                                           vocab_dim=VOCAB,
                                           working_size=5)
    stream = S.minibatch(FX.chord_index_to_affinity_vectors(
        FX.wrap_cqt(stream, length=40, stride=36), VOCAB),
                         batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    # Save graph definitions up front, then fit.
    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
예제 #6
0
def main(args):
    """Train a sparse-MSE chord estimator and save validator/predictor.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')

    is_chord = optimus.Input(name='is_chord', shape=(None, ))

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(12, 1, 9, 19),
                            pool_shape=(1, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(16, None, 7, 15),
                            act_type='relu')

    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(20, None, 6, 15),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(None, 512),
                            act_type='relu')

    chord_estimator = optimus.Affine(name='chord_estimator',
                                     input_shape=layer3.output.shape,
                                     output_shape=(None, VOCAB),
                                     act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # 1.3 Create Losses
    chord_mse = optimus.SparseMeanSquaredError(name="chord_mse")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, chord_mse.prediction),
        (chord_idx, chord_mse.index), (is_chord, chord_mse.target)
    ])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)
    ])

    # BUG FIX: `print` statements are Python 2-only syntax; the call form
    # behaves identically on Python 2 and is valid on Python 3.
    print("Building trainer")
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, is_chord, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse],
        updates=update_manager.connections)

    # Randomize only the estimator head, after the trainer compiles.
    optimus.random_init(chord_estimator.weights)

    print("Building validator")
    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data, chord_idx, is_chord],
                              nodes=all_nodes,
                              connections=trainer_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[chord_mse])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, posterior)
    ])

    print("Building predictor")
    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    print("Opening Data")
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(D.create_contrastive_quality_stream(stash,
                                                             TIME_DIM,
                                                             vocab_dim=VOCAB),
                         batch_size=50)

    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    print("...aaand we're off!")
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    # Persist the validator and predictor definitions next to the model.
    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #7
0
def iXc3_nll(n_in, size='large', use_dropout=False):
    """Build a six-string fret classifier trained with summed NLL.

    Parameters
    ----------
    n_in : int
        Input time dimension; selects conv kernel heights and pool sizes
        from the lookup tables below.
    size : str
        One of {'small', 'med', 'large', 'xlarge', 'xxlarge'}; sets the
        number of kernels per conv layer.
    use_dropout : bool
        If True, adds a `dropout` input wired to the three conv layers
        (enabled for the trainer, disabled for the predictor).

    Returns
    -------
    trainer : optimus.Graph
        Training graph with loss and parameter updates.
    predictor : optimus.Graph
        Forward-only graph emitting the stacked fretboard posterior.
    """
    k0, k1, k2 = dict(
        small=(10, 20, 40),
        med=(12, 24, 48),
        large=(16, 32, 64),
        xlarge=(20, 40, 80),
        xxlarge=(24, 48, 96))[size]

    # NOTE(review): the pooling table below has an entry for n_in == 12,
    # but this kernel table does not, so n_in=12 raises KeyError here --
    # confirm whether 12 should be supported.
    n0, n1, n2 = {
        1: (1, 1, 1),
        4: (3, 2, 1),
        8: (5, 3, 2),
        10: (3, 3, 1),
        20: (5, 5, 1)}[n_in]

    p0, p1, p2 = {
        1: (1, 1, 1),
        4: (1, 1, 1),
        8: (1, 1, 1),
        10: (2, 2, 1),
        12: (2, 2, 1),
        20: (2, 2, 2)}[n_in]

    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, n_in, 252))

    # One integer fret-index target per string (E, A, D, G, B, e).
    indexes = []
    for name in 'EADGBe':
        indexes.append(optimus.Input(
            name='{0}_index'.format(name),
            shape=(None,),
            dtype='int32'))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    inputs = [input_data, learning_rate] + indexes

    dropout = optimus.Input(
        name='dropout',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(k0, None, n0, 13),
        pool_shape=(p0, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(k1, None, n1, 37),
        pool_shape=(p1, 1),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(k2, None, n2, 33),
        pool_shape=(p2, 1),
        act_type='relu')

    trainer_edges = []
    if use_dropout:
        layer0.enable_dropout()
        layer1.enable_dropout()
        layer2.enable_dropout()
        inputs += [dropout]
        trainer_edges += [(dropout, layer0.dropout),
                          (dropout, layer1.dropout),
                          (dropout, layer2.dropout)]

    # One linear predictor plus softmax per string.
    predictors = []
    softmaxes = []
    for name in 'EADGBe':
        predictors.append(optimus.Affine(
            name='{0}_predictor'.format(name),
            input_shape=layer2.output.shape,
            output_shape=(None, NUM_FRETS),
            act_type='linear'))
        softmaxes.append(optimus.Softmax('{0}_softmax'.format(name)))

    stack = optimus.Stack('stacker', num_inputs=6, axes=(1, 0, 2))

    param_nodes = [layer0, layer1, layer2] + predictors
    misc_nodes = [stack] + softmaxes

    # 1.3 Create Loss
    # Per string: select the target likelihood, take its log, negate; the
    # six terms are summed and averaged into the scalar loss.
    likelihoods = []
    logs = []
    neg_ones = []
    for name in 'EADGBe':
        likelihoods.append(
            optimus.SelectIndex(name='{0}_likelihood'.format(name)))

        logs.append(optimus.Log(name='{0}_log'.format(name)))
        neg_ones.append(optimus.Multiply(name='{0}_gain'.format(name),
                                         weight_shape=None))
        neg_ones[-1].weight.value = -1.0  # fixed gain: negate the log-lik

    loss_sum = optimus.Add(name='loss_sum', num_inputs=6)
    ave_loss = optimus.Mean(name='ave_loss')
    loss_nodes = likelihoods + logs + neg_ones + [loss_sum, ave_loss]
    total_loss = optimus.Output(name='total_loss')

    fretboard = optimus.Output(name='fretboard')

    # 2. Define Edges
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input)]

    for p, smax in zip(predictors, softmaxes):
        base_edges += [
            (layer2.output, p.input),
            (p.output, smax.input),
        ]
    base_edges += [
        (softmaxes[0].output, stack.input_0),
        (softmaxes[1].output, stack.input_1),
        (softmaxes[2].output, stack.input_2),
        (softmaxes[3].output, stack.input_3),
        (softmaxes[4].output, stack.input_4),
        (softmaxes[5].output, stack.input_5),
        (stack.output, fretboard)
    ]

    for n, name in enumerate('EADGBe'):
        trainer_edges += [
            (softmaxes[n].output, likelihoods[n].input),
            (indexes[n], likelihoods[n].index),
            (likelihoods[n].output, logs[n].input),
            (logs[n].output, neg_ones[n].input)
        ]
    trainer_edges += [
        (neg_ones[0].output, loss_sum.input_0),
        (neg_ones[1].output, loss_sum.input_1),
        (neg_ones[2].output, loss_sum.input_2),
        (neg_ones[3].output, loss_sum.input_3),
        (neg_ones[4].output, loss_sum.input_4),
        (neg_ones[5].output, loss_sum.input_5),
        (loss_sum.output, ave_loss.input),
        (ave_loss.output, total_loss)
    ]

    # BUG FIX: map() returns an iterator on Python 3, so the original
    # `map(...) + map(...)` raises TypeError there; these comprehensions
    # produce the identical lists on both Python 2 and 3.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, node.weights) for node in param_nodes] +
        [(learning_rate, node.bias) for node in param_nodes])

    classifier_init(param_nodes)

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=inputs,
        nodes=param_nodes + misc_nodes + loss_nodes,
        connections=optimus.ConnectionManager(
            base_edges + trainer_edges).connections,
        outputs=[total_loss, fretboard],
        loss=total_loss,
        updates=update_manager.connections,
        verbose=True)

    # Dropout is a training-only behavior; disable it for the predictor.
    if use_dropout:
        layer0.disable_dropout()
        layer1.disable_dropout()
        layer2.disable_dropout()

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + misc_nodes,
        connections=optimus.ConnectionManager(base_edges).connections,
        outputs=[fretboard],
        verbose=True)

    return trainer, predictor
예제 #8
0
def main(args):
    """Build, train, and save a tonnetz-target chord model.

    Assembles a 3-conv / 2-affine network mapping CQT patches to a
    6-dimensional sigmoid output trained with mean squared error,
    optionally warm-starts the convolutional front-end from a saved
    parameter archive, saves the predictor definition, and runs the fit.

    Parameters
    ----------
    args : object
        Namespace with attributes `training_file`, `trial_name`,
        `model_directory`, `predictor_file`, and `init_param_file`
        (path to an .npz parameter archive, or falsy to skip).
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, PITCH_DIM))

    target = optimus.Input(
        name='target',
        shape=(None, VOCAB))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, 1, 5, 19),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_classifier = optimus.Affine(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        output_shape=(None, 6,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.3 Create Losses
    chord_mse = optimus.MeanSquaredError(
        name="chord_mse")

    # 2. Define Edges; the feed-forward chain is shared with the predictor.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [
            (chord_classifier.output, chord_mse.prediction),
            (target, chord_mse.target)])

    # Only the affine head is updated; the convolutional front-end is
    # deliberately frozen (its learning-rate connections are omitted).
    update_manager = optimus.ConnectionManager([
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_mse],
        updates=update_manager.connections)

    for node in all_nodes:
        optimus.random_init(node.weights, 0, 0.01)
        optimus.random_init(node.bias, 0, 0.01)

    if args.init_param_file:
        param_values = dict(np.load(args.init_param_file))
        # Drop parameters for the layers we re-train from scratch.
        # Iterate over a snapshot of the keys: the dict is mutated inside
        # the loop, which would raise under Python 3 otherwise.
        for key in list(param_values.keys()):
            if chord_classifier.name in key or layer3.name in key:
                print("skipping %s" % key)
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print("Loading %s" % args.training_file)
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10)
    stream = S.minibatch(
        FX.chord_index_to_tonnetz(stream, vocab_dim=VOCAB),
        batch_size=BATCH_SIZE)

    print("Starting '%s'" % args.trial_name)
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    # Save the predictor definition before fitting so it exists even if
    # training is interrupted.
    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
예제 #9
0
def i8c4b10_nll_dropout(size='large'):
    """Build dropout-regularized trainer/predictor graphs trained with NLL.

    An 8-frame CQT convnet whose final conv layer feeds both a 13-way 1x1
    chord classifier (weights fixed to a semitone template, never updated)
    and a scalar no-chord affine classifier; the two are concatenated and
    softmaxed into a single posterior.

    Parameters
    ----------
    size : str
        Capacity preset; only 'large' is defined.

    Returns
    -------
    trainer, predictor : optimus.Graph
        Training graph (expects dropout input) and inference graph.
    """
    k0, k1, k2 = {'large': (24, 48, 64)}[size]

    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, 8, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    dropout = optimus.Input(
        name='dropout',
        shape=None)

    # Convolutional front-end.
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(k0, None, 3, 13),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(k1, None, 3, 37),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(k2, None, 3, 33),
        act_type='relu')

    layer3 = optimus.Conv3D(
        name='layer3',
        input_shape=layer2.output.shape,
        weight_shape=(10, None, 2, 1),
        act_type='relu')

    chord_classifier = optimus.Conv3D(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        weight_shape=(13, None, 1, 1),
        act_type='linear')

    flatten = optimus.Flatten('flatten', 2)

    null_classifier = optimus.Affine(
        name='null_classifier',
        input_shape=layer3.output.shape,
        output_shape=(None, 1),
        act_type='linear')

    cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
    softmax = optimus.Softmax('softmax')

    # NOTE: chord_classifier is deliberately last; the [:-1] slices below
    # exclude it from learning-rate updates and random initialization.
    param_nodes = [layer0, layer1, layer2, layer3,
                   null_classifier, chord_classifier]
    misc_nodes = [flatten, cat, softmax]

    # Negative log-likelihood assembled from primitive nodes.
    likelihoods = optimus.SelectIndex(name='likelihoods')

    log = optimus.Log(name='log')
    neg = optimus.Gain(name='gain')
    neg.weight.value = -1.0

    loss = optimus.Mean(name='negative_log_likelihood')
    loss_nodes = [likelihoods, log, neg, loss]
    total_loss = optimus.Output(name='total_loss')

    conv_front = [layer0, layer1, layer2]
    for node in conv_front:
        node.enable_dropout()

    # Feed-forward chain shared by trainer and predictor.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (layer3.output, null_classifier.input),
        (chord_classifier.output, flatten.input),
        (flatten.output, cat.input_0),
        (null_classifier.output, cat.input_1),
        (cat.output, softmax.input)]

    dropout_edges = [(dropout, node.dropout) for node in conv_front]

    loss_edges = [
        (softmax.output, likelihoods.input),
        (chord_idx, likelihoods.index),
        (likelihoods.output, log.input),
        (log.output, neg.input),
        (neg.output, loss.input),
        (loss.output, total_loss)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + dropout_edges + loss_edges)

    trainable = param_nodes[:-1]
    update_manager = optimus.ConnectionManager(
        [(learning_rate, node.weights) for node in trainable] +
        [(learning_rate, node.bias) for node in trainable])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, dropout],
        nodes=param_nodes + misc_nodes + loss_nodes,
        connections=trainer_edges.connections,
        outputs=[total_loss],
        loss=total_loss,
        updates=update_manager.connections,
        verbose=True)

    classifier_init(trainable)

    # Pin the chord classifier to a fixed semitone template matrix.
    semitone_weights = L.semitone_matrix(157)[:13, 2:]
    chord_classifier.weights.value = semitone_weights.reshape(13, 10, 1, 1)

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(softmax.output, posterior)])

    # Inference runs without dropout.
    for node in conv_front:
        node.disable_dropout()

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + misc_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    return trainer, predictor
예제 #10
0
def wcqt_likelihood_wmoia(n_dim=VOCAB):
    """Build trainer/predictor graphs for a chord estimator whose loss
    combines a squared-error term on the true-chord likelihood with a
    margin ("moia") penalty on the competing outputs.

    Parameters
    ----------
    n_dim : int
        Output dimensionality of the chord estimator.

    Returns
    -------
    trainer, predictor : optimus.Graph
    """
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 6, TIME_DIM, 40))

    target = optimus.Input(
        name='target',
        shape=(None, 1))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # Convolutional front-end followed by two affine layers.
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_estimator = optimus.Affine(
        name='chord_estimator',
        input_shape=layer3.output.shape,
        output_shape=(None, n_dim),
        act_type='sigmoid')

    param_nodes = [layer0, layer1, layer2, layer3, chord_estimator]

    # Loss branch 1: squared error on the selected (true-chord) likelihood.
    likelihoods = optimus.SelectIndex('select')
    dimshuffle = optimus.Dimshuffle('dimshuffle', (0, 'x'))
    error = optimus.SquaredEuclidean(name='squared_error')
    main_loss = optimus.Mean(name='mean_squared_error')
    loss_nodes1 = [likelihoods, dimshuffle, error, main_loss]

    # Loss branch 2: margin penalty on outputs exceeding the true one.
    negone = optimus.Gain(name='negate')
    negone.weight.value = -1.0
    summer = optimus.Add(name='moia_sum')
    flatten = optimus.Sum('flatten', axis=1)
    dimshuffle2 = optimus.Dimshuffle('dimshuffle2', (0, 'x'))
    margin = optimus.RectifiedLinear(name='margin')
    weight = optimus.Multiply(name="margin_weight")
    margin_loss = optimus.Mean(name='margin_loss', axis=None)

    loss_nodes2 = [negone, summer, margin, flatten,
                   dimshuffle2, margin_loss, weight]
    total_loss = optimus.Add("total_loss")

    # Feed-forward chain shared by trainer and predictor.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [
            (chord_estimator.output, likelihoods.input),
            (chord_idx, likelihoods.index),
            (likelihoods.output, dimshuffle.input),
            (dimshuffle.output, error.input_a),
            (target, error.input_b),
            (error.output, main_loss.input),
            # Margin loss
            (dimshuffle.output, negone.input),
            (negone.output, summer.input_list),
            (chord_estimator.output, summer.input_list),
            (summer.output, margin.input),
            (margin.output, flatten.input),
            (flatten.output, dimshuffle2.input),
            (dimshuffle2.output, weight.input_a),
            (target, weight.input_b),
            (weight.output, margin_loss.input),
            (margin_loss.output, total_loss.input_list),
            (main_loss.output, total_loss.input_list)])

    update_manager = optimus.ConnectionManager(
        [(learning_rate, node.weights) for node in param_nodes] +
        [(learning_rate, node.bias) for node in param_nodes])

    all_nodes = param_nodes + loss_nodes1 + loss_nodes2 + [total_loss]
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[total_loss.output],
        loss=total_loss.output,
        updates=update_manager.connections,
        verbose=True)

    for node in param_nodes:
        for param in node.params.values():
            optimus.random_init(param)

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(chord_estimator.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    return trainer, predictor
예제 #11
0
def main(args):
    """Train a chroma-estimation convnet and save its graphs.

    Builds a three-conv / two-affine network mapping CQT patches to a
    12-dimensional sigmoid chroma vector trained with cross-entropy,
    runs the fit, and serializes validator and predictor definitions.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    target_chroma = optimus.Input(
        name='target_chroma',
        shape=(None, 12))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 512,),
        act_type='relu')

    layer4 = optimus.Affine(
        name='layer4',
        input_shape=layer3.output.shape,
        output_shape=(None, 12,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, layer4]

    # Loss
    chroma_xentropy = optimus.CrossEntropy(
        name="chroma_xentropy")

    # Edges: the feed-forward chain is shared by every graph below.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, layer4.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [
            (layer4.output, chroma_xentropy.prediction),
            (target_chroma, chroma_xentropy.target)])

    # Every layer's weights and bias share the same learning rate.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, param)
         for node in all_nodes
         for param in (node.weights, node.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy],
        updates=update_manager.connections)

    for node in all_nodes:
        optimus.random_init(node.weights)

    # Same wiring as the trainer, minus the update machinery.
    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy])

    chroma_out = optimus.Output(name='chroma')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(layer4.output, chroma_out)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[chroma_out])

    # Data source
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.uniform_quality_chroma_stream(stash, TIME_DIM),
        batch_size=50)

    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #12
0
파일: models.py 프로젝트: saifrahmed/dl4mir
def iX_c3f2_oY(n_in, n_out, size='large'):
    """Build a siamese embedding model with a contrastive margin loss.

    Two weight-tied towers (conv x3 + affine x2 over log-scaled CQT input)
    produce embeddings whose distance is pushed below `sim_margin` for
    similar pairs and above `diff_margin` for dissimilar ones, gated by the
    binary `score` input.

    Parameters
    ----------
    n_in : int
        Number of input frames; must be a key of the dimension/pool tables
        below (1, 4, 8, 10, or 20; pools also define 12).
    n_out : int
        Embedding dimensionality.
    size : str
        Capacity preset: one of 'small', 'med', 'large', 'xlarge', 'xxlarge'.

    Returns
    -------
    trainer, predictor, zerofilter : optimus.Graph
        Training graph, single-tower embedding graph, and a graph exposing
        the per-pair cost (for filtering zero-cost pairs).
    """
    # Kernel shapes
    k0, k1, k2, k3 = dict(
        small=(10, 20, 40, 96),
        med=(12, 24, 48, 128),
        large=(16, 32, 64, 192),
        xlarge=(20, 40, 80, 256),
        xxlarge=(24, 48, 96, 512))[size]

    # Input dimensions
    n0, n1, n2 = {
        1: (1, 1, 1),
        4: (3, 2, 1),
        8: (5, 3, 2),
        10: (3, 3, 1),
        20: (5, 5, 1)}[n_in]

    # Pool shapes
    p0, p1, p2 = {
        1: (1, 1, 1),
        4: (1, 1, 1),
        8: (1, 1, 1),
        10: (2, 2, 1),
        12: (2, 2, 1),
        20: (2, 2, 2)}[n_in]

    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, n_in, 192))

    input_data_2 = optimus.Input(
        name='cqt_2',
        shape=(None, 1, n_in, 192))

    # Pairwise similarity score; 1 for similar pairs, 0 otherwise
    # (inferred from the selector arithmetic below — confirm with caller).
    score = optimus.Input(
        name='score',
        shape=(None,))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    sim_margin = optimus.Input(
        name='sim_margin',
        shape=None)

    diff_margin = optimus.Input(
        name='diff_margin',
        shape=None)

    inputs = [input_data, input_data_2, score, learning_rate,
              sim_margin, diff_margin]

    # 1.2 Create Nodes (first tower)
    logscale = optimus.Log("logscale", 1.0)
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(k0, None, n0, 13),
        pool_shape=(p0, 2),
        act_type='tanh')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(k1, None, n1, 11),
        pool_shape=(p1, 2),
        act_type='tanh')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(k2, None, n2, 9),
        pool_shape=(p2, 2),
        act_type='tanh')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, k3),
        act_type='tanh')

    layer4 = optimus.Affine(
        name='layer4',
        input_shape=layer3.output.shape,
        output_shape=(None, n_out),
        act_type='linear')

    param_nodes = [layer0, layer1, layer2, layer3, layer4]

    # 1.1 Create cloned nodes: the second tower shares weights via clone().
    logscale_2 = logscale.clone("logscale_2")
    layer0_2 = layer0.clone('layer0_2')
    layer1_2 = layer1.clone('layer1_2')
    layer2_2 = layer2.clone('layer2_2')
    layer3_2 = layer3.clone('layer3_2')
    layer4_2 = layer4.clone('layer4_2')

    param_nodes_2 = [layer0_2, layer1_2, layer2_2, layer3_2, layer4_2]

    # 1.2 Create Loss
    # ---------------
    #  sim_cost = y*hwr(D - sim_margin)^2
    #  diff_cost = (1 - y) * hwr(diff_margin - D)^2
    #  total = ave(sim_cost + diff_cost)
    sqdistance = optimus.SquaredEuclidean(name='euclidean')
    distance = optimus.Sqrt(name='sqrt')

    # Sim terms
    sim_margin_sum = optimus.Add(name="sim_margin_sum", num_inputs=2)
    sim_hwr = optimus.RectifiedLinear(name="sim_hwr")
    sim_sqhwr = optimus.Power(name='sim_sqhwr', exponent=2.0)
    sim_cost = optimus.Product(name="sim_cost")

    # Diff terms: negate distance so Add computes (diff_margin - D).
    neg_distance = optimus.Multiply(name='neg_distance', weight_shape=None)
    neg_distance.weight.value = -1.0
    diff_margin_sum = optimus.Add(name="diff_margin_sum", num_inputs=2)
    diff_hwr = optimus.RectifiedLinear(name="diff_hwr")
    diff_sqhwr = optimus.Power(name='diff_sqhwr', exponent=2.0)

    # Selector computes (1 - y) from the score input.
    pos_one = optimus.Constant(name='pos_one', shape=None)
    pos_one.data.value = 1.0
    neg_score = optimus.Multiply(name='neg_score', weight_shape=None)
    neg_score.weight.value = -1.0

    diff_selector = optimus.Add("diff_selector", num_inputs=2)
    diff_cost = optimus.Product(name='diff_cost')

    total_cost = optimus.Add('total_cost', num_inputs=2)
    loss = optimus.Mean(name='loss')

    loss_nodes = [sqdistance, distance,
                  sim_margin_sum, sim_hwr, sim_sqhwr, sim_cost,
                  neg_distance, diff_margin_sum, diff_hwr, diff_sqhwr,
                  pos_one, neg_score, diff_selector, diff_cost,
                  total_cost, loss]

    # Graph outputs
    total_loss = optimus.Output(name='total_loss')
    embedding = optimus.Output(name='embedding')
    embedding_2 = optimus.Output(name='embedding_2')

    # 2. Define Edges (tower 1)
    base_edges = [
        (input_data, logscale.input),
        (logscale.output, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, layer4.input),
        (layer4.output, embedding)]

    # Tower 2 (cloned nodes)
    base_edges_2 = [
        (input_data_2, logscale_2.input),
        (logscale_2.output, layer0_2.input),
        (layer0_2.output, layer1_2.input),
        (layer1_2.output, layer2_2.input),
        (layer2_2.output, layer3_2.input),
        (layer3_2.output, layer4_2.input),
        (layer4_2.output, embedding_2)]

    cost_edges = [
        (layer4.output, sqdistance.input_a),
        (layer4_2.output, sqdistance.input_b),
        (sqdistance.output, distance.input),
        # Sim terms
        (score, sim_cost.input_a),
        (distance.output, sim_margin_sum.input_0),
        (sim_margin, sim_margin_sum.input_1),
        (sim_margin_sum.output, sim_hwr.input),
        (sim_hwr.output, sim_sqhwr.input),
        (sim_sqhwr.output, sim_cost.input_b),
        (sim_cost.output, total_cost.input_0),
        # Diff terms
        # - margin term
        (distance.output, neg_distance.input),
        (diff_margin, diff_margin_sum.input_0),
        (neg_distance.output, diff_margin_sum.input_1),
        (diff_margin_sum.output, diff_hwr.input),
        (diff_hwr.output, diff_sqhwr.input),
        # - score selector
        (score, neg_score.input),
        (pos_one.output, diff_selector.input_0),
        (neg_score.output, diff_selector.input_1),
        # - product
        (diff_selector.output, diff_cost.input_a),
        (diff_sqhwr.output, diff_cost.input_b),
        (diff_cost.output, total_cost.input_1)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + base_edges_2 + cost_edges + [
            # Combined
            (total_cost.output, loss.input),
            (loss.output, total_loss)])

    # Only the first tower's parameters receive updates (clones share them).
    update_manager = optimus.ConnectionManager(
        map(lambda n: (learning_rate, n.weights), param_nodes) +
        map(lambda n: (learning_rate, n.bias), param_nodes))

    param_init(param_nodes)

    misc_nodes = [logscale, logscale_2]

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=inputs,
        nodes=param_nodes + param_nodes_2 + loss_nodes + misc_nodes,
        connections=trainer_edges.connections,
        outputs=[total_loss, embedding, embedding_2],
        loss=total_loss,
        updates=update_manager.connections,
        verbose=False)

    # Per-pair (unaveraged) cost, used to filter zero-cost pairs.
    pw_cost = optimus.Output(name='pw_cost')
    zerofilt_edges = optimus.ConnectionManager(
        base_edges + base_edges_2 + cost_edges + [
            # Combined
            (total_cost.output, pw_cost)])

    # loss_nodes[:-1] drops the Mean node: cost stays per-pair.
    zerofilter = optimus.Graph(
        name=GRAPH_NAME + "_zerofilter",
        inputs=[input_data, input_data_2, score, sim_margin, diff_margin],
        nodes=param_nodes + param_nodes_2 + loss_nodes[:-1] + misc_nodes,
        connections=zerofilt_edges.connections,
        outputs=[pw_cost, embedding, embedding_2],
        verbose=False)

    # Inference uses a single tower.
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + [logscale],
        connections=optimus.ConnectionManager(base_edges).connections,
        outputs=[embedding],
        verbose=False)

    return trainer, predictor, zerofilter
예제 #13
0
def main(args):
    """Train a chord classifier regularized by a max-likelihood limiter.

    The trainer minimizes an NLL loss plus a `Max` penalty that discourages
    over-confident posteriors, governed by the `limiter_weight` and
    `likelihood_threshold` hyperparameters; validator and predictor graphs
    are saved alongside.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    limiter_weight = optimus.Input(
        name='limiter_weight',
        shape=None)

    likelihood_threshold = optimus.Input(
        name='likelihood_threshold',
        shape=None)

    # Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 512,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # Losses: NLL plus the posterior-limiting Max penalty.
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    max_likelihood = optimus.Max(
        name="max_likelihood")

    # Edges: the feed-forward chain is shared by every graph below.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [
            (chord_classifier.output, chord_nll.likelihood),
            (chord_idx, chord_nll.target_idx),
            (chord_classifier.output, max_likelihood.input),
            (limiter_weight, max_likelihood.weight),
            (likelihood_threshold, max_likelihood.threshold)])

    # Every layer's weights and bias share the same learning rate.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, param)
         for node in all_nodes
         for param in (node.weights, node.bias)])

    print("Trainer")
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate,
                limiter_weight, likelihood_threshold],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll, max_likelihood],
        updates=update_manager.connections)

    optimus.random_init(chord_classifier.weights)

    print("Validator")
    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, limiter_weight, likelihood_threshold],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll, max_likelihood])

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(chord_classifier.output, posterior)])

    print("Predictor")
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # Data
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(
        D.create_uniform_quality_stream(stash, TIME_DIM, vocab_dim=VOCAB),
        batch_size=50)

    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE,
                   likelihood_threshold.name: LIKELIHOOD_THRESHOLD,
                   limiter_weight.name: LIMITER_WEIGHT}

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #14
0
def main(args):
    """Train a compact chroma transcriber fed by an optimus Queue source.

    Three small conv layers into a 12-unit sigmoid affine output, trained
    with cross-entropy against chroma targets derived from chord samples.

    Parameters
    ----------
    args : object
        Namespace with `training_file`, `trial_name`, `model_directory`,
        `validator_file`, and `predictor_file` attributes.
    """
    # Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    target_chroma = optimus.Input(
        name='target_chroma',
        shape=(None, 12))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(12, 1, 3, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(16, None, 3, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(20, None, 1, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 12,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3]

    # Loss
    chroma_xentropy = optimus.CrossEntropy(name="chroma_xentropy")

    # Edges: feed-forward chain shared by trainer, validator, predictor.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + [
            (layer3.output, chroma_xentropy.prediction),
            (target_chroma, chroma_xentropy.target)])

    # Every layer's weights and bias share the same learning rate.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, param)
         for node in all_nodes
         for param in (node.weights, node.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy],
        updates=update_manager.connections)

    for node in all_nodes:
        optimus.random_init(node.weights)

    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, target_chroma],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chroma_xentropy])

    chroma_out = optimus.Output(name='chroma')

    predictor_edges = optimus.ConnectionManager(
        base_edges + [(layer3.output, chroma_out)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[chroma_out])

    # Data: sample chord windows, pitch-shift, and map to chroma targets.
    source = optimus.Queue(
        optimus.File(args.training_file),
        transformers=[
            T.chord_sample(input_data.shape[2]),
            T.pitch_shift(8),
            T.map_to_chroma],
        **SOURCE_ARGS)

    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    driver.fit(source, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #15
0
def iXc3_fc_nll(n_in, size='large', use_dropout=False):
    """Build a 3-conv + fully-connected chord classifier trained with NLL.

    Parameters
    ----------
    n_in : int
        Input window length in frames; must be a key of the kernel-height
        and pooling lookup tables below (1, 4, 8, 10, or 20).
    size : str
        Model capacity; one of {'small', 'med', 'large', 'xlarge', 'xxlarge'}.
    use_dropout : bool
        When True, dropout is enabled on the three conv layers for the
        trainer graph and disabled again before the predictor is built.

    Returns
    -------
    trainer, predictor : optimus.Graph
        Training graph (with loss and update rules) and an inference-only
        graph that shares the same parameter nodes.
    """
    # Number of kernels per conv layer, keyed by model size.
    k0, k1, k2 = dict(
        small=(10, 20, 40),
        med=(12, 24, 48),
        large=(16, 32, 64),
        xlarge=(20, 40, 80),
        xxlarge=(24, 48, 96))[size]

    # Time-axis kernel heights, keyed by input window length.
    n0, n1, n2 = {
        1: (1, 1, 1),
        4: (3, 2, 1),
        8: (5, 3, 2),
        10: (3, 3, 1),
        20: (5, 5, 1)}[n_in]

    # Time-axis pooling factors, keyed by input window length.
    p0, p1, p2 = {
        1: (1, 1, 1),
        4: (1, 1, 1),
        8: (1, 1, 1),
        10: (2, 2, 1),
        12: (2, 2, 1),
        20: (2, 2, 2)}[n_in]

    input_data = optimus.Input(
        name='data',
        shape=(None, 1, n_in, 252))

    chord_idx = optimus.Input(
        name='class_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    inputs = [input_data, chord_idx, learning_rate]

    dropout = optimus.Input(
        name='dropout',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(k0, None, n0, 13),
        pool_shape=(p0, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(k1, None, n1, 37),
        pool_shape=(p1, 1),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(k2, None, n2, 33),
        pool_shape=(p2, 1),
        act_type='relu')

    # The dropout input is only wired in (and added to `inputs`) when
    # requested; otherwise the trainer never sees it.
    dropout_edges = []
    if use_dropout:
        layer0.enable_dropout()
        layer1.enable_dropout()
        layer2.enable_dropout()
        inputs += [dropout]
        dropout_edges += [(dropout, layer0.dropout),
                          (dropout, layer1.dropout),
                          (dropout, layer2.dropout)]

    chord_classifier = optimus.Affine(
        name='chord_classifier',
        input_shape=layer2.output.shape,
        output_shape=(None, VOCAB),
        act_type='softmax')

    # NOTE(review): the prior is hard-coded to 157 classes while the
    # classifier emits VOCAB outputs — confirm VOCAB == 157 in this module.
    prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
    prior.weight.value = np.ones([1, 157])

    param_nodes = [layer0, layer1, layer2, chord_classifier]
    misc_nodes = [prior]

    # 1.1 Create Loss
    nll = optimus.NegativeLogLikelihoodLoss(name='negative_log_likelihood')
    total_loss = optimus.Output(name='total_loss')

    # features = optimus.Output(name='features')
    posterior = optimus.Output(name='posterior')

    # 2. Define Edges
    # The posterior passes through the (initially uniform) class prior; the
    # NLL loss below consumes the classifier output directly, pre-prior.
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, chord_classifier.input),
        (chord_classifier.output, prior.input),
        (prior.output, posterior)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + dropout_edges + [
            (chord_classifier.output, nll.likelihoods),
            (chord_idx, nll.index),
            (nll.output, total_loss)])

    # NOTE: map(...) + map(...) concatenates lists under Python 2 only.
    update_manager = optimus.ConnectionManager(
        map(lambda n: (learning_rate, n.weights), param_nodes) +
        map(lambda n: (learning_rate, n.bias), param_nodes))

    classifier_init(param_nodes)

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=inputs,
        nodes=param_nodes + misc_nodes + [nll],
        connections=trainer_edges.connections,
        outputs=[total_loss, posterior],
        loss=total_loss,
        updates=update_manager.connections,
        verbose=True)

    # Dropout must be off for inference; the predictor shares the same
    # layer objects as the trainer.
    if use_dropout:
        layer0.disable_dropout()
        layer1.disable_dropout()
        layer2.disable_dropout()

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + misc_nodes,
        connections=optimus.ConnectionManager(base_edges).connections,
        outputs=[posterior],
        verbose=True)

    return trainer, predictor
예제 #16
0
def main(args):
    """Train a 5-layer chord model with a staged chord-quality curriculum.

    Builds trainer / validator / predictor graphs over shared nodes, then
    fits the trainer repeatedly while growing the set of chord qualities in
    the data stream, finally saving the validator and predictor definitions
    under the last driver's output directory.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(30, 1, 9, 19),
                            pool_shape=(1, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(50, None, 7, 15),
                            act_type='relu')

    layer2 = optimus.Affine(name='layer2',
                            input_shape=layer1.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    chord_classifier = optimus.Softmax(name='chord_classifier',
                                       input_shape=layer3.output.shape,
                                       n_out=VOCAB,
                                       act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.1 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)
    ])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)
    ])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, chord_idx, learning_rate],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[chord_nll],
                            updates=update_manager.connections)

    # Random-initialize every parameterized node after the trainer binds them.
    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    # The validator reuses the trainer's edges (loss, no updates).
    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data, chord_idx],
                              nodes=all_nodes,
                              connections=trainer_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)
    ])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    stash = biggie.Stash(args.training_file)
    hyperparams = {learning_rate.name: LEARNING_RATE}
    # Curriculum: start from quality index 0 and add one chord quality per
    # stage (q = 1..12), re-fitting the same trainer on the richer stream.
    valid_idx = [0]
    for q in range(1, 13):
        valid_idx.append(q)
        stream = S.minibatch(D.create_uniform_quality_stream(
            stash, TIME_DIM, vocab_dim=VOCAB, valid_idx=valid_idx),
                             batch_size=BATCH_SIZE)

        # A fresh driver per stage, suffixed with the stage number.
        driver = optimus.Driver(graph=trainer,
                                name=args.trial_name + "_c%02d" % q,
                                output_directory=args.model_directory)
        driver.fit(stream,
                   hyperparams=hyperparams,
                   max_iter=20000,
                   **DRIVER_ARGS)

    # Definitions are saved under the final stage's output directory.
    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #17
0
def i8x1a3T_nll2(size, use_dropout=False):
    """Build a 3-affine-layer chord model with an explicit NLL loss chain.

    Parameters
    ----------
    size : str
        Model capacity; currently only 'large' is defined.
    use_dropout : bool
        When True, dropout is enabled on the three affine layers for the
        trainer graph and disabled again before the predictor is built.

    Returns
    -------
    trainer, predictor : optimus.Graph
        Training graph (with loss and update rules) and an inference-only
        graph sharing the same parameter nodes.
    """
    # Layer widths; layer2's k2 is reshaped into a (k2, 1, 12) map below.
    k0, k1, k2 = dict(
        large=(2048, 2048, 40),)[size]

    input_data = optimus.Input(
        name='data',
        shape=(None, 8, 1, 252))

    chord_idx = optimus.Input(
        name='class_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    inputs = [input_data, chord_idx, learning_rate]

    dropout = optimus.Input(
        name='dropout',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Affine(
        name='layer0',
        input_shape=input_data.shape,
        output_shape=(None, k0),
        act_type='relu')

    layer1 = optimus.Affine(
        name='layer1',
        input_shape=layer0.output.shape,
        output_shape=(None, k1),
        act_type='relu')

    # 4-D output so the Conv3D classifier below can scan the 12 pitch bins.
    layer2 = optimus.Affine(
        name='layer2',
        input_shape=layer1.output.shape,
        output_shape=(None, k2, 1, 12),
        act_type='relu')

    # Dropout edges are only wired in when requested.
    dropout_edges = []
    if use_dropout:
        layer0.enable_dropout()
        layer1.enable_dropout()
        layer2.enable_dropout()
        inputs += [dropout]
        dropout_edges += [(dropout, layer0.dropout),
                          (dropout, layer1.dropout),
                          (dropout, layer2.dropout)]

    # 13 chord classes per pitch bin; flattened to 13 * 12 = 156 below.
    chord_classifier = optimus.Conv3D(
        name='chord_classifier',
        input_shape=layer2.output.shape,
        weight_shape=(13, None, 1, 1),
        act_type='linear')

    flatten = optimus.Flatten('flatten', 2)

    # Single extra logit for the no-chord class, taken off layer0.
    null_classifier = optimus.Affine(
        name='null_classifier',
        input_shape=layer0.output.shape,
        output_shape=(None, 1),
        act_type='linear')

    # 156 chord logits + 1 null logit -> 157-way softmax, then class prior.
    cat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)
    softmax = optimus.Softmax('softmax')
    prior = optimus.Multiply("prior", weight_shape=(1, 157), broadcast=[0])
    prior.weight.value = np.ones([1, 157])

    param_nodes = [layer0, layer1, layer2, null_classifier, chord_classifier]
    misc_nodes = [flatten, cat, softmax, prior]

    # 1.1 Create Loss
    # NLL assembled explicitly: select the true-class likelihood, log it,
    # negate (gain = -1), and average over the batch.
    likelihoods = optimus.SelectIndex(name='likelihoods')

    log = optimus.Log(name='log')
    neg = optimus.Multiply(name='gain', weight_shape=None)
    neg.weight.value = -1.0

    loss = optimus.Mean(name='negative_log_likelihood')
    loss_nodes = [likelihoods, log, neg, loss]
    total_loss = optimus.Output(name='total_loss')

    posterior = optimus.Output(name='posterior')

    # 2. Define Edges
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, chord_classifier.input),
        (layer0.output, null_classifier.input),
        (chord_classifier.output, flatten.input),
        (flatten.output, cat.input_0),
        (null_classifier.output, cat.input_1),
        (cat.output, softmax.input),
        (softmax.output, prior.input),
        (prior.output, posterior)]

    trainer_edges = optimus.ConnectionManager(
        base_edges + dropout_edges + [
            (softmax.output, likelihoods.input),
            (chord_idx, likelihoods.index),
            (likelihoods.output, log.input),
            (log.output, neg.input),
            (neg.output, loss.input),
            (loss.output, total_loss)])

    # NOTE: map(...) + map(...) concatenates lists under Python 2 only.
    update_manager = optimus.ConnectionManager(
        map(lambda n: (learning_rate, n.weights), param_nodes) +
        map(lambda n: (learning_rate, n.bias), param_nodes))

    classifier_init(param_nodes)

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=inputs,
        nodes=param_nodes + misc_nodes + loss_nodes,
        connections=trainer_edges.connections,
        outputs=[total_loss, posterior],
        loss=total_loss,
        updates=update_manager.connections,
        verbose=True)

    # Dropout must be off for inference.
    if use_dropout:
        layer0.disable_dropout()
        layer1.disable_dropout()
        layer2.disable_dropout()

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + misc_nodes,
        connections=optimus.ConnectionManager(base_edges).connections,
        outputs=[posterior],
        verbose=True)

    return trainer, predictor
예제 #18
0
def main(args):
    """Train a 4-conv/affine chord classifier end-to-end and save graphs.

    Builds trainer / validator / predictor graphs over shared nodes, fits
    on a pitch-shifted minibatch stream, and saves the validator and
    predictor definitions to the driver's output directory.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(12, 1, 9, 19),
        pool_shape=(1, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(16, None, 7, 15),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(20, None, 6, 15),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 512,),
        act_type='relu')

    chord_classifier = optimus.Softmax(
        name='chord_classifier',
        input_shape=layer3.output.shape,
        n_out=VOCAB,
        act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.1 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(
        name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)])

    print "Building trainer"
    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    # NOTE(review): only the classifier weights are random-initialized here;
    # sibling examples init every node — confirm this asymmetry is intended.
    optimus.random_init(chord_classifier.weights)

    print "Building validator"
    # The validator reuses the trainer's edges (loss, no updates).
    validator = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll])

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    print "Building predictor"
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Opening Data"
    stash = biggie.Stash(args.training_file)
    # Pitch-shift augmentation, then map each sample to a chord index.
    stream = S.minibatch(
        D.create_uniform_quality_stream(stash, TIME_DIM),
        batch_size=50,
        functions=[FX.pitch_shift(), FX.map_to_chord_index(VOCAB)])

    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    print "...aaand we're off!"
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #19
0
def i8c3_pwmse(size='large'):
    """Build a trainer/predictor pair optimizing pointwise squared error.

    The network applies three conv layers to an 8-frame CQT patch, then a
    sigmoid conv "chord" head and a sigmoid "null" head whose outputs are
    concatenated. Training selects the target-class activation and drives
    it toward a scalar target with mean squared error.

    Parameters
    ----------
    size : str
        Model capacity; one of {'small', 'med', 'large'}.

    Returns
    -------
    trainer, predictor : optimus.Graph
        Training graph (with loss and update rules) and an inference-only
        graph sharing the same parameter nodes.
    """
    num_k0, num_k1, num_k2 = dict(
        small=(8, 16, 20),
        med=(12, 24, 32),
        large=(16, 32, 48))[size]

    # --- Inputs ----------------------------------------------------------
    cqt_input = optimus.Input(name='cqt', shape=(None, 1, 8, 252))
    target_value = optimus.Input(name='target', shape=(None, 1))
    index_input = optimus.Input(name='chord_idx', shape=(None,),
                                dtype='int32')
    eta = optimus.Input(name='learning_rate', shape=None)

    # --- Parameterized nodes --------------------------------------------
    conv0 = optimus.Conv3D(
        name='layer0',
        input_shape=cqt_input.shape,
        weight_shape=(num_k0, None, 3, 13),
        pool_shape=(1, 3),
        act_type='relu')

    conv1 = optimus.Conv3D(
        name='layer1',
        input_shape=conv0.output.shape,
        weight_shape=(num_k1, None, 3, 37),
        act_type='relu')

    conv2 = optimus.Conv3D(
        name='layer2',
        input_shape=conv1.output.shape,
        weight_shape=(num_k2, None, 3, 33),
        act_type='relu')

    fret_conv = optimus.Conv3D(
        name='chord_classifier',
        input_shape=conv2.output.shape,
        weight_shape=(13, None, 2, 1),
        act_type='sigmoid')

    flat = optimus.Flatten('flatten', 2)

    no_chord = optimus.Affine(
        name='null_classifier',
        input_shape=conv2.output.shape,
        output_shape=(None, 1),
        act_type='sigmoid')

    concat = optimus.Concatenate('concatenate', num_inputs=2, axis=1)

    param_nodes = [conv0, conv1, conv2, fret_conv, no_chord]
    misc_nodes = [flat, concat]

    # --- Loss nodes ------------------------------------------------------
    # Pick the target class activation, reshape to a column, and penalize
    # its squared distance to the supplied scalar target.
    select = optimus.SelectIndex(name='likelihoods')
    to_column = optimus.Dimshuffle('dimshuffle', (0, 'x'))
    sqerr = optimus.SquaredEuclidean(name='squared_error')
    mean_loss = optimus.Mean(name='mean_squared_error')

    loss_nodes = [select, to_column, sqerr, mean_loss]

    # --- Edges -----------------------------------------------------------
    base_edges = [
        (cqt_input, conv0.input),
        (conv0.output, conv1.input),
        (conv1.output, conv2.input),
        (conv2.output, fret_conv.input),
        (conv2.output, no_chord.input),
        (fret_conv.output, flat.input),
        (flat.output, concat.input_0),
        (no_chord.output, concat.input_1)]

    loss_edges = [
        (concat.output, select.input),
        (index_input, select.index),
        (select.output, to_column.input),
        (to_column.output, sqerr.input_a),
        (target_value, sqerr.input_b),
        (sqerr.output, mean_loss.input)]

    trainer_edges = optimus.ConnectionManager(base_edges + loss_edges)

    # SGD update pairs: all weights first, then all biases.
    update_pairs = [(eta, node.weights) for node in param_nodes]
    update_pairs += [(eta, node.bias) for node in param_nodes]
    update_manager = optimus.ConnectionManager(update_pairs)

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[cqt_input, target_value, index_input, eta],
        nodes=param_nodes + misc_nodes + loss_nodes,
        connections=trainer_edges.connections,
        outputs=[mean_loss.output],
        loss=mean_loss.output,
        updates=update_manager.connections,
        verbose=True)

    # Shared parameters are (re)initialized after the trainer binds them.
    classifier_init(param_nodes)

    posterior = optimus.Output(name='posterior')
    predictor_edges = optimus.ConnectionManager(
        base_edges + [(concat.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[cqt_input],
        nodes=param_nodes + misc_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    return trainer, predictor
예제 #20
0
def iXc3_rbf_weighted(n_in, size='large', use_dropout=False):
    """Build a guitar-fretboard model trained with class-weighted RBF energy.

    Six per-string fret predictors share a 3-conv trunk; their softmaxes are
    stacked into a fretboard tensor. Training minimizes the radial-basis
    energy of the true class, scaled by a per-sample class weight.

    Parameters
    ----------
    n_in : int
        Input window length in frames; must be a key of the kernel-height
        and pooling lookup tables below (1, 4, 8, 10, or 20).
    size : str
        Model capacity; one of {'small', 'med', 'large', 'xlarge', 'xxlarge'}.
    use_dropout : bool
        When True, dropout is enabled on the three conv layers for the
        trainer graph and disabled again before the inference graphs.

    Returns
    -------
    trainer, fret_predictor, classifier : optimus.Graph
        Training graph, a graph emitting the stacked fretboard, and a graph
        converting RBF energies into a class posterior.
    """
    # Number of kernels per conv layer, keyed by model size.
    k0, k1, k2 = dict(
        small=(10, 20, 40),
        med=(12, 24, 48),
        large=(16, 32, 64),
        xlarge=(20, 40, 80),
        xxlarge=(24, 48, 96))[size]

    # Time-axis kernel heights, keyed by input window length.
    n0, n1, n2 = {
        1: (1, 1, 1),
        4: (3, 2, 1),
        8: (5, 3, 2),
        10: (3, 3, 1),
        20: (5, 5, 1)}[n_in]

    # Time-axis pooling factors, keyed by input window length.
    p0, p1, p2 = {
        1: (1, 1, 1),
        4: (1, 1, 1),
        8: (1, 1, 1),
        10: (2, 2, 1),
        12: (2, 2, 1),
        20: (2, 2, 2)}[n_in]

    input_data = optimus.Input(
        name='data',
        shape=(None, 1, n_in, 252))

    chord_idx = optimus.Input(
        name='class_idx',
        shape=(None,),
        dtype='int32')

    # Per-sample importance weight applied to the selected energy.
    class_weight = optimus.Input(
        name='class_weight',
        shape=(None,))

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    inputs = [input_data, chord_idx, class_weight, learning_rate]

    dropout = optimus.Input(
        name='dropout',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(k0, None, n0, 13),
        pool_shape=(p0, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(k1, None, n1, 37),
        pool_shape=(p1, 1),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(k2, None, n2, 33),
        pool_shape=(p2, 1),
        act_type='relu')

    # Trainer edge list starts with optional dropout wiring; base edges are
    # appended below.
    trainer_edges = []
    if use_dropout:
        layer0.enable_dropout()
        layer1.enable_dropout()
        layer2.enable_dropout()
        inputs += [dropout]
        trainer_edges += [(dropout, layer0.dropout),
                          (dropout, layer1.dropout),
                          (dropout, layer2.dropout)]

    # One fret predictor + softmax per guitar string (E A D G B e).
    predictors = []
    softmaxes = []
    for name in 'EADGBe':
        predictors.append(optimus.Affine(
            name='{0}_predictor'.format(name),
            input_shape=layer2.output.shape,
            output_shape=(None, NUM_FRETS),
            act_type='linear'))
        softmaxes.append(optimus.Softmax('{0}_softmax'.format(name)))

    stack = optimus.Stack('stacker', num_inputs=6, axes=(1, 0, 2))
    param_nodes = [layer0, layer1, layer2] + predictors
    misc_nodes = [stack] + softmaxes

    # 1.1 Create Loss
    # RBF energy over the stacked fretboard; low energy = good match.
    rbf = optimus.RadialBasis(
        name='rbf',
        input_shape=(None, 6, NUM_FRETS),
        output_shape=(None, 157))
    # Negates energies so the classifier softmax favors low-energy classes.
    inverter = optimus.Multiply(name='inverter', weight_shape=None)
    inverter.weight.value = -1.0
    class_softmax = optimus.Softmax("class_softmax")

    energies = optimus.SelectIndex(name='energies')
    importance = optimus.Product(name='importance_weighting')
    ave_loss = optimus.Mean(name='ave_loss')
    total_loss = optimus.Output(name='total_loss')

    # 2. Define Edges
    base_edges = [
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input)]

    for p, smax in zip(predictors, softmaxes):
        base_edges += [(layer2.output, p.input),
                       (p.output, smax.input)]

    base_edges += [(softmaxes[0].output, stack.input_0),
                   (softmaxes[1].output, stack.input_1),
                   (softmaxes[2].output, stack.input_2),
                   (softmaxes[3].output, stack.input_3),
                   (softmaxes[4].output, stack.input_4),
                   (softmaxes[5].output, stack.input_5)]

    # Loss path: select the true class's energy, weight it, average it.
    trainer_edges += base_edges + [
        (stack.output, rbf.input),
        (rbf.output, energies.input),
        (rbf.output, inverter.input),
        (chord_idx, energies.index),
        (energies.output, importance.input_a),
        (class_weight, importance.input_b),
        (importance.output, ave_loss.input),
        (ave_loss.output, total_loss)]

    # NOTE: map(...) + map(...) concatenates lists under Python 2 only.
    update_manager = optimus.ConnectionManager(
        map(lambda n: (learning_rate, n.weights), param_nodes) +
        map(lambda n: (learning_rate, n.bias), param_nodes))

    classifier_init(param_nodes)

    train_nodes = [rbf, inverter, energies, importance, ave_loss]

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=inputs,
        nodes=param_nodes + misc_nodes + train_nodes,
        connections=optimus.ConnectionManager(trainer_edges).connections,
        outputs=[total_loss],
        loss=total_loss,
        updates=update_manager.connections,
        verbose=True)

    # Dropout must be off for the inference graphs below.
    if use_dropout:
        layer0.disable_dropout()
        layer1.disable_dropout()
        layer2.disable_dropout()

    fretboard = optimus.Output(name='fretboard')
    fret_predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + misc_nodes,
        connections=optimus.ConnectionManager(
            base_edges + [(stack.output, fretboard)]).connections,
        outputs=[fretboard],
        verbose=True)

    posterior = optimus.Output(name='posterior')

    classifier_edges = base_edges + [
        (stack.output, rbf.input),
        (rbf.output, inverter.input),
        (inverter.output, class_softmax.input),
        (class_softmax.output, posterior)]

    classifier = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=param_nodes + misc_nodes + [rbf, inverter, class_softmax],
        connections=optimus.ConnectionManager(classifier_edges).connections,
        outputs=[posterior],
        verbose=True)

    return trainer, fret_predictor, classifier
예제 #21
0
def nlse_iX_c3f2_oY(n_in, n_out, size='large', verbose=False):
    """Variable length input, 5-layer model.

    Builds a triplet (anchor / same / diff) embedding network trained with a
    contrastive margin loss plus a weight-decay penalty on the embedding.

    Parameters
    ----------
    n_in : int
        Length of input window, in [1, 4, 8, 10, 20].

    n_out : int
        Numer of output dimensions.

    size : str
        One of ['small', 'med', 'large', 'xlarge', 'xxlarge']

    verbose : bool
        Passed through to the optimus.Graph constructors.

    Returns
    -------
    trainer, predictor : optimus.Graph
        Fully operational optimus graphs: the triplet trainer and the
        single-branch embedding predictor.
    """
    # Kernel shapes
    k0, k1, k2, k3 = dict(small=(10, 20, 40, 96),
                          med=(12, 24, 48, 128),
                          large=(16, 32, 64, 192),
                          xlarge=(20, 40, 80, 256),
                          xxlarge=(24, 48, 96, 512))[size]

    # Input dimensions
    n0, n1, n2 = {
        1: (1, 1, 1),
        4: (3, 2, 1),
        8: (5, 3, 2),
        10: (3, 3, 1),
        20: (5, 5, 1)
    }[n_in]

    # Pool shapes
    p0, p1, p2 = {
        1: (1, 1, 1),
        4: (1, 1, 1),
        8: (1, 1, 1),
        10: (2, 2, 1),
        12: (2, 2, 1),
        20: (2, 2, 2)
    }[n_in]

    # Inputs
    # ------
    x_in = optimus.Input(name='x_in', shape=(None, 1, n_in, 192))
    x_same = optimus.Input(name='x_same', shape=x_in.shape)
    x_diff = optimus.Input(name='x_diff', shape=x_in.shape)
    learning_rate = optimus.Input(name='learning_rate', shape=None)
    margin_same = optimus.Input(name='margin_same', shape=None)
    margin_diff = optimus.Input(name='margin_diff', shape=None)
    origin_penalty = optimus.Input(name='origin_penalty', shape=None)
    inputs = [
        x_in, x_same, x_diff, learning_rate, margin_same, margin_diff,
        origin_penalty
    ]

    # 1.2 Create Nodes
    # Compressive log-scaling applied to the raw input.
    logscale = optimus.Log(name="logscale", epsilon=1.0, gain=50.0)

    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=x_in.shape,
                            weight_shape=(k0, None, n0, 13),
                            pool_shape=(p0, 1),
                            act_type='tanh')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(k1, None, n1, 11),
                            pool_shape=(p1, 1),
                            act_type='tanh')

    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(k2, None, n2, 9),
                            pool_shape=(p2, 1),
                            act_type='tanh')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(None, k3),
                            act_type='tanh')

    layer4 = optimus.Affine(name='layer4',
                            input_shape=layer3.output.shape,
                            output_shape=(None, n_out),
                            act_type='linear')

    param_nodes = [layer0, layer1, layer2, layer3, layer4]

    # 1.1 Create cloned nodes
    # Siamese branches: weight-tied clones process the "same" and "diff"
    # examples alongside the anchor branch.
    logscale_scopy = logscale.clone("logscale_scopy")
    logscale_dcopy = logscale.clone("logscale_dcopy")
    nodes_same = [l.clone(l.name + "_scopy") for l in param_nodes]
    nodes_diff = [l.clone(l.name + "_dcopy") for l in param_nodes]

    # 1.2 Create Loss
    # ---------------
    cost_same = optimus.Euclidean(name='cost_same')
    cost_diff = optimus.Euclidean(name='cost_diff')

    # Sim terms
    criterion = optimus.ContrastiveMargin(name='contrastive_margin')
    decay = optimus.WeightDecayPenalty(name='decay')
    total_loss = optimus.Add(name='total_loss', num_inputs=2)
    loss_nodes = [cost_same, cost_diff, criterion, decay, total_loss]

    # Graph outputs
    loss = optimus.Output(name='loss')
    z_out = optimus.Output(name='z_out')

    # 2. Define Edges
    base_edges = [(x_in, logscale.input), (logscale.output, layer0.input),
                  (layer0.output, layer1.input), (layer1.output, layer2.input),
                  (layer2.output, layer3.input), (layer3.output, layer4.input),
                  (layer4.output, z_out)]

    base_edges_same = [(x_same, logscale_scopy.input),
                       (logscale_scopy.output, nodes_same[0].input),
                       (nodes_same[0].output, nodes_same[1].input),
                       (nodes_same[1].output, nodes_same[2].input),
                       (nodes_same[2].output, nodes_same[3].input),
                       (nodes_same[3].output, nodes_same[4].input)]

    base_edges_diff = [(x_diff, logscale_dcopy.input),
                       (logscale_dcopy.output, nodes_diff[0].input),
                       (nodes_diff[0].output, nodes_diff[1].input),
                       (nodes_diff[1].output, nodes_diff[2].input),
                       (nodes_diff[2].output, nodes_diff[3].input),
                       (nodes_diff[3].output, nodes_diff[4].input)]

    # Contrastive loss over branch distances, plus an origin penalty on the
    # anchor embedding; both terms summed into the scalar loss.
    cost_edges = [(param_nodes[-1].output, cost_same.input_a),
                  (nodes_same[-1].output, cost_same.input_b),
                  (param_nodes[-1].output, cost_diff.input_a),
                  (nodes_diff[-1].output, cost_diff.input_b),
                  (cost_same.output, criterion.cost_sim),
                  (cost_diff.output, criterion.cost_diff),
                  (margin_same, criterion.margin_sim),
                  (margin_diff, criterion.margin_diff),
                  (criterion.output, total_loss.input_0),
                  (origin_penalty, decay.weight),
                  (param_nodes[-1].output, decay.input),
                  (decay.output, total_loss.input_1),
                  (total_loss.output, loss)]

    trainer_edges = optimus.ConnectionManager(base_edges + base_edges_same +
                                              base_edges_diff + cost_edges)

    # Only the anchor branch's parameters receive updates (clones are tied).
    update_manager = optimus.ConnectionManager(
        list(map(lambda n: (learning_rate, n.weights), param_nodes)) +
        list(map(lambda n: (learning_rate, n.bias), param_nodes)))

    param_init(param_nodes)

    misc_nodes = [logscale, logscale_scopy, logscale_dcopy]

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=inputs,
                            nodes=param_nodes + nodes_same + nodes_diff +
                            loss_nodes + misc_nodes,
                            connections=trainer_edges.connections,
                            outputs=[loss, z_out],
                            loss=loss,
                            updates=update_manager.connections,
                            verbose=verbose)

    # The predictor only needs the anchor branch.
    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[x_in],
        nodes=param_nodes + [logscale],
        connections=optimus.ConnectionManager(base_edges).connections,
        outputs=[z_out],
        verbose=verbose)

    return trainer, predictor
예제 #22
0
def main(args):
    """Build and train a convnet chord classifier on muxed data sources.

    Assembles a 3-conv + dense + softmax graph trained directly on
    classification error, optionally warm-starts it from a parameter file,
    fits on minibatches drawn uniformly from a real and a synthetic stash,
    and saves the predictor graph definition to the output directory.

    Parameters
    ----------
    args : object
        Namespace with attributes: training_file, secondary_source,
        init_param_file, trial_name, model_directory, predictor_file.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt',
                               shape=(None, 1, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(32, 1, 5, 19),
                            pool_shape=(2, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(64, None, 5, 15),
                            act_type='relu')

    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(128, None, 3, 15),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    chord_classifier = optimus.Softmax(name='chord_classifier',
                                       input_shape=layer3.output.shape,
                                       n_out=VOCAB,
                                       act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.1 Create Losses
    # NOTE(review): this variant optimizes classification error directly,
    # unlike the sibling builders that use negative log-likelihood.
    chord_mce = optimus.ClassificationError(name="chord_mce")

    # 2. Define Edges
    # Feed-forward chain into the loss; targets arrive via chord_idx.
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_mce.prediction),
        (chord_idx, chord_mce.target_idx)
    ])

    # One learning-rate connection per trainable parameter (weights + bias).
    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)
    ])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, chord_idx, learning_rate],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[chord_mce],
                            updates=update_manager.connections)

    # Randomize all parameters; overwritten below when a param file is given.
    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        trainer.load_param_values(args.init_param_file)

    posterior = optimus.Output(name='posterior')

    # Predictor graph: same feed-forward chain, emits the softmax posterior.
    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)
    ])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    synth_stash = biggie.Stash(args.secondary_source)
    # Uniformly sample chords, muxing the real and synthetic stashes.
    stream = D.muxed_uniform_chord_stream(stash,
                                          synth_stash,
                                          TIME_DIM,
                                          pitch_shift=0,
                                          vocab_dim=VOCAB,
                                          working_size=10)
    # stream = D.create_uniform_chord_stream(
    #     stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=10)

    # if args.secondary_source:
    #     print "Loading %s" % args.secondary_source
    #     stash2 = biggie.Stash(args.secondary_source)
    #     stream2 = D.create_uniform_chord_stream(
    #         stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
    #     stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    # Save the predictor definition up front, before training begins.
    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
예제 #23
0
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(30, 1, 9, 19),
                            pool_shape=(1, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(50, None, 7, 15),
                            act_type='relu')

    layer2 = optimus.Affine(name='layer2',
                            input_shape=layer1.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    chord_classifier = optimus.Softmax(name='chord_classifier',
                                       input_shape=layer3.output.shape,
                                       n_out=VOCAB,
                                       act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.1 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)
    ])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)
    ])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, chord_idx, learning_rate],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[chord_nll],
                            updates=update_manager.connections,
                            momentum=None)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    trainer.load_param_values(
        "/media/attic/dl4mir/chord_estimation/models/nll_chord_uniform_2big/synth_data_01/0/classifier-V157-synth_data_01-041750-2014-08-25_21h59m56s.npz"
    )

    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data, chord_idx],
                              nodes=all_nodes,
                              connections=trainer_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)
    ])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_uniform_chord_stream(stash,
                                           TIME_DIM,
                                           pitch_shift=False,
                                           vocab_dim=VOCAB,
                                           working_size=3)
    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
예제 #24
0
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt',
                               shape=(None, 1, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # 1.2 Create Nodes
    input_scalar = optimus.Normalize(name='input_scalar',
                                     mode='l2',
                                     scale_factor=50.0)

    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(32, 1, 5, 19),
                            pool_shape=(2, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(64, None, 5, 15),
                            act_type='relu')

    layer2 = optimus.Conv3D(name='layer2',
                            input_shape=layer1.output.shape,
                            weight_shape=(128, None, 3, 15),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    chord_classifier = optimus.Softmax(name='chord_classifier',
                                       input_shape=layer3.output.shape,
                                       n_out=VOCAB,
                                       act_type='linear')

    all_nodes = [
        input_scalar, layer0, layer1, layer2, layer3, chord_classifier
    ]

    # 1.1 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input), (input_scalar.output, layer0.input),
        (layer0.output, layer1.input), (layer1.output, layer2.input),
        (layer2.output, layer3.input), (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)
    ])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)
    ])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[input_data, chord_idx, learning_rate],
                            nodes=all_nodes,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[chord_nll],
                            updates=update_manager.connections)

    for n in all_nodes[1:]:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        param_values = dict(np.load(args.init_param_file))
        keys = param_values.keys()
        for key in keys:
            if chord_classifier.name in key or layer3.name in key:
                print "skipping %s" % key
                del param_values[key]
        trainer.param_values = param_values

    posterior = optimus.Output(name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, input_scalar.input), (input_scalar.output, layer0.input),
        (layer0.output, layer1.input), (layer1.output, layer2.input),
        (layer2.output, layer3.input), (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)
    ])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = S.minibatch(D.create_uniform_chord_stream(stash,
                                                       TIME_DIM,
                                                       pitch_shift=0,
                                                       vocab_dim=VOCAB,
                                                       working_size=10),
                         batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
def main(args):
    """Build and train a dropout-regularized convnet chord classifier.

    Two conv layers feed two dropout-enabled dense layers and a softmax
    trained under negative log-likelihood; training minibatches are
    corrupted with additive noise and dropped frames. Validator and
    predictor graph definitions are saved before fitting.

    Parameters
    ----------
    args : object
        Namespace with attributes: training_file, trial_name,
        model_directory, validator_file, predictor_file.
    """
    # 1.1 Create Inputs
    input_data = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))

    chord_idx = optimus.Input(name='chord_idx', shape=(None, ), dtype='int32')

    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # Dropout probability, supplied as a hyperparameter at fit time.
    dropout = optimus.Input(name='dropout', shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(name='layer0',
                            input_shape=input_data.shape,
                            weight_shape=(30, 1, 9, 19),
                            pool_shape=(1, 3),
                            act_type='relu')

    layer1 = optimus.Conv3D(name='layer1',
                            input_shape=layer0.output.shape,
                            weight_shape=(50, None, 7, 15),
                            act_type='relu')

    layer2 = optimus.Affine(name='layer2',
                            input_shape=layer1.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    layer3 = optimus.Affine(name='layer3',
                            input_shape=layer2.output.shape,
                            output_shape=(
                                None,
                                1024,
                            ),
                            act_type='relu')

    # Dropout is applied to the dense layers only; must be switched on
    # before the trainer graph is constructed below.
    for n in [layer2, layer3]:
        n.enable_dropout()

    chord_classifier = optimus.Softmax(name='chord_classifier',
                                       input_shape=layer3.output.shape,
                                       n_out=VOCAB,
                                       act_type='linear')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    # 1.1 Create Losses
    chord_nll = optimus.NegativeLogLikelihood(name="chord_nll")

    # 2. Define Edges
    # Feed-forward chain into the loss; the dropout input drives the
    # dense layers' dropout ports.
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx), (dropout, layer2.dropout),
        (dropout, layer3.dropout)
    ])

    # One learning-rate connection per trainable parameter.
    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights), (learning_rate, layer0.bias),
        (learning_rate, layer1.weights), (learning_rate, layer1.bias),
        (learning_rate, layer2.weights), (learning_rate, layer2.bias),
        (learning_rate, layer3.weights), (learning_rate, layer3.bias),
        (learning_rate, chord_classifier.weights),
        (learning_rate, chord_classifier.bias)
    ])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, dropout],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        losses=[chord_nll],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    # Validator wiring omits the dropout connections entirely.
    validator_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, chord_nll.likelihood),
        (chord_idx, chord_nll.target_idx)
    ])

    # NOTE(review): dropout is toggled off in place on the shared nodes
    # before the validator/predictor graphs are built, while the trainer
    # was built with dropout enabled — presumably each Graph snapshots
    # node state at construction time. Confirm against optimus.Graph.
    for n in [layer2, layer3]:
        n.disable_dropout()

    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data, chord_idx],
                              nodes=all_nodes,
                              connections=validator_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[chord_nll])

    posterior = optimus.Output(name='posterior')

    # Predictor graph: feed-forward only, emits the softmax posterior.
    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input), (layer0.output, layer1.input),
        (layer1.output, layer2.input), (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)
    ])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[input_data],
                              nodes=all_nodes,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    s = D.create_uniform_chord_stream(stash,
                                      TIME_DIM,
                                      pitch_shift=6,
                                      vocab_dim=VOCAB,
                                      working_size=10)
    # Corrupt training observations: additive white noise, then randomly
    # dropped frames, before batching.
    stream = S.minibatch(FX.drop_frames(FX.awgn(s, 0.05), 0.1),
                         batch_size=BATCH_SIZE)

    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE, dropout.name: DROPOUT}

    # Persist graph definitions before training starts.
    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    print "Starting '%s'" % args.trial_name
    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)
예제 #26
0
def main(args):
    """Build and train a convnet fretboard (tablature) estimator.

    A 3-conv + dense stack drives a per-string multi-softmax over fret
    positions, trained with mean squared error against fret bitmaps drawn
    from an optimus file queue. After fitting, the validator and predictor
    graph definitions are written to the output directory.

    Parameters
    ----------
    args : object
        Namespace with attributes: training_file, trial_name,
        model_directory, validator_file, predictor_file.
    """
    # Graph inputs: observation, target bitmap, and SGD step size.
    cqt = optimus.Input(name='cqt', shape=(None, 1, TIME_DIM, 252))
    fret_bitmap = optimus.Input(name='fret_bitmap', shape=(None, 6, FRET_DIM))
    learning_rate = optimus.Input(name='learning_rate', shape=None)

    # Convolutional front end followed by one dense layer.
    conv0 = optimus.Conv3D(name='layer0',
                           input_shape=cqt.shape,
                           weight_shape=(12, 1, 3, 19),
                           pool_shape=(1, 3),
                           act_type='relu')
    conv1 = optimus.Conv3D(name='layer1',
                           input_shape=conv0.output.shape,
                           weight_shape=(16, None, 3, 15),
                           act_type='relu')
    conv2 = optimus.Conv3D(name='layer2',
                           input_shape=conv1.output.shape,
                           weight_shape=(20, None, 1, 15),
                           act_type='relu')
    dense3 = optimus.Affine(name='layer3',
                            input_shape=conv2.output.shape,
                            output_shape=(None, 512),
                            act_type='relu')
    # One softmax per string over FRET_DIM fret positions.
    fretboard = optimus.MultiSoftmax(name='fretboard',
                                     input_shape=dense3.output.shape,
                                     output_shape=(None, 6, FRET_DIM),
                                     act_type='linear')

    node_chain = [conv0, conv1, conv2, dense3, fretboard]

    # Loss node.
    mse = optimus.MeanSquaredError(name="mean_squared_error")

    # Feed-forward wiring shared by all three graphs.
    ff_edges = ([(cqt, conv0.input)] +
                [(a.output, b.input)
                 for a, b in zip(node_chain, node_chain[1:])])

    trainer_edges = optimus.ConnectionManager(
        ff_edges + [(fretboard.output, mse.prediction),
                    (fret_bitmap, mse.target)])

    # One (learning_rate -> param) connection per weight and bias.
    update_manager = optimus.ConnectionManager(
        [(learning_rate, getattr(node, param))
         for node in node_chain
         for param in ('weights', 'bias')])

    trainer = optimus.Graph(name=GRAPH_NAME,
                            inputs=[cqt, fret_bitmap, learning_rate],
                            nodes=node_chain,
                            connections=trainer_edges.connections,
                            outputs=[optimus.Graph.TOTAL_LOSS],
                            losses=[mse],
                            updates=update_manager.connections)

    # NOTE(review): only the output layer's weights are re-randomized here;
    # the other nodes keep whatever initialization optimus gives them.
    optimus.random_init(fretboard.weights)

    # Validator: same wiring/loss, but no learning-rate input or updates.
    validator = optimus.Graph(name=GRAPH_NAME,
                              inputs=[cqt, fret_bitmap],
                              nodes=node_chain,
                              connections=trainer_edges.connections,
                              outputs=[optimus.Graph.TOTAL_LOSS],
                              losses=[mse])

    posterior = optimus.Output(name='posterior')

    # Predictor: feed-forward only, emitting the fretboard posterior.
    predictor_edges = optimus.ConnectionManager(
        ff_edges + [(fretboard.output, posterior)])

    predictor = optimus.Graph(name=GRAPH_NAME,
                              inputs=[cqt],
                              nodes=node_chain,
                              connections=predictor_edges.connections,
                              outputs=[posterior])

    # Data source: sampled CQT windows with pitch shifting, fret indexes
    # expanded to bitmaps.
    source = optimus.Queue(optimus.File(args.training_file),
                           transformers=[
                               T.cqt_sample(cqt.shape[2]),
                               T.pitch_shift(MAX_FRETS, bins_per_pitch=3),
                               T.fret_indexes_to_bitmap(FRET_DIM)
                           ],
                           **SOURCE_ARGS)

    driver = optimus.Driver(graph=trainer,
                            name=args.trial_name,
                            output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE}

    # Train first; graph definitions are saved afterwards.
    driver.fit(source, hyperparams=hyperparams, **DRIVER_ARGS)

    validator_file = path.join(driver.output_directory, args.validator_file)
    optimus.save(validator, def_file=validator_file)

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)
예제 #27
0
def main(args):
    # 1.1 Create Inputs
    input_data = optimus.Input(
        name='cqt',
        shape=(None, OCTAVE_DIM, TIME_DIM, PITCH_DIM))

    chord_idx = optimus.Input(
        name='chord_idx',
        shape=(None,),
        dtype='int32')

    learning_rate = optimus.Input(
        name='learning_rate',
        shape=None)

    margin = optimus.Input(
        name='margin',
        shape=None)

    # 1.2 Create Nodes
    layer0 = optimus.Conv3D(
        name='layer0',
        input_shape=input_data.shape,
        weight_shape=(32, None, 5, 5),
        pool_shape=(2, 3),
        act_type='relu')

    layer1 = optimus.Conv3D(
        name='layer1',
        input_shape=layer0.output.shape,
        weight_shape=(64, None, 5, 7),
        act_type='relu')

    layer2 = optimus.Conv3D(
        name='layer2',
        input_shape=layer1.output.shape,
        weight_shape=(128, None, 3, 6),
        act_type='relu')

    layer3 = optimus.Affine(
        name='layer3',
        input_shape=layer2.output.shape,
        output_shape=(None, 1024,),
        act_type='relu')

    chord_estimator = optimus.Softmax(
        name='chord_estimator',
        input_shape=layer3.output.shape,
        output_shape=(None, VOCAB,),
        act_type='sigmoid')

    all_nodes = [layer0, layer1, layer2, layer3, chord_classifier]

    log = optimus.Log(name='log')
    neg = optimus.Gain(name='gain')
    neg.weight = np.array(-1)

    energy = optimus.SelectIndex(name='selector')

    loss = optimus.Mean(name='total_loss')

    # 1.1 Create Losses
    chord_margin = optimus.Margin(
        name="chord_margin",
        mode='max')

    # 2. Define Edges
    trainer_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_estimator.input),
        (chord_estimator.output, log.input),
        (log.output, neg.input),
        (neg.output, energy.input),
        (chord_idx, energy.index),
        (energy.output, loss.input)])

    update_manager = optimus.ConnectionManager([
        (learning_rate, layer0.weights),
        (learning_rate, layer0.bias),
        (learning_rate, layer1.weights),
        (learning_rate, layer1.bias),
        (learning_rate, layer2.weights),
        (learning_rate, layer2.bias),
        (learning_rate, layer3.weights),
        (learning_rate, layer3.bias),
        (learning_rate, chord_estimator.weights),
        (learning_rate, chord_estimator.bias)])

    trainer = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data, chord_idx, learning_rate, margin],
        nodes=all_nodes,
        connections=trainer_edges.connections,
        outputs=[optimus.Graph.TOTAL_LOSS],
        loss=[loss.output],
        updates=update_manager.connections)

    for n in all_nodes:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    if args.init_param_file:
        print "Loading parameters: %s" % args.init_param_file
        trainer.load_param_values(args.init_param_file)

    for n in all_nodes[-2:]:
        optimus.random_init(n.weights)
        optimus.random_init(n.bias)

    posterior = optimus.Output(
        name='posterior')

    predictor_edges = optimus.ConnectionManager([
        (input_data, layer0.input),
        (layer0.output, layer1.input),
        (layer1.output, layer2.input),
        (layer2.output, layer3.input),
        (layer3.output, chord_classifier.input),
        (chord_classifier.output, posterior)])

    predictor = optimus.Graph(
        name=GRAPH_NAME,
        inputs=[input_data],
        nodes=all_nodes,
        connections=predictor_edges.connections,
        outputs=[posterior])

    # 3. Create Data
    print "Loading %s" % args.training_file
    stash = biggie.Stash(args.training_file)
    stream = D.create_stash_stream(
        stash, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, pool_size=25)

    if args.secondary_source:
        stash2 = biggie.Stash(args.secondary_source)
        stream2 = D.create_uniform_chord_stream(
            stash2, TIME_DIM, pitch_shift=0, vocab_dim=VOCAB, working_size=5)
        stream = S.mux([stream, stream2], [0.5, 0.5])

    stream = S.minibatch(stream, batch_size=BATCH_SIZE)

    print "Starting '%s'" % args.trial_name
    driver = optimus.Driver(
        graph=trainer,
        name=args.trial_name,
        output_directory=args.model_directory)

    hyperparams = {learning_rate.name: LEARNING_RATE,
                   margin.name: MARGIN}

    predictor_file = path.join(driver.output_directory, args.predictor_file)
    optimus.save(predictor, def_file=predictor_file)

    driver.fit(stream, hyperparams=hyperparams, **DRIVER_ARGS)