Example No. 1
def seqcla():

    # LSTM params
    input_dim = 50
    output_dim = 128
    cell_dim = 128
    
    # model
    num_labels = 5
    vocab = 2000
    embed_dim = 50    

    t = C.dynamic_axis(name='t')
    # temporarily using cntk1's SparseInput; cntk2's Input() will accept sparse as a parameter
    features = cntk1.SparseInput(vocab, dynamicAxis=t, name='features')    
    labels = C.input(num_labels, name='labels')
   
    train_reader = C.CNTKTextFormatReader(train_file)

    # setup embedding matrix
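    # learning_rate_multiplier=0.0 keeps the pretrained embeddings frozen,
    # i.e. the values loaded from embedding_file are never updated in training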
    embedding = C.parameter((embed_dim, vocab), learning_rate_multiplier=0.0, 
                             init_from_file_path=embedding_file)

    # get the vector representing the word
    sequence = C.times(embedding, features, name='sequence')
    
    # add an LSTM layer
    L = lstm_layer(output_dim, cell_dim, sequence, input_dim)
    
    # add a softmax layer on top
    w = C.parameter((num_labels, output_dim), name='w')
    b = C.parameter((num_labels,), name='b')
    z = C.plus(C.times(w, L), b, name='z')
    z.tag = "output"
    
    # and reconcile the shared dynamic axis
    pred = C.reconcile_dynamic_axis(z, labels, name='pred')    
    
    ce = C.cross_entropy_with_softmax(labels, pred)
    ce.tag = "criterion"
    
    # epoch_size=0 makes one full pass over the data count as an epoch
    my_sgd = C.SGDParams(epoch_size=0, minibatch_size=10, learning_rates_per_mb=0.1, max_epochs=3)
    
    with C.LocalExecutionContext('seqcla') as ctx:
        # train the model
        ctx.train(root_nodes=[ce], training_params=my_sgd, input_map=train_reader.map(
                  features, alias='x', dim=vocab, format='Sparse').map(
                  labels, alias='y', dim=num_labels, format='Dense'))        
        
        # write out the predictions
        ctx.write(input_map=train_reader.map(
                  features, alias='x', dim=vocab, format='Sparse').map(
                  labels, alias='y', dim=num_labels, format='Dense'))
                  
        # do some manual accuracy testing
        acc = calc_accuracy(train_file, ctx.output_filename_base)
        
        # and test for the same number...
        TOLERANCE_ABSOLUTE = 1E-02
        assert np.allclose(acc, 0.6006415396952687, atol=TOLERANCE_ABSOLUTE)
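
Note: train_file, embedding_file and lstm_layer are module-level names defined elsewhere in the test file, and calc_accuracy is not shown either. A minimal sketch of what calc_accuracy might look like, assuming the write action stores one row of scores per sample in a file named after the output node 'z' and that the dense labels follow the '|y' marker in the CNTKTextFormat training file (both are assumptions, not part of the original):

import numpy as np

def calc_accuracy(train_file, output_filename_base):
    # hypothetical reconstruction of the missing helper
    scores = np.loadtxt(output_filename_base + '.z')  # assumed output file naming
    labels = []
    with open(train_file) as f:
        for line in f:
            if '|y' in line:
                # grab the dense one-hot label after the '|y' marker
                dense = line.split('|y')[1].split('|')[0].split()
                labels.append(np.argmax([float(v) for v in dense]))
    # accuracy = fraction of samples whose argmax score matches the label
    return np.mean(np.argmax(scores, axis=1) == np.array(labels))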
Example No. 2
def train_eval_logistic_regression_from_file(criterion_name=None,
                                             eval_name=None,
                                             device_id=-1):
    cur_dir = os.path.dirname(__file__)

    # Using data from https://github.com/Microsoft/CNTK/wiki/Tutorial
    train_file = os.path.join(cur_dir, "Train-3Classes.txt")
    test_file = os.path.join(cur_dir, "Test-3Classes.txt")

    X = C.input(2)
    y = C.input(3)

    W = C.parameter(value=np.zeros(shape=(3, 2)))
    b = C.parameter(value=np.zeros(shape=(3, 1)))

    out = C.times(W, X) + b
    out.tag = 'output'
    ce = C.cross_entropy_with_softmax(y, out)
    ce.name = criterion_name
    ce.tag = 'criterion'
    eval = C.ops.square_error(y, out)
    eval.tag = 'eval'
    eval.name = eval_name

    # training data readers
    train_reader = C.CNTKTextFormatReader(train_file, randomize=None)

    # testing data readers
    test_reader = C.CNTKTextFormatReader(test_file, randomize=None)

    my_sgd = C.SGDParams(epoch_size=0,
                         minibatch_size=25,
                         learning_rates_per_mb=0.1,
                         max_epochs=3)

    with C.LocalExecutionContext('logreg') as ctx:
        ctx.device_id = device_id

        ctx.train(root_nodes=[ce, eval],
                  training_params=my_sgd,
                  input_map=train_reader.map(X, alias='I',
                                             dim=2).map(y, alias='L', dim=3))

        result = ctx.test(root_nodes=[ce, eval],
                          input_map=test_reader.map(X, alias='I',
                                                    dim=2).map(y,
                                                               alias='L',
                                                               dim=3))

        return result
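
The aliases 'I' and 'L' name the column markers inside the CNTKTextFormat files, so each line of Train-3Classes.txt would pair a two-dimensional feature vector with a one-hot label, along the lines of (illustrative values):

|I 3.7 14.6 |L 1 0 0

A minimal invocation, with illustrative node names:

result = train_eval_logistic_regression_from_file(criterion_name='crit_node',
                                                  eval_name='eval_node',
                                                  device_id=-1)
print(result)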
Example No. 3
def train_eval_logistic_regression_with_numpy(criterion_name=None,
                                              eval_name=None,
                                              device_id=-1):

    # for repro and tests :-)
    np.random.seed(1)

    train_X, train_y = synthetic_data(train_N, feature_dim, num_classes)
    test_X, test_y = synthetic_data(test_N, feature_dim, num_classes)

    # Set up the training data for CNTK. Before writing the CNTK configuration,
    # the data will be attached to X.reader.batch and y.reader.batch and then
    # serialized.
    X = C.input_numpy(train_X)
    y = C.input_numpy(train_y)

    # define our network -- one weight tensor and a bias
    W = C.parameter(value=np.zeros(shape=(num_classes, feature_dim)))
    b = C.parameter(value=np.zeros(shape=(num_classes, 1)))
    out = C.times(W, X) + b

    ce = C.cross_entropy_with_softmax(y, out)
    ce.tag = 'criterion'
    ce.name = criterion_name

    eval = C.ops.cntk1.SquareError(y, out)
    eval.tag = 'eval'
    eval.name = eval_name

    my_sgd = C.SGDParams(epoch_size=0,
                         minibatch_size=25,
                         learning_rates_per_mb=0.1,
                         max_epochs=3)

    with C.LocalExecutionContext('logreg_numpy',
                                 device_id=device_id,
                                 clean_up=True) as ctx:
        ctx.train(root_nodes=[ce, eval], training_params=my_sgd)

        # For testing, we attach the test data to the input nodes.
        X.reader.batch, y.reader.batch = test_X, test_y
        result = ctx.test(root_nodes=[ce, eval])
        return result
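
synthetic_data, train_N, test_N, feature_dim and num_classes are defined elsewhere in the module. A plausible sketch of the generator, assuming Gaussian clusters around per-class centers with one-hot labels (the exact recipe is an assumption):

import numpy as np

def synthetic_data(N, feature_dim, num_classes):
    # hypothetical reconstruction: one Gaussian cluster per class
    Y = np.random.randint(0, num_classes, size=N)
    X = np.random.randn(N, feature_dim) + 3 * (Y[:, None] + 1)
    # one-hot encode the class indices
    labels = np.eye(num_classes)[Y]
    return X.astype(np.float32), labels.astype(np.float32)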
Example No. 4
def train_eval_mnist_onelayer_from_file(criterion_name=None, eval_name=None):

    # Network definition
    feat_dim = 784
    label_dim = 10
    hidden_dim = 200

    cur_dir = os.path.dirname(__file__)

    training_filename = os.path.join(cur_dir, "Data", "Train-28x28_text.txt")
    test_filename = os.path.join(cur_dir, "Data", "Test-28x28_text.txt")

    features = C.input(feat_dim)
    features.name = 'features'

    feat_scale = C.constant(0.00390625)  # 1/256: maps raw pixel values [0, 255] into [0, 1)
    feats_scaled = C.element_times(features, feat_scale)

    labels = C.input(label_dim)
    labels.tag = 'label'
    labels.name = 'labels'

    training_reader = C.CNTKTextFormatReader(training_filename)
    test_reader = C.CNTKTextFormatReader(test_filename)

    h1 = add_dnn_sigmoid_layer(feat_dim, hidden_dim, feats_scaled, 1)
    out = add_dnn_layer(hidden_dim, label_dim, h1, 1)
    out.tag = 'output'

    ce = C.cross_entropy_with_softmax(labels, out)
    ce.name = criterion_name
    ce.tag = 'criterion'

    eval = C.ops.square_error(labels, out)
    eval.name = eval_name
    eval.tag = 'eval'

    # Specify the training parameters (settings are scaled down)
    my_sgd = C.SGDParams(epoch_size=600,
                         minibatch_size=32,
                         learning_rates_per_mb=0.1,
                         max_epochs=5,
                         momentum_per_mb=0)

    # Create a context or re-use if already there
    with C.LocalExecutionContext('mnist_one_layer', clean_up=True) as ctx:
        # CNTK actions
        ctx.train(root_nodes=[ce, eval],
                  training_params=my_sgd,
                  input_map=training_reader.map(labels,
                                               alias='labels',
                                               dim=label_dim).map(
                                                   features,
                                                   alias='features',
                                                   dim=feat_dim))

        result = ctx.test(root_nodes=[ce, eval],
                          input_map=test_reader.map(labels,
                                                    alias='labels',
                                                    dim=label_dim).map(
                                                        features,
                                                        alias='features',
                                                        dim=feat_dim))

        return result
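
add_dnn_sigmoid_layer and add_dnn_layer are helpers defined elsewhere in the example module. A minimal sketch of what they might look like with the legacy ops used above (C.parameter, C.times, C.plus; C.sigmoid and the param_scale handling are assumptions):

def add_dnn_sigmoid_layer(in_dim, out_dim, x, param_scale):
    # hypothetical reconstruction: affine transform followed by a sigmoid
    W = C.parameter((out_dim, in_dim)) * param_scale
    b = C.parameter((out_dim, 1)) * param_scale
    return C.sigmoid(C.plus(C.times(W, x), b))

def add_dnn_layer(in_dim, out_dim, x, param_scale):
    # hypothetical reconstruction: plain affine output layer
    W = C.parameter((out_dim, in_dim)) * param_scale
    b = C.parameter((out_dim, 1)) * param_scale
    return C.plus(C.times(W, x), b)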