Example #1
    # Assumes: import numpy as np; from cntk import input_variable, squared_error;
    # from cntk.layers import Dense
    def _setup_test_model(self):
        inputs = input_variable(shape=(1, ), dtype=np.float32)
        outputs = input_variable(shape=(1, ), dtype=np.float32)

        q = Dense(1, activation=None)(inputs)
        loss = squared_error(q, outputs)

        return {'inputs': inputs, 'outputs': outputs, 'f': q, 'loss': loss}
Example #2
    def _setup_test_model(self):
        inputs = input_variable(shape=(1,), dtype=np.float32)
        outputs = input_variable(shape=(1,), dtype=np.float32)

        q = Dense(1, activation=None)(inputs)
        loss = squared_error(q, outputs)

        return {
            'inputs': inputs,
            'outputs': outputs,
            'f': q,
            'loss': loss
        }
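Examples #1 and #2 build the same one-unit regression model; only the formatting of the returned dict differs. As a rough illustration of how such a dict can be consumed, here is a minimal, self-contained sketch (assumed wiring, with made-up data and learning rate; not code from either source repository):

# Minimal sketch: drive the dict returned by _setup_test_model through a few
# CNTK training steps. Data values and hyperparameters are illustrative only.
import numpy as np
import cntk as C
from cntk.layers import Dense

inputs = C.input_variable(shape=(1,), dtype=np.float32)
outputs = C.input_variable(shape=(1,), dtype=np.float32)
q = Dense(1, activation=None)(inputs)
loss = C.squared_error(q, outputs)
model = {'inputs': inputs, 'outputs': outputs, 'f': q, 'loss': loss}

learner = C.sgd(model['f'].parameters, lr=C.learning_parameter_schedule(0.1))
trainer = C.Trainer(model['f'], (model['loss'], model['loss']), [learner])
x = np.array([[1.0]], dtype=np.float32)
y = np.array([[2.0]], dtype=np.float32)
for _ in range(10):
    trainer.train_minibatch({model['inputs']: x, model['outputs']: y})

Passing the loss as both criterion entries mirrors Example #3 below, which likewise reuses squared_error as the evaluation function.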
Example #3
    label = input_variable(output_dim, np.float32)

    """
    Create model, reader and map
    """
    netout = create_model(input_dim, output_dim, hidden_dim, feature)
    training_reader = create_reader(data_file_path, True, input_dim, output_dim)
    input_map = {
    label  : training_reader.streams.labels,
    feature  : training_reader.streams.features
    }
    
    """
    Set loss and evaluation functions
    """
    loss = squared_error(netout, label)    
    evaluation = squared_error(netout, label)
    lr_per_minibatch=learning_rate_schedule(learning_rate, UnitType.minibatch)

    """
    Instantiate the trainer object to drive the model training
    See: https://www.cntk.ai/pythondocs/cntk.learners.html
    """
    learner = sgd(netout.parameters, lr=lr_per_minibatch)    

    # Other learners to try
    #learner = momentum_sgd(netout.parameters, lr=lr_per_minibatch, momentum = momentum_schedule(0.9))
    #learner = adagrad(netout.parameters, lr=lr_per_minibatch) 

    progress_printer = ProgressPrinter(minibatch_size)
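
The fragment breaks off before the training loop itself. A hedged sketch of how the pieces above are typically assembled in CNTK (the name num_minibatches_to_train and the exact loop shape are assumptions, not from the original file):

    # Assumed continuation: build the trainer and drive it from the reader.
    trainer = Trainer(netout, (loss, evaluation), [learner], [progress_printer])

    for i in range(num_minibatches_to_train):
        # Map reader streams onto the network inputs defined above.
        data = training_reader.next_minibatch(minibatch_size, input_map=input_map)
        trainer.train_minibatch(data)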
    
Example #4
        print(str_out)
        assert False

    results = re.findall("Completed successfully.", str_out)
    if len(results) != 2:
        print(str_out)
        assert False

# Snippet assumes: import re; import cntk as C;
# from cntk import parameter, plus, squared_error
if __name__ == '__main__':
    in1 = C.input_variable(shape=1)
    labels = C.input_variable(shape=1)
    p1 = parameter(shape=1)
    p2 = parameter(shape=1)
    n = plus(in1, p1, name='n')
    z = plus(n, p2, name='z')
    ce = squared_error(z, labels)

    momentum_schedule = C.momentum_schedule_per_sample(0.9990913221888589)
    lr_per_sample = C.learning_parameter_schedule_per_sample(0.007)
    dist_learners = [
        C.distributed.data_parallel_distributed_learner(C.momentum_sgd([p1], lr_per_sample, momentum_schedule, True)),
        C.distributed.data_parallel_distributed_learner(C.momentum_sgd([p2], lr_per_sample, momentum_schedule, True))
    ]

    trainer = C.Trainer(z, ce, dist_learners)
    in1_value = [[1]]
    label_value = [[0]]
    arguments = {in1: in1_value, labels: label_value}
    z_output = z.output

    def check_samples(learners, expected_number_of_samples):
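        # Sketch of the truncated body (an assumption, not the original code):
        # every CNTK learner exposes total_number_of_samples_seen, so the check
        # plausibly compares it against the expected count for each learner.
        for learner in learners:
            assert learner.total_number_of_samples_seen == expected_number_of_samples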
Example #5
        assert False

    results = re.findall("Completed successfully.", str_out)
    if len(results) != 2:
        print(str_out)
        assert False


if __name__ == '__main__':
    in1 = C.input_variable(shape=1)
    labels = C.input_variable(shape=1)
    p1 = parameter(shape=1)
    p2 = parameter(shape=1)
    n = plus(in1, p1, name='n')
    z = plus(n, p2, name='z')
    ce = squared_error(z, labels)

    momentum_schedule = C.momentum_schedule_per_sample(0.9990913221888589)
    lr_per_sample = C.learning_parameter_schedule_per_sample(0.007)
    learner1 = C.distributed.data_parallel_distributed_learner(
        C.momentum_sgd([p1], lr_per_sample, momentum_schedule, True))
    learner1.set_as_metric_aggregator()
    dist_learners = [
        learner1,
        C.distributed.data_parallel_distributed_learner(
            C.momentum_sgd([p2], lr_per_sample, momentum_schedule, True))
    ]

    trainer = C.Trainer(z, ce, dist_learners)
    in1_value = [[1]]
    label_value = [[0]]
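
Example #5 breaks off here. Judging from the nearly identical Example #4 above, it continues by feeding these values through the trainer; the set_as_metric_aggregator() call earlier appears to mark learner1 as the learner that aggregates the evaluation metric when several distributed learners share one trainer. A hedged continuation sketch (the finalize call is standard CNTK MPI teardown, though its placement here is an assumption):

    # Assumed continuation, modeled on Example #4 (not the original tail).
    arguments = {in1: in1_value, labels: label_value}
    trainer.train_minibatch(arguments)

    # Multi-worker CNTK jobs must finalize the communicator before exiting.
    C.distributed.Communicator.finalize()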