def __init__(self):

    #### Construct the model ####
    observation = cntk.ops.input_variable(STATE_DIM, np.float32, name="s")
    q_target = cntk.ops.input_variable(NUM_ACTIONS, np.float32, name="q")

    # Define the structure of the neural network
    self.model = self.create_multi_layer_neural_network(
        observation, NUM_ACTIONS, 2)

    #### Define the trainer ####
    self.learning_rate = cntk.learner.training_parameter_schedule(
        0.0001, cntk.UnitType.sample)
    self.momentum = cntk.learner.momentum_as_time_constant_schedule(0.99)

    # The loss is the mean squared error between predicted and target
    # Q-values; the same expression doubles as the evaluation metric
    # that the trainer reports.
    self.loss = cntk.ops.reduce_mean(
        cntk.ops.square(self.model - q_target), axis=0)
    mean_error = cntk.ops.reduce_mean(
        cntk.ops.square(self.model - q_target), axis=0)

    learner = cntk.adam_sgd(self.model.parameters,
                            self.learning_rate,
                            momentum=self.momentum)
    self.trainer = cntk.Trainer(self.model, self.loss, mean_error, learner)
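
A minimal, untested driving sketch for the trainer built above; Agent is a hypothetical name for the class that owns this __init__, and states / targets stand in for float32 NumPy batches of shape (batch, STATE_DIM) and (batch, NUM_ACTIONS):

agent = Agent()
# Recover the two input variables declared in __init__; argument order is
# not guaranteed, so a robust caller would match them by their names "s"/"q".
s, q = agent.loss.arguments
agent.trainer.train_minibatch({s: states, q: targets})
print(agent.trainer.previous_minibatch_loss_average)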
Example #2
def __call__(self, parameters, opt_learning_rate=0.001, **kwargs):
    # Build a per-minibatch learning-rate schedule and a fixed momentum,
    # then return an Adam learner over the given parameters.
    lr_per_minibatch = cntk.learning_rate_schedule(
        lr=opt_learning_rate, unit=cntk.UnitType.minibatch)
    momentum = cntk.momentum_schedule(momentum=0.99)
    return cntk.adam_sgd(parameters=parameters,
                         lr=lr_per_minibatch,
                         momentum=momentum)
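
A hedged usage sketch for this learner factory; LearnerFactory, model, loss, and metric are assumed names that are not part of the original snippet:

factory = LearnerFactory()
learner = factory(model.parameters, opt_learning_rate=0.0005)
trainer = cntk.Trainer(model, (loss, metric), learner)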
Example #3
import os

import cntk as C
from cntk.io import (HTKFeatureDeserializer, HTKMLFDeserializer,
                     MinibatchSource, StreamDef, StreamDefs)
from cntk.layers import Dense, For, LSTM, Recurrence, Sequential

# data_path and abs_path are assumed to be defined by the surrounding test
# module (pointing at the HTK sample data and the test's own directory).
def test_htk_deserializers():
    mbsize = 640
    epoch_size = 1000 * mbsize
    lr = [0.001]

    feature_dim = 33
    num_classes = 132
    context = 2

    os.chdir(data_path)

    features_file = "glob_0000.scp"
    labels_file = "glob_0000.mlf"
    label_mapping_file = "state.list"

    fd = HTKFeatureDeserializer(
        StreamDefs(amazing_features=StreamDef(
            shape=feature_dim, context=(context, context), scp=features_file)))

    ld = HTKMLFDeserializer(
        label_mapping_file,
        StreamDefs(
            awesome_labels=StreamDef(shape=num_classes, mlf=labels_file)))

    reader = MinibatchSource([fd, ld])

    features = C.input_variable(((2 * context + 1) * feature_dim))
    labels = C.input_variable((num_classes))

    model = Sequential(
        [For(range(3), lambda: Recurrence(LSTM(256))),
         Dense(num_classes)])
    z = model(features)
    ce = C.cross_entropy_with_softmax(z, labels)
    errs = C.classification_error(z, labels)

    # low_memory=True selects the lighter-weight Adam variant, and the
    # gradient_clipping_* arguments clip each per-sample gradient at 15.
    learner = C.adam_sgd(z.parameters,
                         lr=C.learning_rate_schedule(lr, C.UnitType.sample,
                                                     epoch_size),
                         momentum=C.momentum_as_time_constant_schedule(1000),
                         low_memory=True,
                         gradient_clipping_threshold_per_sample=15,
                         gradient_clipping_with_truncation=True)
    trainer = C.Trainer(z, (ce, errs), learner)

    input_map = {
        features: reader.streams.amazing_features,
        labels: reader.streams.awesome_labels
    }

    pp = C.ProgressPrinter(freq=0)
    # just run and verify it doesn't crash
    for i in range(3):
        mb_data = reader.next_minibatch(mbsize, input_map=input_map)
        trainer.train_minibatch(mb_data)
        pp.update_with_trainer(trainer, with_metric=True)
    assert True
    os.chdir(abs_path)
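
A small optional follow-up, not part of the original test: after the smoke loop, the same ProgressPrinter instance can print an aggregate of the minibatches it has tracked.

# Assumes the pp instance created in the test above is still in scope.
pp.epoch_summary(with_metric=True)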
Example #5
def __init__(self):

    #### Construct the model ####
    observation = cntk.ops.input_variable(STATE_DIM, np.float32, name="s")
    q_target = cntk.ops.input_variable(NUM_ACTIONS, np.float32, name="q")

    # Define the structure of the neural network
    self.model = self.create_multi_layer_neural_network(
        observation, NUM_ACTIONS, 3)

    #### Define the trainer ####
    self.learning_rate = 0.00025

    # Mean squared error against the target Q-values; the same expression
    # is reused as the reported evaluation metric.
    self.loss = cntk.ops.reduce_mean(
        cntk.ops.square(self.model - q_target), axis=0)
    mean_error = cntk.ops.reduce_mean(
        cntk.ops.square(self.model - q_target), axis=0)

    # The minibatch learning rate is divided by BATCH_SIZE to obtain an
    # effective per-sample rate.
    learner = cntk.adam_sgd(self.model.parameters,
                            self.learning_rate / self.BATCH_SIZE,
                            momentum=0.9)
    self.trainer = cntk.Trainer(self.model, self.loss, mean_error, learner)
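
The manual division by BATCH_SIZE above can instead be expressed through a schedule, letting CNTK do the per-sample scaling itself; an untested sketch of the learner lines inside this __init__, using the same cntk.learner API as Example #1:

# UnitType.minibatch declares that the rate applies to a whole minibatch,
# so CNTK scales it by the actual minibatch size internally.
lr_schedule = cntk.learner.training_parameter_schedule(
    0.00025, cntk.UnitType.minibatch)
learner = cntk.adam_sgd(self.model.parameters, lr_schedule,
                        momentum=cntk.learner.momentum_schedule(0.9))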
Example #6
# `output` and `output_dim` are assumed to be defined by the preceding
# network-construction code; perf_counter comes from the standard library.
from time import perf_counter

# Define the label as the other input parameter of the trainer
labels = cntk.ops.input_variable(output_dim, np.float32)

# Initialize the parameters for the trainer
train_minibatch_size = 50
learning_rate = cntk.learner.training_parameter_schedule(1e-4, cntk.UnitType.sample)
momentum = cntk.learner.momentum_as_time_constant_schedule(0.99)

# Define the loss function
loss = cntk.ops.cross_entropy_with_softmax(output, labels)

# Define the function that calculates classification error
label_error = cntk.ops.classification_error(output, labels)

# Instantiate the trainer object to drive the model training
learner = cntk.adam_sgd(output.parameters, learning_rate, momentum)
trainer = cntk.Trainer(output, loss, label_error, [learner])
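
# Optional, untested aside (not in the original script): trainer state can
# be saved between epochs with trainer.save_checkpoint("cnn.ckpt") and
# restored later with trainer.restore_from_checkpoint("cnn.ckpt").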


'''
-----------------------------------------
Training the Convolutional Neural Network
-----------------------------------------
'''
num_training_epoch = 20
training_progress_output_freq = 100
training_start_time = perf_counter()

for epoch in range(num_training_epoch):

    sample_count = 0