def test_cifar_resnet_error(device_id):
    """Run the CIFAR-10 ResNet example end-to-end and check its test error.

    Args:
        device_id: CNTK GPU device index to run on.  Previously this
            argument was silently ignored and GPU 0 was always used;
            it is now honored.  (NOTE(review): if the test harness passes
            a negative id to request CPU, a CPU branch would be needed —
            confirm against the caller.)

    Raises:
        AssertionError: if the observed test error is not within
            ``TOLERANCE_ABSOLUTE`` of the expected value.
    """
    # FIX: use the requested device instead of hard-coding GPU 0.
    target_device = DeviceDescriptor.gpu_device(device_id)
    DeviceDescriptor.set_default_device(target_device)

    try:
        # Prefer the external test-data drop when the env var is set.
        base_path = os.path.join(
            os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
            *"Image/CIFAR/v0/cifar-10-batches-py".split("/"))
    except KeyError:
        # Fall back to the in-repo example data, addressed relative to
        # this file's location.
        base_path = os.path.join(
            *"../../../../Examples/Image/Miscellaneous/CIFAR-10/cifar-10-batches-py"
            .split("/"))

    base_path = os.path.normpath(base_path)
    # NOTE(review): chdir is a process-wide side effect; it leaks into any
    # test that runs afterwards.
    os.chdir(os.path.join(base_path, '..'))

    test_error = cifar_resnet(base_path)
    expected_test_error = 0.7

    assert np.allclose(test_error, expected_test_error,
                       atol=TOLERANCE_ABSOLUTE)
pe = classification_error(classifier_output, label_var) # Instantiate the trainer object to drive the model training lr = learning_rates_per_sample(0.0078125) trainer = Trainer(classifier_output, ce, pe, [sgd_learner(classifier_output.owner.parameters(), lr)]) # Get minibatches of images to train with and perform model training mb_size = 32 training_progress_output_freq = 20 num_mbs = 1000 for i in range(0, num_mbs): mb = minibatch_source.get_next_minibatch(mb_size) # Specify the mapping of input variables in the model to actual minibatch data to be trained with arguments = { image_input: mb[features_si].m_data, label_var: mb[labels_si].m_data } trainer.train_minibatch(arguments) print_training_progress(trainer, i, training_progress_output_freq) if __name__ == '__main__': # Specify the target device to be used for computing target_device = DeviceDescriptor.gpu_device(0) DeviceDescriptor.set_default_device(target_device) cifar_resnet()
# Instantiate the resnet classification model classifier_output = resnet_classifer(image_input, num_classes) ce = cross_entropy_with_softmax(classifier_output, label_var) pe = classification_error(classifier_output, label_var) # Instantiate the trainer object to drive the model training lr = learning_rates_per_sample(0.0078125) trainer = Trainer(classifier_output, ce, pe, [sgd_learner(classifier_output.owner.parameters(), lr)]) # Get minibatches of images to train with and perform model training mb_size = 32 training_progress_output_freq = 20 num_mbs = 1000 for i in range(0, num_mbs): mb=minibatch_source.get_next_minibatch(mb_size) # Specify the mapping of input variables in the model to actual minibatch data to be trained with arguments = {image_input : mb[features_si].m_data, label_var : mb[labels_si].m_data} trainer.train_minibatch(arguments) print_training_progress(trainer, i, training_progress_output_freq) if __name__=='__main__': # Specify the target device to be used for computing target_device = DeviceDescriptor.gpu_device(0) DeviceDescriptor.set_default_device(target_device) cifar_resnet()