def test_load_save_constant(tmpdir):
    c = constant(value=[1, 3])
    root_node = c * 5

    result = root_node.eval()
    expected = [[[[5, 15]]]]
    assert np.allclose(result, expected)

    filename = str(tmpdir / 'c_plus_c.mod')
    save_model(root_node, filename)

    loaded_node = load_model(filename)
    loaded_result = loaded_node.eval()
    assert np.allclose(loaded_result, expected)
def test_load_save_unique_input(tmpdir):
    i1 = input_variable((1, 2), name='i1')
    root_node = softmax(i1)

    input1 = [[[1, 2]]]
    result = root_node.eval(input1)
    expected = [[[[0.268941, 0.731059]]]]
    assert np.allclose(result, expected)

    filename = str(tmpdir / 'i_plus_0.mod')
    save_model(root_node, filename)

    loaded_node = load_model(filename)

    # Test specifying the only value for a unique input
    loaded_result = loaded_node.eval(input1)
    assert np.allclose(loaded_result, expected)
def test_load_save_input(tmpdir):
    i1 = input_variable((1, 2), name='i1')
    root_node = abs(i1)

    input1 = [[[-1, 2]]]
    result = root_node.eval({i1: input1})
    expected = [[[[1, 2]]]]
    assert np.allclose(result, expected)

    filename = str(tmpdir / 'i_plus_c_0.mod')
    save_model(root_node, filename)

    loaded_node = load_model(filename)

    # Test specifying the input values by position
    loaded_result = loaded_node.eval([input1])
    assert np.allclose(loaded_result, expected)
def test_load_save_inputs(tmpdir):
    i1 = input_variable((1, 2), name='i1')
    i2 = input_variable((2, 1), name='i2')
    root_node = plus(i1, i2)

    input1 = [[[1, 2]]]
    input2 = [[[[1], [2]]]]
    result = root_node.eval({i1: input1, i2: input2})
    expected = [[[[2, 3], [3, 4]]]]
    assert np.allclose(result, expected)

    filename = str(tmpdir / 'i_plus_i_0.mod')
    save_model(root_node, filename)

    loaded_node = load_model(filename)

    # Test specifying the input nodes by name
    loaded_result = loaded_node.eval({'i1': input1, 'i2': input2})
    assert np.allclose(loaded_result, expected)
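# The four tests above share one round-trip pattern: build a graph, evaluate it,
# persist it with save_model, reload it with load_model, and check that the
# reloaded graph reproduces the original output. Below is a minimal sketch of
# that pattern as a reusable helper; the name assert_model_round_trips is ours
# (not part of the CNTK API) and it assumes the same save_model/load_model/np
# imports the tests use.
def assert_model_round_trips(root_node, filename, expected, *eval_args):
    # evaluate the freshly built graph
    assert np.allclose(root_node.eval(*eval_args), expected)

    # persist, reload, and evaluate the reloaded graph the same way
    save_model(root_node, filename)
    loaded_node = load_model(filename)
    assert np.allclose(loaded_node.eval(*eval_args), expected)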
def cifar_resnet_distributed(data_path, run_test, num_epochs, communicator=None,
                             save_model_filename=None, load_model_filename=None,
                             debug_output=False):
    image_height = 32
    image_width = 32
    num_channels = 3
    num_classes = 10

    feats_stream_name = 'features'
    labels_stream_name = 'labels'

    minibatch_source = create_reader(os.path.join(data_path, 'train_map.txt'),
                                     os.path.join(data_path, 'CIFAR-10_mean.xml'),
                                     True,
                                     distributed_communicator=communicator)

    features_si = minibatch_source[feats_stream_name]
    labels_si = minibatch_source[labels_stream_name]

    # Instantiate the resnet classification model, or load it from file
    if load_model_filename:
        print("Loading model:", load_model_filename)
        classifier_output = persist.load_model(load_model_filename)
        image_input = classifier_output.arguments[0]
    else:
        image_input = input_variable((num_channels, image_height, image_width),
                                     features_si.m_element_type)
        classifier_output = create_resnet_model(image_input, num_classes)

    # Input variable denoting the label data
    label_var = input_variable((num_classes), features_si.m_element_type)

    ce = cross_entropy_with_softmax(classifier_output, label_var)
    pe = classification_error(classifier_output, label_var)

    # Training configuration
    mb_size = 128
    num_mb_per_epoch = 100
    num_mbs = num_mb_per_epoch * num_epochs

    lr_schedule = [1.0 / mb_size] * 80 + [0.1 / mb_size] * 40 + [0.01 / mb_size]
    lr_per_minibatch = learning_rate_schedule(lr_schedule, UnitType.minibatch,
                                              mb_size * num_mb_per_epoch)
    momentum_time_constant = momentum_as_time_constant_schedule(-mb_size / np.log(0.9))

    # create a data-parallel distributed trainer if needed
    dist_trainer = (distributed.data_parallel_distributed_trainer(communicator, False)
                    if communicator else None)

    # Instantiate the trainer object to drive the model training
    trainer = Trainer(classifier_output, ce, pe,
                      [momentum_sgd(classifier_output.parameters,
                                    lr=lr_per_minibatch,
                                    momentum=momentum_time_constant,
                                    l2_regularization_weight=0.0001)],
                      distributed_trainer=dist_trainer)

    # Get minibatches of images to train with and perform model training
    training_progress_output_freq = 100 if communicator else 20
    if debug_output:
        training_progress_output_freq = training_progress_output_freq // 4

    for i in range(0, num_mbs):
        # NOTE: depending on the network, mb_size can be changed dynamically here
        mb = minibatch_source.next_minibatch(mb_size)

        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {image_input: mb[features_si], label_var: mb[labels_si]}
        trainer.train_minibatch(arguments)

        print_training_progress(trainer, i, training_progress_output_freq)

    if save_model_filename:
        print("Saving model:", save_model_filename)
        persist.save_model(classifier_output, save_model_filename)

    if run_test:
        test_minibatch_source = create_reader(os.path.join(data_path, 'test_map.txt'),
                                              os.path.join(data_path, 'CIFAR-10_mean.xml'),
                                              False)
        features_si = test_minibatch_source[feats_stream_name]
        labels_si = test_minibatch_source[labels_stream_name]

        mb_size = 128
        num_mbs = 100

        total_error = 0.0
        for i in range(0, num_mbs):
            mb = test_minibatch_source.next_minibatch(mb_size)

            # Specify the mapping of input variables in the model to actual
            # minibatch data to be evaluated with
            arguments = {image_input: mb[features_si], label_var: mb[labels_si]}
            error = trainer.test_minibatch(arguments)
            total_error += error

        return total_error / num_mbs
    else:
        return 0
def convnet_cifar10_dataaug(reader_train, reader_test, distributed_trainer, max_epochs=80):
    set_computation_network_trace_level(0)

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # apply model to input
    scaled_input = element_times(constant(0.00390625), input_var)  # 1/256
    with default_options(activation=relu, pad=True):
        z = Sequential([
            LayerStack(2, lambda: [
                Convolution((3, 3), 64),
                Convolution((3, 3), 64),
                MaxPooling((3, 3), (2, 2))
            ]),
            LayerStack(2, lambda i: [
                Dense([256, 128][i]),
                Dropout(0.5)
            ]),
            Dense(num_classes, activation=None)
        ])(scaled_input)

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    # training config
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 64

    # Set learning parameters
    lr_per_sample = ([0.0015625] * 20 + [0.00046875] * 20 + [0.00015625] * 20 +
                     [0.000046875] * 10 + [0.000015625])
    lr_schedule = learning_rate_schedule(lr_per_sample, unit=UnitType.sample,
                                         epoch_size=epoch_size)
    momentum_time_constant = [0] * 20 + [600] * 20 + [1200]
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant,
                                                     epoch_size=epoch_size)
    l2_reg_weight = 0.002

    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner, distributed_trainer)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)

        # save model only on worker 0 to avoid file write conflicts
        if distributed_trainer.communicator().current_worker().global_rank == 0:
            persist.save_model(z, os.path.join(model_path,
                                               "ConvNet_CIFAR10_DataAug_{}.dnn".format(epoch)))

    ### Evaluation action
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += trainer.previous_minibatch_sample_count
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def convnet_cifar10(debug_output=False):
    set_computation_network_trace_level(0)

    image_height = 32
    image_width = 32
    num_channels = 3
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width), np.float32)
    label_var = input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    input_removemean = minus(input_var, constant(128))  # subtract approximate pixel mean
    scaled_input = element_times(constant(0.00390625), input_removemean)  # 1/256
    with default_options(activation=relu, pad=True):
        z = Sequential([
            LayerStack(2, lambda: [
                Convolution((3, 3), 64),
                Convolution((3, 3), 64),
                MaxPooling((3, 3), (2, 2))
            ]),
            LayerStack(2, lambda i: [
                Dense([256, 128][i]),
                Dropout(0.5)
            ]),
            Dense(num_output_classes, activation=None)
        ])(scaled_input)

    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    reader_train = create_reader(os.path.join(data_path, 'Train_cntk_text.txt'),
                                 True, input_dim, num_output_classes)

    # training config
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 64

    # Set learning parameters
    lr_per_sample = [0.0015625] * 10 + [0.00046875] * 10 + [0.00015625]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size,
                                         unit=UnitType.sample)
    momentum_time_constant = [0] * 20 + [-minibatch_size / np.log(0.9)]
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant,
                                                     epoch_size=epoch_size)
    l2_reg_weight = 0.002

    # Instantiate the trainer object to drive the model training
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # Get minibatches of images to train with and perform model training
    max_epochs = 30
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, "ConvNet_CIFAR10_{}.dnn".format(epoch)))

    # Load test data
    reader_test = create_reader(os.path.join(data_path, 'Test_cntk_text.txt'),
                                False, input_dim, num_output_classes)

    input_map = {
        input_var: reader_test.streams.features,
        label_var: reader_test.streams.labels
    }

    # Test data for trained model
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def convnet_mnist(debug_output=False):
    image_height = 28
    image_width = 28
    num_channels = 1
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width), np.float32)
    label_var = input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), input_var)  # 1/256

    with default_options(activation=relu, pad=False):
        conv1 = Convolution((5, 5), 32, pad=True)(scaled_input)
        pool1 = MaxPooling((3, 3), (2, 2))(conv1)
        conv2 = Convolution((3, 3), 48)(pool1)
        pool2 = MaxPooling((3, 3), (2, 2))(conv2)
        conv3 = Convolution((3, 3), 64)(pool2)
        f4 = Dense(96)(conv3)
        drop4 = Dropout(0.5)(f4)
        z = Dense(num_output_classes, activation=None)(drop4)

    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    reader_train = create_reader(os.path.join(data_path, 'Train-28x28_cntk_text.txt'),
                                 True, input_dim, num_output_classes)

    # training config
    epoch_size = 60000  # for now we manually specify epoch size
    minibatch_size = 128

    # Set learning parameters
    lr_per_sample = [0.001] * 10 + [0.0005] * 10 + [0.0001]
    lr_schedule = learning_rate_schedule(lr_per_sample, UnitType.sample, epoch_size)
    momentum_time_constant = [0] * 5 + [1024]
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant, epoch_size)

    # Instantiate the trainer object to drive the model training
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # Get minibatches of images to train with and perform model training
    max_epochs = 40
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += data[label_var].num_samples  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, "ConvNet_MNIST_{}.dnn".format(epoch)))

    # Load test data
    reader_test = create_reader(os.path.join(data_path, 'Test-28x28_cntk_text.txt'),
                                False, input_dim, num_output_classes)

    input_map = {
        input_var: reader_test.streams.features,
        label_var: reader_test.streams.labels
    }

    # Test data for trained model
    epoch_size = 10000
    minibatch_size = 1024

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += trainer.previous_minibatch_sample_count
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
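# A hedged usage sketch for convnet_mnist: like the other scripts here, it
# relies on module-level globals (data_path, model_path, create_reader, and the
# cntk imports), so the note below about where the CTF text files live is an
# assumption about local setup, not part of the original script.
if __name__ == '__main__':
    # expects Train-28x28_cntk_text.txt and Test-28x28_cntk_text.txt under data_path
    error = convnet_mnist()
    print("Final test error: {:0.2f}%".format(error * 100))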
def train_and_evaluate(reader_train, reader_test, network_name, max_epochs):
    set_computation_network_trace_level(0)

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0] * 80 + [0.1] * 40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1] * 1 + [1.0] * 80 + [0.1] * 40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    # shared training parameters
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 128
    momentum_time_constant = -minibatch_size / np.log(0.9)
    l2_reg_weight = 0.0001

    # Set learning parameters
    lr_per_sample = [lr / minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size,
                                         unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)

    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, network_name + "_{}.dnn".format(epoch)))

    # Evaluation parameters
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def convnet_cifar10_dataaug(reader_train, reader_test, max_epochs=80):
    set_computation_network_trace_level(0)

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # apply model to input
    scaled_input = element_times(constant(0.00390625), input_var)  # 1/256
    with default_options(activation=relu, pad=True):
        z = Sequential([
            LayerStack(2, lambda: [
                Convolution((3, 3), 64),
                Convolution((3, 3), 64),
                MaxPooling((3, 3), (2, 2))
            ]),
            LayerStack(2, lambda i: [
                Dense([256, 128][i]),
                Dropout(0.5)
            ]),
            Dense(num_classes, activation=None)
        ])(scaled_input)

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    # training config
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 64

    # Set learning parameters
    lr_per_sample = ([0.0015625] * 20 + [0.00046875] * 20 + [0.00015625] * 20 +
                     [0.000046875] * 10 + [0.000015625])
    lr_schedule = learning_rate_schedule(lr_per_sample, unit=UnitType.sample,
                                         epoch_size=epoch_size)
    mm_time_constant = [0] * 20 + [600] * 20 + [1200]
    mm_schedule = momentum_as_time_constant_schedule(mm_time_constant,
                                                     epoch_size=epoch_size)
    l2_reg_weight = 0.002

    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path,
                                           "ConvNet_CIFAR10_DataAug_{}.dnn".format(epoch)))

    ### Evaluation action
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def cifar_resnet_distributed(data_path, run_test, num_epochs, communicator=None,
                             save_model_filename=None, load_model_filename=None,
                             debug_output=False):
    image_height = 32
    image_width = 32
    num_channels = 3
    num_classes = 10

    feats_stream_name = 'features'
    labels_stream_name = 'labels'

    minibatch_source = create_reader(os.path.join(data_path, 'train_map.txt'),
                                     os.path.join(data_path, 'CIFAR-10_mean.xml'),
                                     True,
                                     distributed_communicator=communicator)

    features_si = minibatch_source[feats_stream_name]
    labels_si = minibatch_source[labels_stream_name]

    # Instantiate the resnet classification model, or load it from file
    if load_model_filename:
        print("Loading model:", load_model_filename)
        classifier_output = persist.load_model(load_model_filename)
        image_input = classifier_output.arguments[0]
    else:
        image_input = input_variable((num_channels, image_height, image_width),
                                     features_si.m_element_type)
        classifier_output = create_resnet_model(image_input, num_classes)

    # Input variable denoting the label data
    label_var = input_variable((num_classes), features_si.m_element_type)

    ce = cross_entropy_with_softmax(classifier_output, label_var)
    pe = classification_error(classifier_output, label_var)

    # Training configuration
    mb_size = 128
    num_mb_per_epoch = 100
    num_mbs = num_mb_per_epoch * num_epochs

    lr_per_sample = [1.0 / mb_size] * 80 + [0.1 / mb_size] * 40 + [0.01 / mb_size]
    lr_schedule = learning_rate_schedule(lr_per_sample, UnitType.sample,
                                         mb_size * num_mb_per_epoch)
    momentum_time_constant = momentum_as_time_constant_schedule(-mb_size / np.log(0.9))

    # create a data-parallel distributed trainer if needed
    dist_trainer = (distributed.data_parallel_distributed_trainer(communicator, False)
                    if communicator else None)

    # Instantiate the trainer object to drive the model training
    trainer = Trainer(classifier_output, ce, pe,
                      [momentum_sgd(classifier_output.parameters, lr_schedule,
                                    momentum_time_constant,
                                    l2_regularization_weight=0.0001)],
                      distributed_trainer=dist_trainer)

    # Get minibatches of images to train with and perform model training
    training_progress_output_freq = 100 if communicator else 20
    if debug_output:
        training_progress_output_freq = training_progress_output_freq // 4

    for i in range(0, num_mbs):
        # NOTE: depending on the network, mb_size can be changed dynamically here
        mb = minibatch_source.next_minibatch(mb_size)

        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {image_input: mb[features_si], label_var: mb[labels_si]}
        trainer.train_minibatch(arguments)

        print_training_progress(trainer, i, training_progress_output_freq)

    if save_model_filename:
        print("Saving model:", save_model_filename)
        persist.save_model(classifier_output, save_model_filename)

    if run_test:
        test_minibatch_source = create_reader(os.path.join(data_path, 'test_map.txt'),
                                              os.path.join(data_path, 'CIFAR-10_mean.xml'),
                                              False)
        features_si = test_minibatch_source[feats_stream_name]
        labels_si = test_minibatch_source[labels_stream_name]

        mb_size = 128
        num_mbs = 100

        total_error = 0.0
        for i in range(0, num_mbs):
            mb = test_minibatch_source.next_minibatch(mb_size)

            # Specify the mapping of input variables in the model to actual
            # minibatch data to be evaluated with
            arguments = {image_input: mb[features_si], label_var: mb[labels_si]}
            error = trainer.test_minibatch(arguments)
            total_error += error

        return total_error / num_mbs
    else:
        return 0
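# A minimal driver sketch for cifar_resnet_distributed in the single-process
# case: with communicator=None no distributed trainer is created, so this runs
# on one worker. The data_path value is a placeholder; the files it must contain
# (train_map.txt, test_map.txt, CIFAR-10_mean.xml) are the ones the function
# itself opens.
if __name__ == '__main__':
    data_path = os.path.join('.', 'CIFAR-10')  # placeholder location
    test_error = cifar_resnet_distributed(data_path, run_test=True, num_epochs=2,
                                          save_model_filename='cifar_resnet.dnn')
    print("Average test error: {:0.2f}%".format(test_error * 100))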
def train_and_evaluate(reader_train, reader_test, network_name):
    set_computation_network_trace_level(0)

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0] * 80 + [0.1] * 40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1] * 1 + [1.0] * 80 + [0.1] * 40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    # shared training parameters
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 128
    max_epochs = 160
    momentum_time_constant = -minibatch_size / np.log(0.9)
    l2_reg_weight = 0.0001

    # Set learning parameters
    lr_per_sample = [lr / minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, unit=UnitType.sample,
                                         epoch_size=epoch_size)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)

    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += data[label_var].num_samples  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, network_name + "_{}.dnn".format(epoch)))

    # Evaluation parameters
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def convnet_cifar10(debug_output=False):
    set_computation_network_trace_level(0)

    image_height = 32
    image_width = 32
    num_channels = 3
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width), np.float32)
    label_var = input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    input_removemean = minus(input_var, constant(128))  # subtract approximate pixel mean
    scaled_input = element_times(constant(0.00390625), input_removemean)  # 1/256
    with default_options(activation=relu, pad=True):
        z = Sequential([
            LayerStack(2, lambda: [
                Convolution((3, 3), 64),
                Convolution((3, 3), 64),
                MaxPooling((3, 3), (2, 2))
            ]),
            LayerStack(2, lambda i: [
                Dense([256, 128][i]),
                Dropout(0.5)
            ]),
            Dense(num_output_classes, activation=None)
        ])(scaled_input)

    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    reader_train = create_reader(os.path.join(data_path, 'Train_cntk_text.txt'),
                                 True, input_dim, num_output_classes)

    # training config
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 64

    # Set learning parameters
    lr_per_sample = [0.0015625] * 10 + [0.00046875] * 10 + [0.00015625]
    lr_schedule = learning_rate_schedule(lr_per_sample, unit=UnitType.sample,
                                         epoch_size=epoch_size)
    momentum_time_constant = [0] * 20 + [-minibatch_size / np.log(0.9)]
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant,
                                                     epoch_size=epoch_size)
    l2_reg_weight = 0.002

    # Instantiate the trainer object to drive the model training
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # Get minibatches of images to train with and perform model training
    max_epochs = 30
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += data[label_var].num_samples  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, "ConvNet_CIFAR10_{}.dnn".format(epoch)))

    # Load test data
    reader_test = create_reader(os.path.join(data_path, 'Test_cntk_text.txt'),
                                False, input_dim, num_output_classes)

    input_map = {
        input_var: reader_test.streams.features,
        label_var: reader_test.streams.labels
    }

    # Test data for trained model
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def convnet_mnist(debug_output=False):
    image_height = 28
    image_width = 28
    num_channels = 1
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width), np.float32)
    label_var = input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), input_var)  # 1/256
    with default_options(activation=relu, pad=False):
        conv1 = Convolution((5, 5), 32, pad=True)(scaled_input)
        pool1 = MaxPooling((3, 3), (2, 2))(conv1)
        conv2 = Convolution((3, 3), 48)(pool1)
        pool2 = MaxPooling((3, 3), (2, 2))(conv2)
        conv3 = Convolution((3, 3), 64)(pool2)
        f4 = Dense(96)(conv3)
        drop4 = Dropout(0.5)(f4)
        z = Dense(num_output_classes, activation=None)(drop4)

    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    reader_train = create_reader(os.path.join(data_path, 'Train-28x28_cntk_text.txt'),
                                 True, input_dim, num_output_classes)

    # training config
    epoch_size = 60000  # for now we manually specify epoch size
    minibatch_size = 128

    # Set learning parameters
    lr_per_sample = [0.001] * 10 + [0.0005] * 10 + [0.0001]
    lr_schedule = learning_rate_schedule(lr_per_sample, UnitType.sample, epoch_size)
    momentum_time_constant = [0] * 5 + [1024]
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant, epoch_size)

    # Instantiate the trainer object to drive the model training
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule)
    trainer = Trainer(z, ce, pe, learner)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # Get minibatches of images to train with and perform model training
    max_epochs = 40
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += data[label_var].num_samples  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, "ConvNet_MNIST_{}.dnn".format(epoch)))

    # Load test data
    reader_test = create_reader(os.path.join(data_path, 'Test-28x28_cntk_text.txt'),
                                False, input_dim, num_output_classes)

    input_map = {
        input_var: reader_test.streams.features,
        label_var: reader_test.streams.labels
    }

    # Test data for trained model
    epoch_size = 10000
    minibatch_size = 1024

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def train_and_evaluate(reader_train, reader_test, network_name, max_epochs,
                       distributed_trainer, scale_up=False):
    set_computation_network_trace_level(0)

    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0] * 80 + [0.1] * 40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1] * 1 + [1.0] * 80 + [0.1] * 40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    # shared training parameters
    epoch_size = 50000  # for now we manually specify epoch size

    # NOTE: scaling up minibatch_size increases sample throughput. On an 8-GPU machine,
    # ResNet110 samples-per-second is ~7x that of a single GPU, compared to ~3x without
    # scaling up. However, a bigger minibatch size over the same number of samples means
    # fewer updates and thus a higher training error. This is a speed/accuracy trade-off.
    minibatch_size = 128 * (len(distributed_trainer.communicator().workers())
                            if scale_up else 1)

    momentum_time_constant = -minibatch_size / np.log(0.9)
    l2_reg_weight = 0.0001

    # Set learning parameters
    lr_per_sample = [lr / minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size,
                                         unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)

    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner, distributed_trainer)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            # fetch the next minibatch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count),
                                               input_map=input_map)
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)

        # save model only on worker 0, otherwise there will be file write conflicts
        # for multiple GPUs on the same machine
        if distributed_trainer.communicator().current_worker().global_rank == 0:
            persist.save_model(z, os.path.join(model_path,
                                               network_name + "_{}.dnn".format(epoch)))

    # Evaluation parameters
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    return metric_numer / metric_denom
def train_and_evaluate(reader_train, reader_test, max_epochs, distributed_trainer):
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # Normalize the input
    feature_scale = 1.0 / 256.0
    input_var_norm = element_times(feature_scale, input_var)

    # apply model to input
    z = create_resnet_model(input_var_norm, 10)

    #
    # Training action
    #

    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)

    # training config
    epoch_size = 50000
    minibatch_size = 128

    # Set learning parameters
    lr_per_minibatch = learning_rate_schedule([1] * 80 + [0.1] * 40 + [0.01],
                                              UnitType.minibatch, epoch_size)
    momentum_time_constant = momentum_as_time_constant_schedule(
        -minibatch_size / np.log(0.9))
    l2_reg_weight = 0.0001

    # trainer object
    learner = momentum_sgd(z.parameters,
                           lr=lr_per_minibatch,
                           momentum=momentum_time_constant,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner, distributed_trainer)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    log_number_of_parameters(z)
    print()
    progress_printer = ProgressPrinter(tag='Training')

    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = reader_train.next_minibatch(minibatch_size, input_map=input_map)  # fetch minibatch
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress

        progress_printer.epoch_summary(with_metric=True)

        # save model only on worker 0 to avoid file write conflicts
        if distributed_trainer.communicator().current_worker().global_rank == 0:
            persist.save_model(z, os.path.join(model_path,
                                               "CifarResNet_Distributed_{}.dnn".format(epoch)))

    #
    # Evaluation action
    #
    epoch_size = 10000
    minibatch_size = 16

    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    # progress_printer = ProgressPrinter(freq=100, first=10, tag='Eval')
    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)

        # Fetch next test minibatch
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)

        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch

        # Keep track of the number of samples processed so far
        sample_count += data[label_var].num_samples
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.1f}% * {}".format(
        minibatch_index, (metric_numer * 100.0) / metric_denom, metric_denom))
    print("")

    # return evaluation error
    return metric_numer / metric_denom
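# A sketch, under assumptions, of how this distributed train_and_evaluate might
# be driven. data_parallel_distributed_trainer(communicator, False) mirrors the
# call already used in cifar_resnet_distributed above; mpi_communicator() and
# Communicator.finalize() are assumptions about this CNTK version's distributed
# module, and the reader arguments match the map-file readers used above.
if __name__ == '__main__':
    communicator = distributed.mpi_communicator()  # assumed factory
    dist_trainer = distributed.data_parallel_distributed_trainer(communicator, False)

    reader_train = create_reader(os.path.join(data_path, 'train_map.txt'),
                                 os.path.join(data_path, 'CIFAR-10_mean.xml'), True)
    reader_test = create_reader(os.path.join(data_path, 'test_map.txt'),
                                os.path.join(data_path, 'CIFAR-10_mean.xml'), False)

    train_and_evaluate(reader_train, reader_test, max_epochs=5,
                       distributed_trainer=dist_trainer)
    distributed.Communicator.finalize()  # assumed cleanup for MPI workers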