def main():
    ''' Entry point of this script. '''
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/convolutional_network/:
    #   convolutional_network.ipynb

    filter_counts = [96, 192, 192]
    filter_init_uniform_ranges = [0.005] * len(filter_counts)
    filter_shapes = [(8, 8), (8, 8), (5, 5)]
    pool_shapes = [(4, 4), (4, 4), (2, 2)]
    pool_strides = [(2, 2), (2, 2), (2, 2)]
    pool_pads = [(2, 2), (2, 2), (2, 2)]  # defined but not passed to build_conv_classifier below
    affine_output_sizes = [10]
    affine_init_stddevs = [.05] * len(affine_output_sizes)
    dropout_include_rates = [0.8, 0.5, 0.5, 0.5]
    # dropout_include_rates = ([.8 if args.dropout else 1.0] *
    #                          (len(filter_counts) + len(affine_output_sizes)))
    conv_pads = [(4, 4), (3, 3), (3, 3)]

    assert_equal(affine_output_sizes[-1], 10)

    def unpickle(filename):
        ''' Loads one pickled CIFAR-10 batch file. '''
        import cPickle
        with open(filename, 'rb') as pickle_file:
            return cPickle.load(pickle_file)

    batch1 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_1')
    batch2 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_2')
    batch3 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_3')
    batch4 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_4')
    batch5 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_5')

    # Batches 1-4 are used for training, batch 5 for validation.  Each
    # CIFAR-10 batch holds 10000 images, reshaped here to (3, 32, 32).
    training_tensors = [
        numpy.concatenate((batch1['data'].reshape(10000, 3, 32, 32),
                           batch2['data'].reshape(10000, 3, 32, 32),
                           batch3['data'].reshape(10000, 3, 32, 32),
                           batch4['data'].reshape(10000, 3, 32, 32))),
        numpy.concatenate((batch1['labels'],
                           batch2['labels'],
                           batch3['labels'],
                           batch4['labels']))]
    validation_tensors = [
        batch5['data'].reshape(10000, 3, 32, 32),
        numpy.asarray(batch5['labels'])]

    if not args.no_shuffle_dataset:
        def shuffle_in_unison_inplace(a, b):
            ''' Applies the same random permutation to both arrays. '''
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = \
            shuffle_in_unison_inplace(training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = \
            shuffle_in_unison_inplace(validation_tensors[0], validation_tensors[1])

    all_images_shared = theano.shared(numpy.vstack([training_tensors[0],
                                                    validation_tensors[0]]))
    all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1],
                                                         validation_tensors[1]]))

    length_training = training_tensors[0].shape[0]
    length_validation = validation_tensors[0].shape[0]
    indices_training = numpy.asarray(range(length_training))
    indices_validation = numpy.asarray(range(length_training,
                                              length_training + length_validation))

    indices_training_dataset = Dataset(
        tensors=[indices_training],
        names=['indices'],
        formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])
    indices_validation_dataset = Dataset(
        tensors=[indices_validation],
        names=['indices'],
        formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])

    indices_training_iterator = indices_training_dataset.iterator(
        iterator_type='sequential', batch_size=args.batch_size_calculation)
    indices_validation_iterator = indices_validation_dataset.iterator(
        iterator_type='sequential', batch_size=args.batch_size_calculation)

    # ('mnist_' prefix kept from the original MNIST script; the data here is CIFAR-10.)
    mnist_validation_iterator = indices_validation_iterator
    mnist_training_iterator = indices_training_iterator

    input_indices_symbolic, = indices_training_iterator.make_input_nodes()
    image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
    label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)
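    # The full dataset lives on the device in all_images_shared and
    # all_labels_shared; the iterators above yield only integer indices, and
    # the two lookup nodes turn an index batch into its image and label
    # tensors inside the Theano graph.  A standalone sketch of this indexing
    # pattern follows this function.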
    image_node = RescaleImage(image_lookup_node)  # uint8 pixels -> floats
    image_node = Lcn(image_node)                  # local contrast normalization

    rng = numpy.random.RandomState(129734)
    theano_rng = RandomStreams(2387845)

    (conv_layers,
     affine_layers,
     output_node,
     params_flat,
     params_old_flat,
     shapes) = build_conv_classifier(image_node,
                                     filter_shapes,
                                     filter_counts,
                                     filter_init_uniform_ranges,
                                     pool_shapes,
                                     pool_strides,
                                     affine_output_sizes,
                                     affine_init_stddevs,
                                     dropout_include_rates,
                                     conv_pads,
                                     rng,
                                     theano_rng)

    loss_node = CrossEntropy(output_node, label_lookup_node)
    scalar_loss = loss_node.output_symbol.mean()
    # scalar_loss2 = theano.clone(scalar_loss,
    #                             replace={params_flat: params_old_flat})

    # Optional L2 weight decay on the convolutional filters and on the
    # affine layers' weight matrices.
    if args.weight_decay != 0.0:
        for conv_layer in conv_layers:
            filters = conv_layer.conv2d_node.filters
            filter_loss = args.weight_decay * theano.tensor.sqr(filters).sum()
            scalar_loss = scalar_loss + filter_loss

        for affine_layer in affine_layers:
            weights = affine_layer.affine_node.linear_node.params
            weight_loss = args.weight_decay * theano.tensor.sqr(weights).sum()
            scalar_loss = scalar_loss + weight_loss

    max_epochs = 200

    #
    # Makes parameter updater
    #
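    # build_conv_classifier returned the model parameters flattened into a
    # single vector (params_flat) together with their original shapes, so the
    # BFGS-style trainer below can treat all of the weights as one variable.
    # The gradient is therefore taken with respect to the flat vector.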
    gradient = theano.gradient.grad(scalar_loss, params_flat)

    # Quick sanity check: evaluate the loss and gradient on one training minibatch.
    loss_function = theano.function([input_indices_symbolic.output_symbol],
                                    scalar_loss)
    gradient_function = theano.function([input_indices_symbolic.output_symbol],
                                        gradient)

    cost_arguments = mnist_training_iterator.next()
    print(loss_function(*cost_arguments))
    grads = gradient_function(*cost_arguments)
    print(grads)
    print(grads.shape)

    #
    # Makes batch and epoch callbacks
    #

    '''
    def make_output_filename(args, best=False):
        """
        Constructs a filename that reflects the command-line params.
        """
        assert_equal(os.path.splitext(args.output_prefix)[1], "")

        if os.path.isdir(args.output_prefix):
            output_dir, output_prefix = args.output_prefix, ""
        else:
            output_dir, output_prefix = os.path.split(args.output_prefix)
            assert_true(os.path.isdir(output_dir))

        if output_prefix != "":
            output_prefix = output_prefix + "_"

        output_prefix = os.path.join(output_dir, output_prefix)

        return ("%slr-%g_mom-%g_nesterov-%s_bs-%d%s.pkl" %
                (output_prefix,
                 args.learning_rate,
                 args.initial_momentum,
                 not args.no_nesterov,
                 args.batch_size,
                 "_best" if best else ""))
    '''

    # Set up the loggers
    assert_equal(os.path.splitext(args.output_prefix)[1], "")
    if os.path.isdir(args.output_prefix) and \
       not args.output_prefix.endswith('/'):
        args.output_prefix += '/'

    output_dir, output_prefix = os.path.split(args.output_prefix)
    if output_prefix != "":
        output_prefix = output_prefix + "_"

    output_prefix = os.path.join(output_dir, output_prefix)

    epoch_logger = EpochLogger(output_prefix + "LBFGS.h5")

    misclassification_node = Misclassification(output_node, label_lookup_node)

    validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
    epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)

    training_stopper = StopsOnStagnation(max_epochs=10,
                                         min_proportional_decrease=0.0)
    validation_misclassification_monitor = MeanOverEpoch(
        misclassification_node,
        callbacks=[print_misclassification_rate, training_stopper])
    epoch_logger.subscribe_to('validation misclassification',
                              validation_misclassification_monitor)

    # batch callback (monitor)
    # training_loss_logger = LogsToLists()
    training_loss_monitor = MeanOverEpoch(loss_node, callbacks=[print_loss])
    epoch_logger.subscribe_to("training loss", training_loss_monitor)

    training_misclassification_monitor = MeanOverEpoch(misclassification_node,
                                                       callbacks=[])
    epoch_logger.subscribe_to('training misclassification %',
                              training_misclassification_monitor)

    # model = SerializableModel([input_indices_symbolic], [output_node])
    # saves_best = SavesAtMinimum(model, make_output_filename(args, best=True))

    # NOTE: this rebinds validation_loss_monitor, so the ValidationCallback
    # below receives this monitor rather than the one subscribed above as
    # 'validation mean loss'.
    validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
    epoch_logger.subscribe_to("Validation Loss", validation_loss_monitor)

    epoch_timer = EpochTimer2()
    epoch_logger.subscribe_to('epoch duration', epoch_timer)

    validation_callback = ValidationCallback(
        inputs=[input_indices_symbolic.output_symbol],
        input_iterator=mnist_validation_iterator,
        epoch_callbacks=[validation_loss_monitor,
                         validation_misclassification_monitor])

    # trainer = Sgd((image_node.output_symbol, label_node.output_symbol),
    trainer = Bgfs(inputs=[input_indices_symbolic],
                   parameters=params_flat,
                   gradient=gradient,
                   learning_rate=args.learning_rate,
                   training_iterator=mnist_training_iterator,
                   validation_iterator=mnist_validation_iterator,
                   scalar_loss=scalar_loss,
                   armijo=args.armijo,
                   tangent=args.tangent,
                   batch_size=args.batch_size,
                   epoch_callbacks=([
                       # training_loss_monitor,
                       # training_misclassification_monitor,
                       validation_callback,
                       LimitsNumEpochs(max_epochs),
                       epoch_timer]),
                   param_shapes=shapes)

    '''
    stuff_to_pickle = OrderedDict(
        (('model', model),
         ('validation_loss_logger', validation_loss_logger)))

    # Pickling the trainer doesn't work when there are Dropout nodes.
    # stuff_to_pickle = OrderedDict(
    #     (('trainer', trainer),
    #      ('validation_loss_logger', validation_loss_logger),
    #      ('model', model)))

    trainer.epoch_callbacks += (momentum_updaters +
                                [EpochTimer(),
                                 PicklesOnEpoch(stuff_to_pickle,
                                                make_output_filename(args),
                                                overwrite=False),
                                 validation_callback,
                                 LimitsNumEpochs(max_epochs)])
    '''

    start_time = time.time()
    trainer.train()
    elapsed_time = time.time() - start_time

    print("Total elapsed time for training is: ", elapsed_time)
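

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the data pipeline
# above keeps the whole dataset in a Theano shared variable and feeds the
# model only integer indices, so each minibatch is selected on the device.
# The names below (demo_index_lookup, data_shared, fetch_minibatch) are made
# up for this example.
# ---------------------------------------------------------------------------
def demo_index_lookup():
    ''' Returns the rows of a toy dataset selected by an index vector. '''
    import numpy
    import theano
    import theano.tensor as T

    data = numpy.arange(20, dtype=theano.config.floatX).reshape(10, 2)
    data_shared = theano.shared(data)        # whole dataset on the device
    indices = T.lvector('indices')           # symbolic minibatch indices
    minibatch = data_shared[indices]         # advanced indexing in the graph
    fetch_minibatch = theano.function([indices], minibatch)
    return fetch_minibatch(numpy.asarray([0, 3, 7], dtype='int64'))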
def main():
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/multilayer_perceptron/:
    #   multilayer_perceptron.ipynb
    #   mlp_tutorial_part_3.yaml

    sizes = [500, 500, 10]
    # Sparse initialization counts for the two hidden layers
    # (cf. Pylearn2's sparse_init).
    sparse_init_counts = [15, 15]
    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_equal(sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    # Split the training set into training and validation sets.
    tensors = mnist_training.tensors
    training_tensors = [t[:-args.validation_size, ...] for t in tensors]
    validation_tensors = [t[-args.validation_size:, ...] for t in tensors]

    if not args.no_shuffle_dataset:
        def shuffle_in_unison_inplace(a, b):
            ''' Applies the same random permutation to both arrays. '''
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = \
            shuffle_in_unison_inplace(training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = \
            shuffle_in_unison_inplace(validation_tensors[0], validation_tensors[1])

    all_images_shared = theano.shared(numpy.vstack([training_tensors[0],
                                                    validation_tensors[0]]))
    all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1],
                                                         validation_tensors[1]]))

    length_training = training_tensors[0].shape[0]
    length_validation = validation_tensors[0].shape[0]
    indices_training = numpy.asarray(range(length_training))
    indices_validation = numpy.asarray(range(length_training,
                                              length_training + length_validation))

    indices_training_dataset = Dataset(
        tensors=[indices_training],
        names=['indices'],
        formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])
    indices_validation_dataset = Dataset(
        tensors=[indices_validation],
        names=['indices'],
        formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])

    indices_training_iterator = indices_training_dataset.iterator(
        iterator_type='sequential', batch_size=args.batch_size_calculation)
    indices_validation_iterator = indices_validation_dataset.iterator(
        iterator_type='sequential', batch_size=10000)

    mnist_validation_iterator = indices_validation_iterator
    mnist_training_iterator = indices_training_iterator

    input_indices_symbolic, = indices_training_iterator.make_input_nodes()
    image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
    label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)

    # image_node = CastNode(image_lookup_node, 'floatX')
    image_node = RescaleImage(image_lookup_node)
    # image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(281934)
    theano_rng = RandomStreams(23845)

    (affine_nodes,
     output_node,
     params_flat,
     params_old_flat,
     shapes) = build_fc_classifier(image_node,
                                   sizes,
                                   sparse_init_counts,
                                   args.dropout_include_rates,
                                   rng,
                                   theano_rng)

    loss_node = CrossEntropy(output_node, label_lookup_node)
    loss_sum = loss_node.output_symbol.mean()  # mean cross-entropy over the minibatch
    max_epochs = 300

    gradient = theano.gradient.grad(loss_sum, params_flat)

    #
    # Makes batch and epoch callbacks
    #

    '''
    def make_output_basename(args):
        assert_equal(os.path.splitext(args.output_prefix)[1], "")

        if os.path.isdir(args.output_prefix) and \
           not args.output_prefix.endswith('/'):
            args.output_prefix += '/'

        output_dir, output_prefix = os.path.split(args.output_prefix)
        if output_prefix != "":
            output_prefix = output_prefix + "_"

        output_prefix = os.path.join(output_dir, output_prefix)

        return "{}lr-{}_mom-{}_nesterov-{}_bs-{}".format(
            output_prefix,
            args.learning_rate,
            args.initial_momentum,
            args.nesterov,
            args.batch_size)
    '''
    # Set up the loggers
    assert_equal(os.path.splitext(args.output_prefix)[1], "")
    if os.path.isdir(args.output_prefix) and \
       not args.output_prefix.endswith('/'):
        args.output_prefix += '/'

    output_dir, output_prefix = os.path.split(args.output_prefix)
    if output_prefix != "":
        output_prefix = output_prefix + "_"

    output_prefix = os.path.join(output_dir, output_prefix)

    epoch_logger = EpochLogger(output_prefix + "LBFGS_mini_batch_" +
                               str(args.batch_size) + ".h5")

    # misclassification_node = Misclassification(output_node, label_node)
    # mcr_logger = LogsToLists()
    # training_stopper = StopsOnStagnation(max_epochs=10,
    #                                      min_proportional_decrease=0.0)
    misclassification_node = Misclassification(output_node, label_lookup_node)

    validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
    epoch_logger.subscribe_to('validation mean loss', validation_loss_monitor)

    validation_misclassification_monitor = MeanOverEpoch(
        misclassification_node,
        callbacks=[print_mcr,
                   StopsOnStagnation(max_epochs=50,
                                     min_proportional_decrease=0.0)])
    epoch_logger.subscribe_to('validation misclassification',
                              validation_misclassification_monitor)

    # batch callback (monitor)
    # training_loss_logger = LogsToLists()
    training_loss_monitor = MeanOverEpoch(loss_node, callbacks=[print_loss])
    epoch_logger.subscribe_to('training mean loss', training_loss_monitor)

    training_misclassification_monitor = MeanOverEpoch(misclassification_node,
                                                       callbacks=[])
    epoch_logger.subscribe_to('training misclassification %',
                              training_misclassification_monitor)

    # epoch callbacks
    # validation_loss_logger = LogsToLists()

    def make_output_filename(args, best=False):
        # (Unused here: make_output_basename above is commented out.)
        basename = make_output_basename(args)
        return "{}{}.pkl".format(basename, '_best' if best else "")

    # model = SerializableModel([input_indices_symbolic], [output_node])
    # saves_best = SavesAtMinimum(model, make_output_filename(args, best=True))

    # NOTE: this rebinds validation_loss_monitor, so the ValidationCallback
    # below receives this monitor rather than the one subscribed above as
    # 'validation mean loss'.
    validation_loss_monitor = MeanOverEpoch(loss_node, callbacks=[])
    epoch_logger.subscribe_to('validation loss', validation_loss_monitor)

    epoch_timer = EpochTimer2()
    epoch_logger.subscribe_to('epoch duration', epoch_timer)

    validation_callback = ValidationCallback(
        inputs=[input_indices_symbolic.output_symbol],
        input_iterator=mnist_validation_iterator,
        epoch_callbacks=[validation_loss_monitor,
                         validation_misclassification_monitor])

    trainer = Bgfs(inputs=[input_indices_symbolic],
                   parameters=params_flat,
                   gradient=gradient,
                   learning_rate=args.learning_rate,
                   training_iterator=mnist_training_iterator,
                   validation_iterator=mnist_validation_iterator,
                   scalar_loss=loss_sum,
                   armijo=args.armijo,
                   tangent=args.tangent,
                   batch_size=args.batch_size,
                   epoch_callbacks=([
                       # training_loss_monitor,
                       # training_misclassification_monitor,
                       validation_callback,
                       LimitsNumEpochs(max_epochs),
                       epoch_timer]),
                   param_shapes=shapes)
                   # validation_loss_monitor]))

    # stuff_to_pickle = OrderedDict(
    #     (('model', model),
    #      ('validation_loss_logger', validation_loss_logger)))

    # Pickling the trainer doesn't work when there are Dropout nodes.
    # stuff_to_pickle = OrderedDict(
    #     (('trainer', trainer),
    #      ('validation_loss_logger', validation_loss_logger),
    #      ('model', model)))

    # trainer.epoch_callbacks += (momentum_updaters +
    #                             [PicklesOnEpoch(stuff_to_pickle,
    #                                             make_output_filename(args),
    #                                             overwrite=False),
    #                              validation_callback,
    #                              LimitsNumEpochs(max_epochs)])

    # Quick sanity check: evaluate the loss on one training minibatch.
    loss_function = theano.function([input_indices_symbolic.output_symbol],
                                    loss_sum)
    cost_args = mnist_training_iterator.next()
    print(loss_function(*cost_args))

    start_time = time.time()
    trainer.train()
    elapsed_time = time.time() - start_time

    print("Total elapsed time for training is: ", elapsed_time)
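

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): both trainers accept
# an `armijo` flag, which suggests an Armijo backtracking line search along
# the computed descent direction.  The function below is a generic numpy
# version written under that assumption, not the Bgfs trainer's actual
# implementation; all names are made up for the example, and it uses the
# module-level numpy import that these scripts already rely on.
# ---------------------------------------------------------------------------
def armijo_backtracking(loss, grad, x, direction, step=1.0, shrink=0.5, c=1e-4):
    ''' Shrinks `step` until loss(x + step * direction) satisfies the
        sufficient-decrease (Armijo) condition. '''
    loss_x = loss(x)
    slope = numpy.dot(grad(x), direction)  # directional derivative; < 0 for descent
    while loss(x + step * direction) > loss_x + c * step * slope:
        step *= shrink
        if step < 1e-10:  # give up on pathological directions
            break
    return step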