# Per-channel mean and std of the training set (assumed reconstruction
# of a statement truncated in the source; reduces over batch, height,
# and width).
mean = numpy.mean(X_train, axis=(0, 2, 3))
std = numpy.std(X_train, axis=(0, 2, 3))
print(mean)
print(std)

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Create object to local contrast normalize a batch.
# Note: every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)

print('Training Model')
for x_batch, y_batch in train_iterator:
    # x_batch = preprocessor.run(x_batch)
    x_batch = (x_batch - mean) / std

    # Hide a random patch in each image of the batch.
    for i in range(len(x_batch)):
        x_batch[i] = hide_patch(x_batch[i])

    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
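
# ----------------------------------------------------------------------
# Illustrative sketch (not from the original code): util.Normer3 is used
# above as a local contrast normalizer. Assuming it behaves like standard
# local contrast normalization, each pixel is centered by its local mean
# and divided by its local standard deviation within a filter_size x
# filter_size window. The function below is a hypothetical stand-in for
# a single 2D image; the real Normer3 (filter shape, edge handling,
# batching) may differ.
import numpy
from scipy.ndimage import uniform_filter

def local_contrast_normalize(image, filter_size=5, eps=1e-4):
    # Local mean and std over a filter_size x filter_size window.
    local_mean = uniform_filter(image, size=filter_size)
    centered = image - local_mean
    local_std = numpy.sqrt(uniform_filter(centered ** 2, size=filter_size))
    # Avoid division by near-zero std in flat image regions.
    return centered / numpy.maximum(local_std, eps)

# Example (hypothetical): normalized = local_contrast_normalize(x_batch[i, 0])
# ----------------------------------------------------------------------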
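
# ----------------------------------------------------------------------
# Illustrative sketch (not from the original code): hide_patch is called
# in the training loop above but never defined in this section. A minimal
# version, assuming images are (channels, height, width) numpy arrays,
# could zero out one randomly placed square patch per image. patch_size,
# hide_prob, and fill_value are assumed parameters, not the script's
# actual values.
import numpy

def hide_patch(image, patch_size=16, hide_prob=0.5, fill_value=0.0):
    # With probability hide_prob, hide one random
    # patch_size x patch_size square in the image.
    if numpy.random.rand() < hide_prob:
        _, height, width = image.shape
        top = numpy.random.randint(0, height - patch_size + 1)
        left = numpy.random.randint(0, width - patch_size + 1)
        image[:, top:top + patch_size, left:left + patch_size] = fill_value
    return image
# ----------------------------------------------------------------------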
checkpoint_file = args.checkpoint_file
fold = int(args.split)
dataset_path = os.path.join(data_paths.tfd_data_path,
                            'npy_files/TFD_96/split_' + str(fold))

print('Checkpoint: %s' % checkpoint_file)
print('Testing on split %d\n' % fold)

# Load model
model = SupervisedModel('evaluation', './')

# Load dataset (container 2 is the test split) and rescale pixel
# values from [0, 255] to [0.0, 2.0].
supervised_data_loader = SupervisedDataLoader(dataset_path)
test_data_container = supervised_data_loader.load(2)
test_data_container.X = numpy.float32(test_data_container.X)
test_data_container.X /= 255.0
test_data_container.X *= 2.0

# Construct evaluator with a local contrast normalization preprocessor
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
evaluator = util.Evaluator(model, test_data_container,
                           checkpoint_file, preprocessor)

# For the given checkpoint, compute the overall test accuracy
accuracies = []
print('Checkpoint: %s' % os.path.split(checkpoint_file)[1])
evaluator.set_checkpoint(checkpoint_file)
accuracy = evaluator.run()
print('Accuracy: %f\n' % accuracy)
accuracies.append(accuracy)
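
# ----------------------------------------------------------------------
# Illustrative sketch (not from the original code): the accuracies list
# above suggests this snippet was adapted from a sweep over several
# checkpoints. One hedged way to do that, assuming checkpoints live as
# .pkl files in a directory checkpoint_dir (both the extension and the
# variable name are hypothetical) and that set_checkpoint() can be
# called repeatedly on the same Evaluator:
import glob
import os
import numpy

checkpoint_files = sorted(glob.glob(os.path.join(checkpoint_dir, '*.pkl')))
accuracies = []
for checkpoint_file in checkpoint_files:
    print('Checkpoint: %s' % os.path.split(checkpoint_file)[1])
    evaluator.set_checkpoint(checkpoint_file)
    accuracies.append(evaluator.run())

# Report the best-performing checkpoint.
best = int(numpy.argmax(accuracies))
print('Best checkpoint: %s (accuracy %f)' %
      (os.path.split(checkpoint_files[best])[1], accuracies[best]))
# ----------------------------------------------------------------------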