def load_stl10_data(train_split):
    # Load STL-10 data
    print('Loading STL-10 Training Data')
    X_train = numpy.load('/data/stl10_matlab/train_splits/train_X_' +
                         str(train_split) + '.npy')
    y_train = numpy.load('/data/stl10_matlab/train_splits/train_y_' +
                         str(train_split) + '.npy')

    print('Loading STL-10 Testing Data')
    X_test = numpy.load('/data/stl10_matlab/test_X.npy')
    y_test = numpy.load('/data/stl10_matlab/test_y.npy')

    # Scale pixel values from [0, 255] to [0, 2]
    X_train = numpy.float32(X_train)
    X_train /= 255.0
    X_train *= 2.0
    X_test = numpy.float32(X_test)
    X_test /= 255.0
    X_test *= 2.0

    train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
    test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
    train_iterator = train_dataset.iterator(mode='sequential', batch_size=128)
    test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

    return train_iterator, test_iterator
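# Every snippet in this file relies on supervised_dataset.SupervisedDataset,
# which is not defined here. The class below is only a sketch of the interface
# implied by the calls above (an X/y container whose iterator() yields
# (X_batch, y_batch) pairs in 'sequential' or 'random_uniform' mode); the
# actual implementation in the repo may differ.
import numpy


class MinimalSupervisedDataset(object):
    def __init__(self, X, y):
        self.X = X
        self.y = y

    def iterator(self, mode='sequential', batch_size=128, num_batches=None):
        num_examples = len(self.X)
        if mode == 'sequential':
            # One ordered pass over the data in fixed-size batches.
            for start in range(0, num_examples, batch_size):
                yield (self.X[start:start + batch_size],
                       self.y[start:start + batch_size])
        elif mode == 'random_uniform':
            # num_batches batches of indices sampled uniformly at random.
            for _ in range(num_batches):
                indices = numpy.random.randint(0, num_examples,
                                               size=batch_size)
                yield self.X[indices], self.y[indices]
        else:
            raise ValueError('unknown iterator mode: %s' % mode)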
def load_cifar10_data():
    # Load CIFAR-10 data
    print('Loading CIFAR-10 Testing Data')
    X_test = numpy.load('/data/cifar10/test_X.npy')
    y_test = numpy.load('/data/cifar10/test_y.npy')

    test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
    test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

    return test_iterator
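# A minimal usage sketch for the two loaders above; train_split=0 is only an
# illustrative placeholder, and the iterators are assumed to yield
# (X_batch, y_batch) pairs as in the training loops later in this file.
train_iterator, test_iterator = load_stl10_data(train_split=0)
cifar_test_iterator = load_cifar10_data()
for x_batch, y_batch in train_iterator:
    print(x_batch.shape)
    print(y_batch.shape)
    break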
model = CNNModel('experiment', './', learning_rate=1e-2)
monitor = util.Monitor(model)

# Loading CIFAR-10 dataset
print('Loading Data')
data_path = '/data/cifar10/'
reduced_data_path = os.path.join(data_path, 'reduced', 'cifar10_100')
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(
    os.path.join(reduced_data_path, 'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')

train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(mode='random_uniform', batch_size=128,
                                        num_batches=100000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=128,
                                      num_batches=100000)

normer = util.Normer2(filter_size=5, num_channels=3)

print('Training Model')
for x_batch, y_batch in train_iterator:
    # Move the batch dimension last before normalizing the batch
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
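# util.Normer2 is not shown in this file; the comments on Normer3 in later
# snippets describe these objects as local contrast normalizers applied to
# every batch. The helper below is only a sketch of that idea for a single
# 2-D (one-channel) image, not the actual Normer2 implementation.
import numpy
from scipy.ndimage import uniform_filter


def local_contrast_normalize(image, filter_size=5, eps=1e-8):
    # Subtract the local mean, then divide by the local standard deviation,
    # both estimated over a filter_size x filter_size neighborhood.
    local_mean = uniform_filter(image, size=filter_size)
    centered = image - local_mean
    local_var = uniform_filter(centered ** 2, size=filter_size)
    return centered / numpy.sqrt(local_var + eps)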
def _get_iterator(self):
    dataset = supervised_dataset.SupervisedDataset(self.data_container.X,
                                                   self.data_container.y)
    iterator = dataset.iterator(mode='sequential',
                                batch_size=self.batch_size)
    return iterator
X_val /= 255.0
X_val *= 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0

# Mean and std over all but axis 1 of the combined train/val/test arrays
mean = numpy.average(numpy.concatenate((X_train, X_val, X_test), axis=0),
                     axis=(0, 2, 3))
print(numpy.shape(numpy.concatenate((X_train, X_val, X_test), axis=0)))
std = numpy.std(numpy.concatenate((X_train, X_val, X_test), axis=0),
                axis=(0, 2, 3))
print(mean)
print(std)

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(mode='random_uniform', batch_size=64,
                                        num_batches=31000)
val_iterator = val_dataset.iterator(mode='random_uniform', batch_size=64,
                                    num_batches=31000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)

print('Training Model')
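# The mean and std printed above are computed with axis=(0, 2, 3), i.e. one
# value per entry of axis 1 (the channel axis if the arrays are laid out as
# (examples, channels, rows, cols)). They are not applied in this snippet; a
# standardization step using them could look like the sketch below, where the
# function name is ours rather than the repo's.
def standardize_per_channel(batch, mean, std):
    # batch: (N, C, H, W); mean and std: length-C vectors.
    return (batch - mean[None, :, None, None]) / std[None, :, None, None]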
train_data_container = supervised_data_loader.load('train', train_split)
test_data_container = supervised_data_loader.load('test', train_split)

X_train = train_data_container.X
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 2.0
y_train = train_data_container.y

X_test = test_data_container.X
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
y_test = test_data_container.y

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
train_iterator = train_dataset.iterator(mode='random_uniform', batch_size=64,
                                        num_batches=31000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=64,
                                      num_batches=31000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)

print('Training Model')
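# The training loop that consumes these iterators and the preprocessor is not
# shown in this file. The loop below is only a sketch of how a batch might be
# prepared, assuming (by analogy with normer.run above) that the preprocessor
# exposes a run() method taking and returning a batch; the model update itself
# is omitted because it is not shown here.
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor.run(x_batch)
    # model update on (x_batch, y_batch) would go here
    break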