train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path, 'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')

train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)

# Create objects to local contrast normalize and augment each batch.
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32), flip=False)

print('Training Model')
for x_batch, y_batch in train_iterator:
    # Reorder the axes so the batch dimension comes last, then augment and
    # contrast normalize the batch before training on it.
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))

    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1 - accuracy)  # monitor takes error instead of accuracy

    if monitor.test:
        monitor.start()
        x_test_batch, y_test_batch = test_iterator.next()
        x_test_batch = x_test_batch.transpose(1, 2, 3, 0)
# Loading CIFAR-10 dataset
print('Loading Data')
train_data = numpy.load('/data/cifar10/train_X.npy')
train_labels = numpy.load('/data/cifar10/train_y.npy')
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')

train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)

# Create objects to local contrast normalize and augment each batch.
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32), flip=True)

print('Training Model')
for x_batch, y_batch in train_iterator:
    # Reorder the axes so the batch dimension comes last, then augment and
    # contrast normalize the batch before training on it.
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))

    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1 - accuracy)  # monitor takes error instead of accuracy

    if monitor.test:
        monitor.start()
        x_test_batch, y_test_batch = test_iterator.next()
        x_test_batch = x_test_batch.transpose(1, 2, 3, 0)
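# ---------------------------------------------------------------------------
# Hedged sketch (not the actual util.DataAugmenter): from its usage above it
# is assumed to pad each image by a few pixels, take a random crop back to
# the target size, and optionally mirror it horizontally. The standalone
# numpy version below illustrates that idea on a batch whose axes are
# (channels, height, width, batch), matching the transpose in the training
# loop. The name `augment_batch` and its defaults are illustrative only.
import numpy


def augment_batch(x, pad=2, crop_shape=(32, 32), flip=True, rng=numpy.random):
    channels, height, width, batch_size = x.shape
    padded = numpy.zeros(
        (channels, height + 2 * pad, width + 2 * pad, batch_size),
        dtype=x.dtype)
    padded[:, pad:pad + height, pad:pad + width, :] = x
    out = numpy.empty(
        (channels, crop_shape[0], crop_shape[1], batch_size), dtype=x.dtype)
    for b in range(batch_size):
        # Pick a random crop offset for this image.
        row = rng.randint(0, height + 2 * pad - crop_shape[0] + 1)
        col = rng.randint(0, width + 2 * pad - crop_shape[1] + 1)
        crop = padded[:, row:row + crop_shape[0], col:col + crop_shape[1], b]
        # Randomly mirror the image horizontally.
        if flip and rng.rand() > 0.5:
            crop = crop[:, :, ::-1]
        out[:, :, :, b] = crop
    return out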
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 1.0

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(16, (96, 96), color_on=True)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))

    monitor.start()
    # Labels are 1-based here, so shift them to 0-based before training.
    log_prob, accuracy = model.train(x_batch, y_batch - 1)
    monitor.stop(1 - accuracy)  # monitor takes error instead of accuracy

    if monitor.test:
        monitor.start()
        x_test_batch, y_test_batch = test_iterator.next()
        x_test_batch = x_test_batch.transpose(1, 2, 3, 0)
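# ---------------------------------------------------------------------------
# Hedged sketch (not the actual util.Normer2): local contrast normalization
# is taken here to mean subtracting a local Gaussian-weighted mean from each
# pixel and dividing by the local standard deviation, channel by channel.
# The simplified numpy/scipy version below works on a batch with axes
# (channels, height, width, batch); `lcn_batch`, `sigma`, and `eps` are
# illustrative names, not part of the library.
import numpy
from scipy.ndimage import gaussian_filter


def lcn_batch(x, sigma=1.0, eps=1e-4):
    channels, height, width, batch_size = x.shape
    out = numpy.empty(x.shape, dtype=numpy.float32)
    for b in range(batch_size):
        for c in range(channels):
            img = x[c, :, :, b].astype(numpy.float32)
            # Subtract the local mean, then divide by the local std.
            centered = img - gaussian_filter(img, sigma)
            local_var = gaussian_filter(centered ** 2, sigma)
            out[c, :, :, b] = centered / numpy.sqrt(local_var + eps)
    return out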
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path, 'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')

train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)

normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32))

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))

    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1 - accuracy)  # monitor takes error instead of accuracy

    if monitor.test:
        monitor.start()
        x_test_batch, y_test_batch = test_iterator.next()
        x_test_batch = x_test_batch.transpose(1, 2, 3, 0)
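# ---------------------------------------------------------------------------
# Hedged sketch: the split files loaded above ('train_X_split_0.npy',
# 'train_y_split_0.npy') are assumed to be reduced-size subsets of the full
# CIFAR-10 training arrays. One minimal way to generate such splits is shown
# below; `make_reduced_splits`, the output directory, and the number of
# splits are illustrative assumptions.
import os
import numpy


def make_reduced_splits(train_X, train_y, out_dir, num_splits=10, seed=0):
    rng = numpy.random.RandomState(seed)
    indices = rng.permutation(len(train_X))
    split_size = len(train_X) // num_splits
    for i in range(num_splits):
        chunk = indices[i * split_size:(i + 1) * split_size]
        numpy.save(os.path.join(out_dir, 'train_X_split_%d.npy' % i),
                   train_X[chunk])
        numpy.save(os.path.join(out_dir, 'train_y_split_%d.npy' % i),
                   train_y[chunk])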
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(16, (96, 96))

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))

    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch - 1)
    monitor.stop(1 - accuracy)  # monitor takes error instead of accuracy

    if monitor.test:
        monitor.start()
        x_test_batch, y_test_batch = test_iterator.next()
        x_test_batch = x_test_batch.transpose(1, 2, 3, 0)
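# ---------------------------------------------------------------------------
# Hedged sketch (not the actual monitor used above): from its usage, the
# monitor is assumed to time each start()/stop() pair, accumulate the error
# passed to stop(), and raise a `test` flag every so many batches so a
# held-out batch gets evaluated. A minimal stand-in with that interface is
# sketched below; `MinimalMonitor` and `test_every` are illustrative only.
import time


class MinimalMonitor(object):
    def __init__(self, test_every=100):
        self.test_every = test_every
        self.test = False
        self._step = 0
        self._errors = []
        self._start_time = None

    def start(self):
        self._start_time = time.time()

    def stop(self, error):
        elapsed = time.time() - self._start_time
        self._errors.append(error)
        self._step += 1
        self.test = (self._step % self.test_every == 0)
        if self.test:
            recent = self._errors[-self.test_every:]
            mean_error = sum(recent) / float(len(recent))
            print('batch %d: mean train error %.4f (%.3f s/batch)'
                  % (self._step, mean_error, elapsed))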