def cifar10_load_data(path):
    """Loads CIFAR10 dataset.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    dirname = 'cifar-10-batches-py'
    # origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    # path = get_file(dirname, origin=origin, untar=True)
    path_ = os.path.join(path, dirname)

    num_train_samples = 50000

    x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.zeros((num_train_samples,), dtype='uint8')

    for i in range(1, 6):
        fpath = os.path.join(path_, 'data_batch_' + str(i))
        data, labels = cifar10.load_batch(fpath)
        x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
        y_train[(i - 1) * 10000: i * 10000] = labels

    fpath = os.path.join(path_, 'test_batch')
    x_test, y_test = cifar10.load_batch(fpath)

    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    if KB.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)

    return (x_train, y_train), (x_test, y_test)
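# A minimal usage sketch for cifar10_load_data above, assuming the module's
# own imports (numpy as np, os, the cifar10 batch helpers, and the Keras
# backend as KB) are in place and that DATA_ROOT (a hypothetical path) holds
# the extracted 'cifar-10-batches-py' directory. The [0, 1] scaling shown is
# a common preprocessing step, not part of the loader itself.
DATA_ROOT = '/path/to/data'  # assumption: parent directory of cifar-10-batches-py

(x_train, y_train), (x_test, y_test) = cifar10_load_data(DATA_ROOT)

x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

print(x_train.shape, y_train.shape)  # e.g. (50000, 32, 32, 3) (50000, 1) with channels_last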
def load_data(data_path=None):
    if data_path is None:
        path = cifar10_path
    else:
        path = data_path

    num_train_samples = 50000

    x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.zeros((num_train_samples,), dtype='uint8')

    for i in range(1, 6):
        fpath = os.path.join(path, 'data_batch_' + str(i))
        data, labels = cifar10.load_batch(fpath)
        x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
        y_train[(i - 1) * 10000: i * 10000] = labels

    fpath = os.path.join(path, 'test_batch')
    x_test, y_test = cifar10.load_batch(fpath)

    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    if K.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)

    print("loading data done.")
    return (x_train, y_train), (x_test, y_test)
def load_data_said(org='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'):
    """Loads CIFAR10 dataset.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    dirname = 'cifar-10-batches-py'
    origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    path = get_file(dirname, origin=origin, untar=True)

    num_train_samples = 50000

    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.empty((num_train_samples,), dtype='uint8')

    for i in range(1, 6):
        fpath = os.path.join(path, 'data_batch_' + str(i))
        (x_train[(i - 1) * 10000: i * 10000, :, :, :],
         y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)

    fpath = os.path.join(path, 'test_batch')
    x_test, y_test = load_batch(fpath)

    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    if K.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)

    return (x_train, y_train), (x_test, y_test)
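# Usage sketch for load_data_said above: because it calls get_file, the archive
# is downloaded and cached by Keras (typically under ~/.keras/datasets) on the
# first run, so no local path argument is required.
(x_train, y_train), (x_test, y_test) = load_data_said()
print(x_train.dtype, x_train.shape, y_train.shape)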
def load_data():
    path = 'data/cifar-10-batches-py'

    num_train_samples = 50000

    x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.zeros((num_train_samples,), dtype='uint8')

    for i in range(1, 6):
        data, labels = load_batch(os.path.join(path, 'data_batch_' + str(i)))
        x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
        y_train[(i - 1) * 10000: i * 10000] = labels

    x_test, y_test = load_batch(os.path.join(path, 'test_batch'))

    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    # Always convert from channels-first to channels-last layout.
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

    return (x_train, y_train), (x_test, y_test)
def __load_data(self, data_dir=None):
    if data_dir is None:
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    else:
        x_train = np.zeros((50000, 3, 32, 32), dtype='uint8')
        y_train = np.zeros((50000,), dtype='uint8')

        for i in range(1, 6):
            fpath = os.path.join(data_dir, 'data_batch_' + str(i))
            data, labels = cifar10.load_batch(fpath)
            x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
            y_train[(i - 1) * 10000: i * 10000] = labels

        fpath = os.path.join(data_dir, 'test_batch')
        x_test, y_test = cifar10.load_batch(fpath)

        y_train = np.reshape(y_train, (len(y_train), 1))
        y_test = np.reshape(y_test, (len(y_test), 1))

    # Ready the data for use in networks
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)

    # Data comes as channel first. Transpose the dimensions if necessary
    if K.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)

    # Split off validation data in the ratio described in the paper
    x_train, x_val, y_train, y_val = train_test_split(
        x_train, y_train, test_size=0.1)

    return {
        'x_train': x_train,
        'x_test': x_test,
        'x_val': x_val,
        'y_train': y_train,
        'y_test': y_test,
        'y_val': y_val
    }
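# A hedged sketch of how the dictionary returned by __load_data might be
# consumed for training. The architecture and hyperparameters below are
# illustrative assumptions, not taken from the surrounding code; labels are
# already one-hot encoded by to_categorical above, hence categorical_crossentropy.
from keras.models import Sequential
from keras.layers import Flatten, Dense

def train_on_split(data, epochs=5):
    model = Sequential([
        Flatten(input_shape=data['x_train'].shape[1:]),
        Dense(128, activation='relu'),
        Dense(10, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(data['x_train'], data['y_train'],
              validation_data=(data['x_val'], data['y_val']),
              epochs=epochs, batch_size=64)
    return model.evaluate(data['x_test'], data['y_test'])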
def test_time(model):
    path = cifar10_path
    num_samples = 32

    x_ = np.zeros((num_samples, 3, 32, 32), dtype='uint8')
    fpath = os.path.join(path, 'data_batch_' + str(1))
    data, labels = cifar10.load_batch(fpath)
    x_[:, :, :, :] = data[:num_samples, :, :, :]

    if K.image_data_format() == 'channels_last':
        x_ = x_.transpose(0, 2, 3, 1)

    x_ = x_.astype('float32') / 255
    x_mean = np.mean(x_, axis=0)
    x_ -= x_mean

    # Warm-up call so graph construction is not included in the measurement.
    model.predict(x_)

    start_time = time.time()
    model.predict(x_)
    end_time = time.time()
    cprint('Time used:' + str(end_time - start_time), 'red')
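# A small usage sketch for test_time above, assuming the module provides
# cifar10_path, cifar10.load_batch, time, and termcolor's cprint. The model
# below is an arbitrary placeholder; any compiled model accepting CIFAR-10
# inputs (32x32x3 or 3x32x32 depending on the backend layout) would do.
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense
from keras import backend as K

input_shape = (32, 32, 3) if K.image_data_format() == 'channels_last' else (3, 32, 32)
model = Sequential([
    Conv2D(16, (3, 3), activation='relu', input_shape=input_shape),
    Flatten(),
    Dense(10, activation='softmax'),
])
model.compile(optimizer='sgd', loss='categorical_crossentropy')

test_time(model)  # prints the wall-clock time of a single 32-sample predict call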