Example #1
def load_data(data_type, label):

    if label == 'fine':
        classes = 100
        (x_train, y_train), (x_test, y_test) = cifar100.load_data('fine')
    else:
        classes = 20
        (x_train, y_train), (x_test, y_test) = cifar100.load_data('coarse')

    y_train, y_test = one_hot_encoding(y_train, y_test, classes)

    # CIFAR arrays already arrive as (samples, 32, 32, 3); scale pixels to [0, 1]
    x_train = x_train.astype("float32") / 255.0
    x_test = x_test.astype("float32") / 255.0

    if data_type == 'act':
        data = x_test
        return data, data.shape

    if data_type == 'test':
        return x_test, y_test
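
Example #1 leans on a one_hot_encoding helper that is not shown; a minimal sketch, assuming it simply wraps Keras' to_categorical:

from tensorflow.keras.utils import to_categorical

def one_hot_encoding(y_train, y_test, classes):
    # turn (n, 1) integer label arrays into (n, classes) one-hot matrices
    return to_categorical(y_train, classes), to_categorical(y_test, classes)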
Example #2
def Cifar100(home_path):
    # assumed imports for this snippet: scipy.io, numpy, and TensorFlow 1.x
    import scipy.io as sio
    import numpy as np
    import tensorflow as tf
    from tensorflow.keras.datasets.cifar100 import load_data

    (train_images, train_labels), (val_images, val_labels) = load_data()

    teacher = sio.loadmat(home_path + '/pre_trained/ResNet32.mat')

    def pre_processing(image, is_training):
        with tf.variable_scope('preprocessing'):
            image = tf.cast(image, tf.float32)
            image = (image - np.array([112.4776, 124.1058, 129.3773])
                     ) / np.array([70.4587, 65.4312, 68.2094])

            def augmentation(image):
                image = tf.image.random_flip_left_right(
                    image)  # tf.__version__ > 1.10
                sz = tf.shape(image)
                image = tf.pad(image, [[0, 0], [4, 4], [4, 4], [0, 0]],
                               'REFLECT')
                image = tf.random_crop(image, sz)
                return image

            image = tf.cond(is_training, lambda: augmentation(image),
                            lambda: image)
        return image

    return train_images, train_labels, val_images, val_labels, pre_processing, teacher
Example #3
    def _cifar100_dataset(self, partition):
        """Returns a partition of the CIFAR-100 `Dataset`."""

        cifar100_data = None
        try:
            cifar100_data = cifar100.load_data()
            tf.logging.info('Loaded cifar100.')
        except:  # pylint: disable=bare-except
            tf.logging.info(
                'Cannot load cifar100 from the internet. Creating dummy data '
                'for testing.')
            data = np.zeros((3, 32, 32, 3))
            labels = np.array([[47], [52], [5]])
            data[:, 0, 0] = [220, 25, 47]
            data[:, -1, 0, 0] = 128
            cifar100_data = ((data, labels), (data, labels))

        (x_train, y_train), (x_test, y_test) = cifar100_data

        x = None
        y = None
        if partition == 'train':
            x, y = x_train, y_train
        else:
            x, y = x_test, y_test

        dataset = tf.data.Dataset.from_tensor_slices((x, y.astype(np.int32)))
        return dataset.cache()
Example #4
def cifar100_val_generator(data_generator, ds_metainfo, batch_size):
    """
    Create image generator for validation subset.

    Parameters:
    ----------
    data_generator : ImageDataGenerator
        Image transform sequence.
    ds_metainfo : DatasetMetaInfo
        CIFAR-100 dataset metainfo.
    batch_size : int
        Batch size.

    Returns:
    -------
    NumpyArrayIterator
        Iterator over the validation images and labels.
    """
    assert (ds_metainfo is not None)
    _, (x_test, y_test) = cifar100.load_data()
    generator = data_generator.flow(x=x_test,
                                    y=y_test,
                                    batch_size=batch_size,
                                    shuffle=False)
    return generator
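
A minimal invocation of the generator above, assuming a plain ImageDataGenerator; ds_metainfo only has to be non-None here, so any placeholder object works:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

val_gen = cifar100_val_generator(ImageDataGenerator(), object(), batch_size=128)
x_batch, y_batch = next(val_gen)  # shapes (128, 32, 32, 3) and (128, 1)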
Example #5
def download_dataset():
    print("Downloading dataset of %d samples to:\n\t%s" %
          (DATASET_SIZE, DATASET_DIR))
    print("and corrupting the data (%d%% duplicates)" %
          (100 * CORRUPTION_RATE))

    # if not empty, delete current contents
    etau.ensure_empty_dir(DATASET_DIR, cleanup=True)

    (_, _), (x_test, y_test) = cifar100.load_data(label_mode="fine")

    dataset_size = min(DATASET_SIZE, 10000)

    x = x_test[:dataset_size, :]
    y = y_test[:dataset_size, :]

    for i in range(x.shape[0]):
        if random.random() > 0.95:
            # pick a random sample 5% of the time (randint is inclusive on both ends)
            idx = random.randint(0, x.shape[0] - 1)
        else:
            idx = i

        # get label
        fine_label = FINE_CLASSES[y[idx, 0]]

        # read image
        img = x[idx, :]

        rel_img_path = os.path.join(fine_label, "%d.jpg" % i)
        abs_img_path = os.path.join(DATASET_DIR, rel_img_path)

        etai.write(img, abs_img_path)

    print("Download successful")
Example #6
    def __init__(self):
        print("===Loading cifar100 Dataset===")

        (self.images_train,
         self.labels_train), (self.images_test,
                              self.labels_test) = cifar100.load_data()
        #self.images_train = np.expand_dims(self.images_train, axis=3) / 255.0
        #self.images_test = np.expand_dims(self.images_test, axis=3) / 255.0
        self.images_train, self.images_test = self.images_train / 255.0, self.images_test / 255.0
        self.labels_train = np.expand_dims(self.labels_train, axis=1)

        self.unique_train_label = np.unique(self.labels_train)
        self.unique_test_label = np.unique(self.labels_test)

        self.map_train_label_indices = {
            label: np.flatnonzero(self.labels_train == label)
            for label in self.unique_train_label
        }
        self.map_test_label_indices = {
            label: np.flatnonzero(self.labels_test == label)
            for label in self.unique_test_label
        }

        # CIFAR arrays are (num_examples, height, width, channels); the original
        # names assumed an Omniglot-style (classes, examples, w, h) layout
        self.n_examples, self.h, self.w, self.c = self.images_train.shape
        self.n_val, _, _, _ = self.images_test.shape
        print("Test shape: ", self.images_test.shape)

        print("Images train :", self.images_train.shape)
        print("Labels train :", self.labels_train.shape)
        print("Images test  :", self.images_test.shape)
        print("Labels test  :", self.labels_test.shape)
        print("Unique label :", self.unique_train_label)
Example #7
    def __init__(self, extended, datagen_kwargs, batch_size, seed=None):
        if extended:
            (_, _), (X_test, Y_test) = cifar100.load_data()
            self.nclasses = 100
        else:
            (_, _), (X_test, Y_test) = cifar10.load_data()
            self.nclasses = 10

        # All our images are floats in [0, 1]
        X_test = X_test.astype(np.float32) / 255
        # All our labels are categorical
        Y_test = to_categorical(Y_test)

        datagen = ImageDataGenerator(**datagen_kwargs)
        clean_datagen = ImageDataGenerator()

        self.test_datagen = datagen.flow(X_test,
                                         Y_test,
                                         batch_size=batch_size,
                                         seed=seed)
        self.clean_test_datagen = clean_datagen.flow(X_test,
                                                     Y_test,
                                                     batch_size=batch_size,
                                                     seed=seed)
        self.input_shape = X_test.shape[1:]
        self.steps_per_epoch = X_test.shape[0] // batch_size
        self.validation_steps = X_test.shape[0] // batch_size
Example #8
def load_dataset():
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    fs_generator = ImageDataGenerator(
        width_shift_range=5. / 32.,
        height_shift_range=5. / 32.,
        fill_mode='reflect',
        horizontal_flip=True,
    )
    generator = ImageDataGenerator(
        width_shift_range=5. / 32.,
        height_shift_range=5. / 32.,
        fill_mode='reflect',
        horizontal_flip=True,
    )
    y_train = np.reshape(y_train, [-1, 1])
    y_test = np.reshape(y_test, [-1, 1])
    x_train = x_train / 127.5 - 1.
    x_test = x_test / 127.5 - 1.
    output = {
        'train': {
            'data': x_train,
            'label': y_train
        },
        'test': {
            'data': x_test,
            'label': y_test
        },
        'generator': generator,
        'fs_generator': fs_generator
    }
    return output
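
A sketch of consuming the returned dictionary; the model itself is elided, this only shows how the generator and the [-1, 1]-scaled arrays fit together:

data = load_dataset()
flow = data['generator'].flow(data['train']['data'],
                              data['train']['label'],
                              batch_size=128)
x_batch, y_batch = next(flow)  # images in [-1, 1], integer labels of shape (128, 1)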
Example #9
    def __init__(self, extended, datagen_kwargs, batch_size, val_split=0.1):
        if extended:
            (X_train, Y_train), (_, _) = cifar100.load_data()
            self.nclasses = 100
        else:
            (X_train, Y_train), (_, _) = cifar10.load_data()
            self.nclasses = 10

        # Divide train/val
        X_train, X_test, Y_train, Y_test = train_test_split(
            X_train,
            Y_train,
            test_size=val_split,
            stratify=Y_train,
            random_state=2)

        X_train = X_train / 255
        X_test = X_test / 255

        # All our labels are categorical and all our images are floats
        Y_train = to_categorical(Y_train)
        Y_test = to_categorical(Y_test)

        datagen = ImageDataGenerator(**datagen_kwargs)
        datagen_for_test = ImageDataGenerator()

        self.input_shape = X_train.shape[1:]
        self.train_dataset = datagen.flow(X_train,
                                          Y_train,
                                          batch_size=batch_size)
        self.test_dataset = datagen_for_test.flow(X_test,
                                                  Y_test,
                                                  batch_size=batch_size)
        self.steps_per_epoch = X_train.shape[0] // batch_size
        self.validation_steps = X_test.shape[0] // batch_size
Example #10
def load_CIFAR_data(data_type="CIFAR10",
                    label_mode="fine",
                    standarized=False,
                    verbose=False):
    if data_type == "CIFAR10":
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    elif data_type == "CIFAR100":
        (X_train,
         y_train), (X_test, y_test) = cifar100.load_data(label_mode=label_mode)
    else:
        print("Unknown Data type. Stopped!")
        return None

    y_train = np.squeeze(y_train)
    y_test = np.squeeze(y_test)
    # subtract the per-pixel mean; values end up roughly in [-1/2, 1/2]
    if standarized:
        X_train = X_train / 255
        X_test = X_test / 255
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_test -= mean_image

    if verbose:
        print("X_train shape :", X_train.shape)
        print("X_test shape :", X_test.shape)
        print("y_train shape :", y_train.shape)
        print("y_test shape :", y_test.shape)

    return X_train, y_train, X_test, y_test
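
A typical call, using the names defined above (the keyword keeps the snippet's original 'standarized' spelling):

X_train, y_train, X_test, y_test = load_CIFAR_data(data_type="CIFAR100",
                                                   label_mode="fine",
                                                   standarized=True,
                                                   verbose=True)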
Example #11
def save_cifar100():
    OUT_DIR = 'cifar100'

    # Load data from keras API
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    # define class names from ex: https://github.com/keras-team/keras/issues/2653
    class_list = [
        'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee',
        'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus',
        'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle',
        'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab',
        'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish',
        'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard',
        'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man',
        'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom',
        'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear',
        'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine',
        'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea',
        'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake',
        'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper',
        'table', 'tank', 'telephone', 'television', 'tiger', 'tractor',
        'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale',
        'willow_tree', 'wolf', 'woman', 'worm'
    ]

    # make train/test dirs and class dirs
    for cid, class_name in enumerate(class_list):
        os.makedirs(os.path.join(OUT_DIR, 'train',
                                 '{:02d}_{}'.format(cid, class_name)),
                    exist_ok=True)
        os.makedirs(os.path.join(OUT_DIR, 'test',
                                 '{:02d}_{}'.format(cid, class_name)),
                    exist_ok=True)

    # convert train data
    for num, (x_data, y_data) in enumerate(zip(x_train, y_train)):
        cid = y_data[0]  # y_data is a 1-element array
        # make file path
        fpath = os.path.join(OUT_DIR, 'train',
                             '{:02d}_{}'.format(cid, class_list[cid]),
                             'train_{:05d}.png'.format(num))
        # convert numpy to pillow and save
        Image.fromarray(x_data).save(fpath)

    # convert test data
    for num, (x_data, y_data) in enumerate(zip(x_test, y_test)):
        cid = y_data[0]  # y_data is a 1-element array
        # make file path
        fpath = os.path.join(OUT_DIR, 'test',
                             '{:02d}_{}'.format(cid, class_list[cid]),
                             'test_{:05d}.png'.format(num))
        # convert numpy to pillow and save
        Image.fromarray(x_data).save(fpath)

    print()
    print('Saved to ' + OUT_DIR + '/')
    print()
Example #12
def load_cifar100():
    # load dataset
    (trainX10, trainY10), (testX10, testY10) = cifar10.load_data()

    (trainX100,
     trainY100), (testX100, testY100) = cifar100.load_data(label_mode='coarse')
    (trainX100,
     trainY100f), (testX100, testY100f) = cifar100.load_data(label_mode='fine')
    trainIndecies = []
    testIndecies = []
    for i in range(len(trainY100)):
        if trainY100[i] != 7:
            trainIndecies.append(i)
    for i in range(len(testY100)):
        if testY100[i] != 7:
            testIndecies.append(i)

    # drop every sample outside coarse class 7 (insects)
    trainY100f = np.delete(trainY100f, trainIndecies, 0)
    trainX100 = np.delete(trainX100, trainIndecies, 0)
    testY100f = np.delete(testY100f, testIndecies, 0)
    testX100 = np.delete(testX100, testIndecies, 0)

    # remap the five insect fine labels {6, 7, 14, 18, 24} onto 10-14 so they
    # extend the ten CIFAR-10 classes to a 15-class label space
    trainY100f = np.where(trainY100f == 6, 10, trainY100f)
    trainY100f = np.where(trainY100f == 7, 11, trainY100f)
    trainY100f = np.where(trainY100f == 14, 12, trainY100f)
    trainY100f = np.where(trainY100f == 18, 13, trainY100f)
    trainY100f = np.where(trainY100f == 24, 14, trainY100f)

    testY100f = np.where(testY100f == 6, 10, testY100f)
    testY100f = np.where(testY100f == 7, 11, testY100f)
    testY100f = np.where(testY100f == 14, 12, testY100f)
    testY100f = np.where(testY100f == 18, 13, testY100f)
    testY100f = np.where(testY100f == 24, 14, testY100f)

    trainX = np.concatenate((trainX10, trainX100), axis=0)
    trainY = np.concatenate((trainY10, trainY100f), axis=0)
    testX = np.concatenate((testX10, testX100), axis=0)
    testY = np.concatenate((testY10, testY100f), axis=0)

    # one hot encode data
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)

    return trainX, trainY, testX, testY
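
After the remapping, labels 0-9 are the ten CIFAR-10 classes and 10-14 are the five insect fine classes (bee, beetle, butterfly, caterpillar, cockroach); a quick sanity check, assuming the function as written:

trainX, trainY, testX, testY = load_cifar100()
assert trainY.shape[1] == 15     # 10 CIFAR-10 classes + 5 remapped insect classes
assert trainX.shape[0] == 52500  # 50000 CIFAR-10 images + 5 x 500 insect images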
Example #13
    def read_train_data(self, sample_count):
        from tensorflow.keras.datasets import cifar100

        data = cifar100.load_data()
        data = (
            self.preprocess(*data[0], sample_count),
            self.preprocess(*data[1], None),
        )
        return data
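
read_train_data relies on a preprocess method that is not part of the snippet; a minimal sketch, assuming it caps the sample count and rescales:

    def preprocess(self, images, labels, sample_count):
        # keep only the first sample_count examples when a cap is given
        if sample_count is not None:
            images, labels = images[:sample_count], labels[:sample_count]
        # scale uint8 pixels to [0, 1]
        return images.astype("float32") / 255.0, labels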
Example #14
def load_cifar100_images():
    """ Loads 0-1 normalized CIFAR images
    """
    (cifar_x, cifar_y), (cifar_x_test, cifar_y_test) = cifar100.load_data()
    # Scale everything to 0-1
    cifar_x, cifar_x_test = normalize_0_1([cifar_x, cifar_x_test])
    return ((cifar_x, transform_to_one_hot(cifar_y, depth=100)),
            (cifar_x_test, transform_to_one_hot(cifar_y_test, depth=100)))
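
Both helpers used here are external; hedged sketches consistent with how they are called above:

from tensorflow.keras.utils import to_categorical

def normalize_0_1(arrays):
    # scale each uint8 array in the list to float32 values in [0, 1]
    return [a.astype("float32") / 255.0 for a in arrays]

def transform_to_one_hot(labels, depth):
    # one-hot encode integer labels into depth classes
    return to_categorical(labels, num_classes=depth)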
Example #15
def main():
    parser = make_parser()
    args = parser.parse_args()

    in_shape = [3, 32, 32]
    if args.dataset.lower() == "cifar10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        nClasses = 10
    else:
        assert (args.dataset.lower() == "cifar100"
                ), "Only cifar10 and cifar100 are supported"
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        nClasses = 100

    # Switch from channel_last to channel_first
    x_train = np.moveaxis(x_train, 3, 1)
    x_test = np.moveaxis(x_test, 3, 1)
    assert x_train.shape[1] == 3, x_train.shape
    assert x_test.shape[1] == 3, x_test.shape

    # (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Some pre-processing differences from the original
    # TODO fit it on the data to do featurewise centering, and normalization
    datagen = ImageDataGenerator(
        horizontal_flip=True,
        featurewise_center=True,
        featurewise_std_normalization=True,
        data_format="channels_first",
    )

    def get_model(args):
        model = iRevNet(
            nBlocks=args.nBlocks,
            nStrides=args.nStrides,
            nChannels=args.nChannels,
            nClasses=nClasses,
            init_ds=args.init_ds,
            dropout_rate=0.1,
            affineBN=True,
            in_shape=in_shape,
            mult=args.bottleneck_mult,
        )
        fname = "i-revnet-" + str(sum(args.nBlocks) + 1)
        return model, fname

    model, fname = get_model(args)

    train(
        (model, fname),
        (x_train, y_train),
        datagen,
        epochs=args.epochs,
        batch_size=args.batch,
        learning_rate=args.lr,
    )
Example #16
    def get_data(self):
        (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
        # convert the labels to one-hot vectors
        y_train = to_categorical(y_train, self.num_classes)
        y_test = to_categorical(y_test, self.num_classes)
        # scale every value into the range 0-1
        x_train = x_train.astype('float32') / 255
        x_test = x_test.astype('float32') / 255
        # flatten the image data
        x_train = x_train.reshape([len(x_train), self.num_input])
        x_test = x_test.reshape([len(x_test), self.num_input])
        return x_train, y_train, x_test, y_test
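
get_data() reads self.num_classes and self.num_input from its class; a hypothetical skeleton showing just the two attributes it expects:

class Cifar100Flattened:  # hypothetical host class
    num_classes = 100        # fine label count
    num_input = 32 * 32 * 3  # length of one flattened 32x32 RGB image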
Example #17
def load_images():
    (train_images, train_labels), (test_images,
                                   test_labels) = cifar100.load_data()
    train_images = train_images.astype(np.float32)
    test_images = test_images.astype(np.float32)

    (train_images, test_images) = normalization(train_images, test_images)

    train_labels = to_categorical(train_labels, 100)
    test_labels = to_categorical(test_labels, 100)

    return train_images, train_labels, test_images, test_labels
Example #18
def main(argv):

    batch_size = 50
    epochs = 51
    #(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    #(x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32') / 255.0
    x_test = x_test.astype('float32') / 255.0
    input_shape = x_train.shape[1:]
    num_classes = len(np.unique(y_train))
    num_train_samples = x_train.shape[0]

    # Convert y to categorical one-hot vectors
    y_train = to_categorical(y_train, num_classes=num_classes)
    y_test = to_categorical(y_test, num_classes=num_classes)

    # Create and compile model

    model = dense_model1(input_shape=input_shape,
                         n_classes=num_classes,
                         dropout=0.5,
                         model_name=get_project_name(argv[0]))

    model.compile(loss=categorical_crossentropy,
                  optimizer='adam',
                  metrics=['accuracy'])

    # Print summary and save model as plot and node-link-graph
    project_paths = get_project_paths(argv[0], to_tmp=False)
    #save_graph_plot(model, project_paths["plots"] + "/model.ps")
    #save_graph_json(model, project_paths["weights"] + "/model.json")

    weight = model.get_weights()

    logs = project_paths["weights"] + "/mod_hist_U5_mnist_acc.csv"
    #np.savetxt('weight.csv' , weight , fmt='%s', delimiter=',')
    csv_logger = CSVLogger(logs, append=True)
    # Train model while saving weights as checkpoints after each epoch
    model.fit(
        x_train,
        y_train,
        # integer division: 5 epochs per full pass over the dataset
        steps_per_epoch=num_train_samples // (batch_size * 5),
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        #               callbacks=[ModelCheckpoint(
        #                   project_paths["checkpoints"] + "/weights_epoch-{epoch:02d}.hdf5",
        #                   save_weights_only=True,
        #                   save_freq='epoch'),csv_logger],
        validation_data=(x_test, y_test))
Example #19
def get_dataset():
    (X_train, y_train), (X_test, y_test) = cifar100.load_data()

    # per-pixel standardization: mean and std are taken across the sample axis
    X_train = X_train.astype('float32')
    X_train = (X_train - X_train.mean(axis=0)) / (X_train.std(axis=0))

    X_test = X_test.astype('float32')
    X_test = (X_test - X_test.mean(axis=0)) / (X_test.std(axis=0))

    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    return X_train, y_train, X_test, y_test
Example #20
def create_cifar100():
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    x_train = x_train / 255.0
    x_test = x_test / 255.0

    x_train = x_train.reshape((x_train.shape[0], 32, 32, 3))
    x_test = x_test.reshape((x_test.shape[0], 32, 32, 3))

    y_train = tf.keras.utils.to_categorical(y_train, num_classes=100)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=100)

    return ((x_train, y_train), (x_test, y_test))
Example #21
    def __init__(self):

        np.random.seed(31415)

        (self.x_train, self.y_train), (self.x_test,
                                       self.y_test) = cifar100.load_data()
        self.index = np.arange(self.x_train.shape[0])
        self.x_train, self.y_train = self.get_batch()
        self.x_train, self.y_train, self.x_val, self.y_val = self.split_data(
            self.x_train.shape[0])

        # num_classes is assumed to be a module-level constant (100 for fine labels)
        self.y_train = keras.utils.to_categorical(self.y_train, num_classes)
        self.y_val = keras.utils.to_categorical(self.y_val, num_classes)
        self.y_test = keras.utils.to_categorical(self.y_test, num_classes)
Example #22
def loadAndPrep100():
    # x data has shape (num samples, height, width, channels) = (50000 or 10000, 32, 32, 3)
    # both splits arrive as uint8
    (xtrain, ytrain), (xtest, ytest) = cifar100.load_data()

    # use one-hot encoding for y
    ytrain = np_utils.to_categorical(ytrain)
    ytest = np_utils.to_categorical(ytest)

    # this is for the resnet only (since vgg was abandoned)
    # changes pixel values to be within a certain range
    xtrain = preprocess_input(xtrain)
    xtest = preprocess_input(xtest)

    return xtrain, ytrain, xtest, ytest
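
The snippet assumes np_utils and preprocess_input are imported elsewhere; plausible imports, assuming the older Keras np_utils alias and the ResNet50 preprocessor the comment refers to:

from keras.utils import np_utils  # older Keras; newer code calls keras.utils.to_categorical
from keras.applications.resnet50 import preprocess_input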
Example #23
def load_data():
    # load data
    (trainX, trainY), (testX, testY) = cifar100.load_data()
    #one-hot encoding
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    # convert from integers to floats
    train_norm = trainX.astype('float32')
    test_norm = testX.astype('float32')
    # normalize to range 0-1
    train_norm = train_norm / 255.0
    test_norm = test_norm / 255.0
    # apply the backbone's preprocess_input on top of the 0-1 scaling
    # (note: most keras.applications preprocessors expect raw 0-255 input)
    train_norm = preprocess_input(train_norm)
    test_norm = preprocess_input(test_norm)
    return (train_norm, trainY), (test_norm, testY)
Example #24
def load_cifar(label, num):
    if num == 10:
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    else:
        (x_train, y_train), (x_test,
                             y_test) = cifar100.load_data(label_mode='fine')

    train_mask = [y[0] == label for y in y_train]
    test_mask = [y[0] == label for y in y_test]

    x_data = np.concatenate([x_train[train_mask], x_test[test_mask]])
    y_data = np.concatenate([y_train[train_mask], y_test[test_mask]])

    x_data = (x_data.astype('float32') - 127.5) / 127.5

    return (x_data, y_data)
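
Example usage: collect every image of a single class, scaled to [-1, 1] (a common GAN input range); fine label 0 is 'apple' in the class list from Example #11:

x_apples, y_apples = load_cifar(label=0, num=100)
print(x_apples.shape)  # (600, 32, 32, 3): 500 train + 100 test images per fine class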
Example #25
def resize_cifar100(fine_labelled=True, size=(256, 256)):
    label_mode = 'fine' if fine_labelled else 'coarse'
    label = fine_label if fine_labelled else coarse_label
    (x_train, y_train), (x_test,
                         y_test) = cifar100.load_data(label_mode=label_mode)
    for i, (data, label_idx) in enumerate(zip(x_train, y_train)):
        resized_image = cv2.resize(data, size)
        # Keras delivers RGB, while cv2.imwrite expects BGR; convert first
        resized_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(
            "../dataset/cifar100/{}/train/{}.{}.jpg".format(
                label_mode, label[label_idx[0]], i), resized_image)

    for i, (data, label_idx) in enumerate(zip(x_test, y_test)):
        resized_image = cv2.resize(data, size)
        resized_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(
            "../dataset/cifar100/{}/test/{}.{}.jpg".format(
                label_mode, label[label_idx[0]], i), resized_image)
Example #26
    def load_data(self):
        """
        Load data from CIFAR100 package

        # Returns:
            all_data : train data, train label, test data and test labels
        """
        ((self._train_data, self._train_labels),
         (self._test_data, self._test_labels)) = cifar100.load_data()

        self._train_labels = tf.keras.utils.to_categorical(self._train_labels)
        self._test_labels = tf.keras.utils.to_categorical(self._test_labels)

        self.shuffle()

        return self._train_data, self._train_labels, self._test_data, self._test_labels
Example #27
def generate_dataset(dataset_params):
    if dataset_params["name"] == "cifar100":
        dataset_root_dir = dataset_params["path"]
        train_path = os.path.join(dataset_root_dir, "train")
        test_path = os.path.join(dataset_root_dir, "test")

        #   download CIFAR100 files from the keras dataset repo
        (x_train, y_train), (x_test,
                             y_test) = cifar100.load_data(label_mode='fine')

        # creating train and test folder
        save_files(train_path, x_train)
        save_files(test_path, x_test)

        tf.logging.info("  >> Cifar images saved to  datasets directory " +
                        dataset_root_dir)
    elif dataset_params["name"] == "cifar10":
        class_details = []
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

        category_counter = {}
        num_per_category = round(dataset_params["dataset_size"] / 10)
        c_counter = 0
        f_utils.mkdir(dataset_params["path"])

        for i, val in enumerate(list(y_train)):
            val = val[0]
            # make sure a counter exists for this class before checking the cap,
            # so the first image of each class is also saved
            category_counter.setdefault(val, 0)
            if category_counter[val] < num_per_category:
                class_details.append({str(c_counter): str(val)})
                category_counter[val] = category_counter[val] + 1
                img = Image.fromarray(x_train[i], 'RGB')
                img.save(dataset_params["path"] + "/" + str(c_counter) +
                         '.jpg')
                c_counter += 1
                if c_counter >= dataset_params["dataset_size"]:
                    break

        f_utils.save_json_file(
            os.path.join(dataset_params["path"], "classes.json"),
            class_details)

        tf.logging.info("  >> Cifar10 images saved to  datasets directory " +
                        dataset_params["path"])
Example #28
def load_dataset(ds):
    if ds == 'mnist':
        from tensorflow.keras.datasets import mnist
        return mnist.load_data(), 10
    elif ds == 'fashion-mnist':
        from tensorflow.keras.datasets import fashion_mnist
        return fashion_mnist.load_data(), 10
    elif ds == 'cifar10':
        from tensorflow.keras.datasets import cifar10
        return cifar10.load_data(), 10
    elif ds == 'cifar100':
        from tensorflow.keras.datasets import cifar100
        return cifar100.load_data(), 100
    else:
        print('ERROR: Unknown dataset specified:', ds)
        sys.exit(1)
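
Usage sketch for the dispatcher above:

data, num_classes = load_dataset('cifar100')
(x_train, y_train), (x_test, y_test) = data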
Example #29
    def cifar100(self, epochs=20):
        """ Train on CIFAR-100
            epochs : number of epochs for full training
        """
        from tensorflow.keras.datasets import cifar100
        import numpy as np
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = (x_train / 255.0).astype(np.float32)
        x_test  = (x_test  / 255.0).astype(np.float32)

        print("Warmup the model for numerical stability")
        self.warmup(x_train, y_train)

        print("Full training")
        self.compile()
        self.model.fit(x_train, y_train, epochs=epochs, batch_size=32, validation_split=0.1, verbose=1)
        self.model.evaluate(x_test, y_test)
Example #30
    def load_cifar100(self):
        """
        Load the CIFAR-100 dataset
        """
        logging.info("loading CIFAR-100 dataset...")

        # Load the data
        (self.train_x, self.train_y), (self.test_x,
                                       self.test_y) = cifar100.load_data()

        # Scale the data to the range [0, 1]
        self.train_x = self.train_x.astype("float32") / 255.0
        self.test_x = self.test_x.astype("float32") / 255.0

        self.height = 32
        self.width = 32
        self.depth = 3
        self.classes_cnt = 100