Example #1
    def __init__(self):
        self.image_shape = (config.IMG_HEIGHT, config.IMG_WIDTH,
                            config.CHANNELS)
        self.optimizer = tf.keras.optimizers.RMSprop(
            learning_rate=config.LEARNING_RATE)
        self.kernel_init = tf.keras.initializers.RandomNormal(stddev=0.02)
        self.constraint = ClipConstraint(0.01)

        print("Loading Data...")
        (self.train_images, _), (_, _) = fashion_mnist.load_data()

        print("Normalizing Data...")
        self.train_images = self.train_images / 127.5 - 1.

        self.train_images = np.expand_dims(self.train_images, axis=3)
        print("Train Data Shape : ", self.train_images.shape)
        print()

        print("Building Generator...")
        self.generator = self.build_generator()

        print("Building Discriminator...")
        self.discriminator = self.build_discriminator()

        self.generator_losses = []
        self.discriminator_losses = []
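
The snippet above relies on a ClipConstraint class that is not shown. A minimal sketch of what it likely looks like, assuming the usual WGAN weight-clipping behaviour (clip every weight into [-c, c]):

import tensorflow as tf

class ClipConstraint(tf.keras.constraints.Constraint):
    """Clip layer weights to the range [-clip_value, clip_value] after each update."""

    def __init__(self, clip_value):
        self.clip_value = clip_value

    def __call__(self, weights):
        return tf.clip_by_value(weights, -self.clip_value, self.clip_value)

    def get_config(self):
        return {'clip_value': self.clip_value}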
Example #2
def load_image(data_source: str):
    """
    Loads one of the following image datasets: {mnist, famnist, cifar10}.
    Normalizes the data. Returns X and y.
    """
    reshape_shape = -1, 28, 28, 1

    if data_source == "mnist":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

    elif data_source == "famnist":
        (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

    elif data_source == "cifar10":
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        reshape_shape = -1, 32, 32, 3

    else:
        raise ValueError("No valid `data_source`.")

    X = np.concatenate((
        X_train,
        X_test))  # Combine train/test to make new train/test/validate later on
    y = np.concatenate((y_train, y_test))

    X = X.reshape(reshape_shape)
    X = X / 255  # /= is not available when casting int to float: https://stackoverflow.com/a/48948461/10603874
    y = y.reshape(y.shape[0], )

    return X, y
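
A possible follow-up for the train/test/validate split mentioned in the comment (an assumption, not part of the original; the split sizes are illustrative):

from sklearn.model_selection import train_test_split

X, y = load_image("famnist")
# 70% train, then split the remainder evenly into validation and test.
X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.3, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_rest, y_rest, test_size=0.5, random_state=42)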
Example #3
    def load_dataset_expanddim(self, random_state=42):
        (x_train, y_train), (x_test, y_test) = dataset.load_data()

        # Expand dimensions of datasets
        x_train_1 = np.expand_dims(x_train, axis=3)
        x_test_1 = np.expand_dims(x_test, axis=3)

        # Rescale the images from [0, 255] to the [0.0, 1.0] range
        # (rescale the expanded arrays so the added channel axis is kept).
        x_train_1 = np.array(x_train_1, dtype=np.float32) / 255.0
        x_test_1 = np.array(x_test_1, dtype=np.float32) / 255.0

        # One-hot encode the labels.
        y_train_1 = tf.keras.utils.to_categorical(y_train, 10, dtype=np.uint8)
        y_test_1 = tf.keras.utils.to_categorical(y_test, 10, dtype=np.uint8)

        x_val, x_test, y_val, y_test = train_test_split(
            x_test_1, y_test_1, test_size=0.2, random_state=random_state)

        print("Shape of original training examples:", np.shape(x_train_1))
        print("Shape of original validation examples:", np.shape(x_val))
        print("Shape of original test examples:", np.shape(x_test))
        print("Shape of original training result:", np.shape(y_train_1))
        print("Shape of original validation result:", np.shape(y_val))
        print("Shape of original test result:", np.shape(y_test))

        return (x_train_1, y_train_1), (x_val, y_val), (x_test, y_test)
Example #4
def load_image(source: str):
    """
    Loads the image datasets: mnist, famnist, cifar10.
    :return:  (X_train, y_train, X_test, y_test)
    """
    reshape_shape = -1, 28, 28, 1

    if source == "mnist":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

    elif source == "famnist":
        (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

    elif source == "cifar10":
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        reshape_shape = -1, 32, 32, 3

    else:
        raise ValueError("Specify a valid source.")

    X_train = X_train.reshape(reshape_shape).astype(np.float32)
    X_test = X_test.reshape(reshape_shape).astype(np.float32)

    X_train /= 255
    X_test /= 255

    y_train = y_train.reshape(y_train.shape[0], ).astype(np.int32)
    y_test = y_test.reshape(y_test.shape[0], ).astype(np.int32)

    return X_train, y_train, X_test, y_test
Example #5
    def _load_data(self):  # pragma: no cover
        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

        train = Dataset(x_train, y_train, self.x_type, self.y_type)
        test = Dataset(x_test, y_test, self.x_type, self.y_type)

        return train, test, None
Example #6
def load_dataset():
    (X_train, y_train), (X_test, y_test) = fm.load_data()

    X_train = X_train.astype('float32') / 255.0
    X_test = X_test.astype('float32') / 255.0

    X_train = np.expand_dims(X_train, axis=3)
    X_test = np.expand_dims(X_test, axis=3)

    label_binarizer = LabelBinarizer()
    y_train = label_binarizer.fit_transform(y_train)
    y_test = label_binarizer.transform(y_test)  # reuse the binarizer fitted on the training labels

    (X_train, X_val, y_train, y_val) = train_test_split(X_train,
                                                        y_train,
                                                        train_size=0.8)

    train_ds = (tf.data.Dataset.from_tensor_slices((X_train, y_train)))
    val_ds = (tf.data.Dataset.from_tensor_slices((X_val, y_val)))
    test_ds = (tf.data.Dataset.from_tensor_slices((X_test, y_test)))

    train_ds = (train_ds.shuffle(
        buffer_size=BUFFER_SIZE).batch(BATCH_SIZE).prefetch(
            buffer_size=BUFFER_SIZE))
    val_ds = (val_ds.batch(BATCH_SIZE).prefetch(buffer_size=BUFFER_SIZE))
    test_ds = test_ds.batch(BATCH_SIZE)

    return train_ds, val_ds, test_ds
Example #7
def fmnist_sandal_sneaker():
    """
    Classes:
    0	T-shirt/top
    1	Trouser
    2	Pullover
    3	Dress
    4	Coat
    5	Sandal
    6	Shirt
    7	Sneaker
    8	Bag
    9	Ankle boot

    train: (12000, 784), test: (2000, 784)
    """
    eps_dataset = 0.1
    classes = [5, 7]  # 5 is 1, 7 is -1 in the binary classification scheme

    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
    X_train, X_test = X_train.astype(np.float64) / 255.0, X_test.astype(
        np.float64) / 255.0
    X_train = np.reshape(X_train, [X_train.shape[0], -1])
    X_test = np.reshape(X_test, [X_test.shape[0], -1])

    X_train, y_train, X_test, y_test = binary_from_multiclass(
        X_train, y_train, X_test, y_test, classes)
    return X_train, y_train, X_test, y_test, eps_dataset
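
binary_from_multiclass is not shown here; a rough sketch of what it presumably does, given the docstring ("5 is 1, 7 is -1") and the reported shapes:

def binary_from_multiclass(X_train, y_train, X_test, y_test, classes):
    # Keep only the two requested classes and map the first to +1, the second to -1.
    pos = classes[0]  # here 5 (Sandal) -> +1, 7 (Sneaker) -> -1
    train_mask = np.isin(y_train, classes)
    test_mask = np.isin(y_test, classes)
    X_train, y_train = X_train[train_mask], y_train[train_mask]
    X_test, y_test = X_test[test_mask], y_test[test_mask]
    y_train = np.where(y_train == pos, 1, -1)
    y_test = np.where(y_test == pos, 1, -1)
    return X_train, y_train, X_test, y_test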
Example #8
def solution_model():
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

    # Scale pixel values to [0, 1]
    x_train = x_train / 255.0
    x_test = x_test / 255.0

    model = Sequential()
    model.add(
        Conv1D(filters=64,
               input_shape=(28, 28),
               activation='relu',
               kernel_size=3,
               strides=1,
               padding='valid'))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Dense(32))
    model.add(Dense(10, activation='softmax'))

    # Labels are integers, so use the sparse loss; train before returning the model.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    model.fit(x_train,
              y_train,
              epochs=100,
              validation_split=0.2,
              verbose=1,
              batch_size=32)
    return model
Example #9
def load(dataset='f-mnist'):
    '''
    Loads the iris, mnist, or fashion mnist data.
    The default is fashion mnist.
    mnist and fashion mnist are divided by 255 and normalized to a maximum of 1.
    iris is normalized with MinMaxScaler.
    '''
    if dataset == 'iris':
        iris = load_iris()
        x = iris.data
        y = iris.target
        mms = MinMaxScaler()
        x = mms.fit_transform(x)
    elif dataset == 'mnist':
        (x, y), (x2, y2) = mnist.load_data()
        x = np.concatenate([x, x2], axis=0)
        y = np.concatenate([y, y2], axis=0)
        x = np.expand_dims(x, axis=-1)
        x = x / 255.
    else:
        (x, y), (x2, y2) = fashion_mnist.load_data()
        x = np.concatenate([x, x2], axis=0)
        y = np.concatenate([y, y2], axis=0)
        x = np.expand_dims(x, axis=-1)
        x = x / 255.
    return x, y
Example #10
    def load_fashion_mnist(self):
        """
        Load the FASHION MNIST dataset
        """
        logging.info("loading FASHION MNIST dataset...")

        # Load the data
        (self.train_x,
         self.train_y), (self.test_x, self.test_y) = fashion_mnist.load_data()

        # Reshape data
        self.train_x = self.train_x.reshape(60000, 28, 28, 1)
        self.test_x = self.test_x.reshape(10000, 28, 28, 1)

        # Scale the data to the range [0, 1]
        self.train_x = self.train_x.astype('float32') / 255.0
        self.test_x = self.test_x.astype('float32') / 255.0

        self.height = 28
        self.width = 28
        self.depth = 1

        label_names = [
            'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
            'Shirt', 'Sneaker', 'Bag', 'Ankle boot'
        ]
        self.classes_cnt = len(label_names)
Example #11
def load_dataset():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = np.expand_dims(x_train, axis=-1)
    x_test = np.expand_dims(x_test, axis=-1)
    generator_fs = ImageDataGenerator(
        # width_shift_range=4./28.,
        # height_shift_range=4./28.,
        # fill_mode='reflect',
        horizontal_flip=True, )
    generator = ImageDataGenerator(
        # width_shift_range=4./28.,
        # height_shift_range=4./28.,
        # fill_mode='reflect',
        horizontal_flip=True)
    y_train = np.reshape(y_train, [-1, 1])
    y_test = np.reshape(y_test, [-1, 1])
    x_train = x_train / 127.5 - 1.
    x_test = x_test / 127.5 - 1.
    output = {
        'train': {
            'data': x_train,
            'label': y_train
        },
        'test': {
            'data': x_test,
            'label': y_test
        },
        'generator': generator,
        'generator_fs': generator_fs
    }
    return output
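
One way the returned dictionary might be consumed (hypothetical usage, not from the original code):

data = load_dataset()
flow = data['generator'].flow(data['train']['data'],
                              data['train']['label'],
                              batch_size=64)
x_batch, y_batch = next(flow)  # (64, 28, 28, 1) images in [-1, 1] and (64, 1) labels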
Example #12
def test_generators(params):
    print(
        "Test on MNIST fashion https://www.kaggle.com/zalando-research/fashionmnist"
    )
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    num_classes = len(np.unique(y_train))
    params['image_size'] = (32, 32, 1)
    params['batch_size'] = 400
    params['model_file'] = "model.test.fashion_mnist.coronahack.hdf5"
    x_train = np.array(
        [cv2.resize(x, dsize=params['image_size'][:-1]) for x in x_train])
    x_test = np.array(
        [cv2.resize(x, dsize=params['image_size'][:-1]) for x in x_test])

    # Capture the maximum before scaling so both splits are divided by the same value.
    x_max = x_train.max()
    x_train = np.expand_dims(x_train, axis=3).astype(np.float32) / x_max
    x_test = np.expand_dims(x_test, axis=3).astype(np.float32) / x_max

    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    train_img_gen = ImageDataGenerator(rescale=1.0 / x_train.max())
    test_img_gen = ImageDataGenerator(rescale=1.0 / x_test.max())
    train_img_gen.fit(x_train)
    test_img_gen.fit(x_test)
    train_generator = train_img_gen.flow(x_train,
                                         y_train,
                                         batch_size=params['batch_size'])
    val_generator = test_img_gen.flow(x_test,
                                      y_test,
                                      batch_size=params['batch_size'])
    return num_classes, train_generator, val_generator
Example #13
def data_generator(n_samples):
    '''Yields the first n_samples from the fashion_mnist dataset.'''
    (x_train, y_train), (_, _) = fashion_mnist.load_data()
    for sample, target in zip(x_train, y_train[:n_samples]):
        yield sample, target
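
A possible way to wrap the generator in a tf.data pipeline (an assumption, not shown in the original); raw Fashion-MNIST images are 28x28 uint8 and labels are scalar uint8:

import tensorflow as tf

ds = tf.data.Dataset.from_generator(
    lambda: data_generator(1000),
    output_signature=(
        tf.TensorSpec(shape=(28, 28), dtype=tf.uint8),
        tf.TensorSpec(shape=(), dtype=tf.uint8),
    ),
).batch(32)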
Example #14
def retrieve_FMNIST(name, shuffle, samples_per_class, list_of_nrs):
    """
    retireves fashion-mnist dataset with the required data in the required format
    name: name of dataset
    shuffle: flag if shuffle or not
    samples_per_class: how many samples per class from mnist
    list_of_nrs: which number classes
    returns: transformed fashion mnist data
    """
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

    # extract indices of the wanted numbers
    train_filter = np.isin(y_train, list_of_nrs)
    test_filter = np.isin(y_test, list_of_nrs)
    # extract wanted numbers
    X_train, y_train = X_train[train_filter], y_train[train_filter]
    X_test, y_test = X_test[test_filter], y_test[test_filter]

    # if samples_per_class is specified, find random indices
    if samples_per_class != -1:
        # extract indices of amount of samples per class wanted
        indices = MNIST_extract_shuffled_indices(y_train, list_of_nrs,
                                                 samples_per_class)
        # apply indices on train data
        X_train, y_train = X_train[indices], y_train[indices]

    return trans_MNIST(name, X_train, y_train, X_test, y_test)
Example #15
def train_and_evaluate(output_dir, hparams):
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

    model = cnn_model()
    model.compile(optimizer=tf.keras.optimizers.Adam(hparams["learning_rate"]),
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.CategoricalAccuracy()])
    estimator = tf.keras.estimator.model_to_estimator(keras_model=model,
                                                      model_dir=output_dir)
    train_spec = tf.estimator.TrainSpec(input_fn=make_input_fn(
        X_train, y_train, tf.estimator.ModeKeys.TRAIN, hparams),
                                        max_steps=hparams["train_steps"])

    input_column = tf.feature_column.numeric_column("image_input",
                                                    shape=(28, 28, 1),
                                                    dtype=tf.float32)
    serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
        tf.feature_column.make_parse_example_spec([input_column]))
    exporter = tf.estimator.LatestExporter(
        name="exporter", serving_input_receiver_fn=serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(input_fn=make_input_fn(
        X_test, y_test, tf.estimator.ModeKeys.EVAL, hparams),
                                      exporters=exporter,
                                      steps=None)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    estimator.export_saved_model(output_dir, serving_input_fn)
Example #16
def get_input_fn(mode, batch_size=32, num_epochs=1):
    (train_x, train_y), (test_x, test_y) = fashion_mnist.load_data()
    half = test_x.shape[0] // 2
    if mode == tf.estimator.ModeKeys.TRAIN:
        input_x, input_y = train_x, train_y
        train = True
    elif mode == tf.estimator.ModeKeys.EVAL:
        input_x, input_y = test_x[:half], test_y[:half]
        train = False
    elif mode == tf.estimator.ModeKeys.PREDICT:
        input_x, input_y = test_x[half:-1], test_y[half:-1]
        train = False
    else:
        raise ValueError("tf.estimator.ModeKeys required!")

    def scale_fn(image, label):
        return (
            (tf.image.convert_image_dtype(image, tf.float32) - 0.5) * 2.0,
            tf.cast(label, tf.int32),
        )

    def input_fn():
        dataset = tf.data.Dataset.from_tensor_slices(
            (tf.expand_dims(input_x, -1), tf.expand_dims(input_y, -1))
        ).map(scale_fn)
        if train:
            dataset = dataset.shuffle(10).repeat(num_epochs)
        dataset = dataset.batch(batch_size).prefetch(1)
        return dataset

    return input_fn
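
A quick sanity check of the returned input_fn (illustrative only, not part of the original example):

eval_input_fn = get_input_fn(tf.estimator.ModeKeys.EVAL, batch_size=8)
for images, labels in eval_input_fn().take(1):
    print(images.shape, labels.shape)  # expected: (8, 28, 28, 1) and (8, 1)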
Example #17
def load_dataset():
    # load fashion mnist dataset
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    # print their shape
    print("x_train shape : {}, y_train shape : {}".format(
        x_train.shape, y_train.shape))
    print("x_test shape : {}, y_test shape : {}".format(
        x_test.shape, y_test.shape))

    # Normalize the data in range [0,1]
    x_train = x_train / 255
    x_test = x_test / 255

    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")

    # Reshape image in 3 dimensions (height = 28px, width = 28px , channel = 1)
    x_train = tf.reshape(x_train, [-1, 28, 28, 1])
    x_test = tf.reshape(x_test, [-1, 28, 28, 1])

    # one hot encoding
    y_train = tf.one_hot(y_train, 10)
    y_test = tf.one_hot(y_test, 10)

    return (x_train, x_test, y_train, y_test)
Example #18
    def train_donnees_mnist(cls):
        """Method that loads and prepares FashionMNIST training images in the format needed for training.

        :return: dataset to use for training
        :rtype: class 'tensorflow.python.data.ops.dataset_ops.BatchDataset'
        """

        # Load the MNIST dataset, made up of 60000 training image/label pairs
        # and 10000 test image/label pairs
        (train_images, train_labels), (test_images,
                                       test_labels) = mnist.load_data()

        # Reshape the data: 60000 vectors of dimension 784 instead of 60000 matrices of size 28x28
        train_images = train_images.reshape(60000, 784)

        # Convert each pixel to a 32-bit float
        train_images = train_images.astype('float32')

        # Normalize each pixel value into the [0, 1] range
        train_images = train_images / 255
        train_images = 1 - train_images

        # Create a dataset whose elements are slices of the given tensors -
        # the tensors are sliced along their first dimension
        # (splitting the batch into individual tensors to iterate over the dataset):
        train_dataset = tf.data.Dataset.from_tensor_slices(train_images)

        # Shuffle the images (tensors) and batch them
        train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

        return train_dataset
Example #19
def load_fashion_mnist(single_class=-1):
    (train_x, train_y), _ = fashion_mnist.load_data()
    train_x = np.expand_dims(train_x, axis=-1)
    if single_class != -1:
        return normalize_image(train_x[train_y == single_class])
    else:
        return normalize_image(train_x)
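
normalize_image is not defined in the snippet; a minimal sketch, assuming the common convention of scaling uint8 pixels into [0, 1]:

def normalize_image(x):
    # Convert uint8 pixel values to float32 in the [0, 1] range.
    return x.astype(np.float32) / 255.0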
Example #20
def load_real_samples():
    (trainx, _), (testx, _) = fashion_mnist.load_data()
    data = np.concatenate([trainx, testx], axis=0)
    data = np.expand_dims(data, axis=-1)
    x = data.astype(np.float32)
    x = (x - 127.5) / 127.5
    return x
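
A small helper that could draw random real batches from the returned array during GAN training (hypothetical, not in the original):

def sample_real(x, batch_size=64):
    # Pick batch_size random images from the combined, scaled dataset.
    idx = np.random.randint(0, x.shape[0], batch_size)
    return x[idx]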
Example #21
def get_fashion_images():
    """
    Function to load mnist_fashion data set:
        * Only using a subset of the data for testing functionality of the model
        * Resize the images to 512*512
    :return: training and testing data
    """
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = x_train[0:600]
    y_train = y_train[0:600]
    x_test = x_test[0:100]
    y_test = y_test[0:100]

    x_train_new = []
    x_test_new = []
    for pic in x_train:
        upgraded_pic = resize(pic, (512, 512), anti_aliasing=True)
        x_train_new.append(upgraded_pic)
    x_train_new = np.array(x_train_new)
    for pic in x_test:
        upgraded_pic = resize(pic, (512, 512), anti_aliasing=True)
        x_test_new.append(upgraded_pic)
    x_test_new = np.array(x_test_new)

    # Encode labels via one-hot encoding
    onehot_encoder = OneHotEncoder(sparse=False)
    integer_encoded_train = y_train.reshape(len(y_train), 1)
    integer_encoded_test = y_test.reshape(len(y_test), 1)

    y_train = onehot_encoder.fit_transform(integer_encoded_train)
    y_test = onehot_encoder.transform(integer_encoded_test)  # reuse the encoder fitted on the training labels

    return x_train_new, x_test_new, y_train, y_test
Example #22
    def train(self, epochs, batch_size=128, sample_interval=50):

        # Load the dataset
        (X_train, y_train), (_, _) = fashion_mnist.load_data()

        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)
        y_train = y_train.reshape(-1, 1)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        D_loss_list = []
        G_loss_list = []
        for epoch in range(1, epochs + 1):

            # ---------------------
            #  Train the discriminator
            # ---------------------

            # Randomly select images to train on
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs, labels = X_train[idx], y_train[idx]

            # Generate noise
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # The generator creates images
            #### (9) Use noise and labels together as input
            #### hint: pass noise and labels together as a single list.
            gen_imgs = self.generator.predict([noise, labels])

            # Train the discriminator
            #### (10) Use imgs and labels together as input
            #### hint: pass imgs and labels together as a single list.
            d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)

            #### (11) Use gen_imgs and labels together as input
            #### hint: pass gen_imgs and labels together as a single list.
            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train the generator
            # ---------------------

            sampled_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # Label the sampled images as valid for the combined model
            #### (12) Use noise and sampled_labels together as input
            #### hint: pass noise and sampled_labels together as a single list.
            g_loss = self.model.train_on_batch([noise, sampled_labels], valid)
            G_loss_list.append(g_loss)
            D_loss_list.append(d_loss[0])
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            if epoch % sample_interval == 0:
                self.sample_images(epoch)
        self.plotLoss(G_loss_list, D_loss_list, epoch)
Example #23
def get_fashion_mnist():
    (x_data, y_data), (x_test, y_test) = fashion_mnist.load_data()
    x_data = x_data.astype(np.float32) / 255
    x_data = x_data.reshape(-1, 28, 28, 1)
    x_data = x_data * 2. - 1.

    y_data = to_categorical(y_data, num_classes=max(y_data) + 1)
    return x_data, y_data
Example #24
    def fashion(auto_var, var_value, inter_var):
        from tensorflow.keras.datasets import fashion_mnist

        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
        x_train, x_test = x_train[:, :, :, np.newaxis], x_test[:, :, :, np.newaxis]
        x_train, x_test = x_train.astype(np.float32) / 255, x_test.astype(np.float32) / 255

        return x_train, y_train, x_test, y_test
Example #25
def load_dataset():
    (train_images, train_labels), (test_images, test_labels) = load_data()
    train_images = train_images.reshape(
        (train_images.shape[0], 28, 28, 1)) / 255
    test_images = test_images.reshape((test_images.shape[0], 28, 28, 1)) / 255
    train_labels = to_categorical(train_labels)
    test_labels = to_categorical(test_labels)
    return train_images, train_labels, test_images, test_labels
Example #26
def get_fashion_mnist():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = x_train.astype(np.float32) / 255
    x_train = x_train.reshape(-1, 28, 28, 1) * 2. - 1. 
    print(y_train.shape, np.max(y_train)+1)
    y_train = to_categorical(y_train, num_classes=max(y_train)+1)
    print(y_train.shape) 
    return x_train, y_train
Example #27
def load_fashion_mnist():
    from tensorflow.keras.datasets import fashion_mnist  # this requires keras>=2.0.9
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x = np.concatenate((x_train, x_test))
    y = np.concatenate((y_train, y_test))
    x = x.reshape([-1, 28, 28, 1]) / 255.0
    print('Fashion MNIST samples', x.shape)
    return x, y
Example #28
def get_fashion_mnist():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = x_train.astype(np.float32) / 255
    x_test = x_test.astype(np.float32) / 255
    x_train = x_train.reshape(-1, 28, 28, 1) * 2. - 1.
    x_test = x_test.reshape(-1, 28, 28, 1) * 2. - 1.

    return x_train, x_test
Example #29
def load_fashion_mnist():
  (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
  X_train = normalize_minus1_1(
      cast_to_floatx(np.pad(X_train, ((0, 0), (2, 2), (2, 2)), 'constant')))
  X_train = np.expand_dims(X_train, axis=get_channels_axis())
  X_test = normalize_minus1_1(
      cast_to_floatx(np.pad(X_test, ((0, 0), (2, 2), (2, 2)), 'constant')))
  X_test = np.expand_dims(X_test, axis=get_channels_axis())
  return (X_train, y_train), (X_test, y_test)
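
The helpers used above (normalize_minus1_1, cast_to_floatx, get_channels_axis) are not shown; plausible definitions, assuming Keras' backend conventions:

from tensorflow.keras import backend as K

def cast_to_floatx(x):
    # Cast to the backend's default float type (usually float32).
    return x.astype(K.floatx())

def normalize_minus1_1(x):
    # Map [0, 255] pixel values to [-1, 1].
    return x / 127.5 - 1.0

def get_channels_axis():
    # Channel axis depends on the configured image data format.
    return -1 if K.image_data_format() == 'channels_last' else 1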
Example #30
def load_fashion_mnist_keras(sparse=False):
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
    X_train = X_train.reshape(60000, 28, 28, 1)
    X_test = X_test.reshape(10000, 28, 28, 1)

    if not sparse:
        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

    return X_train, y_train, X_test, y_test