Example #1
from mxnet import gluon
from mxnet.gluon.data.vision import datasets, transforms


def prepare_data(batch_size):
    mnist_train = datasets.FashionMNIST(train=True)
    X, y = mnist_train[0]
    print("X shape:", X.shape, "X dtype:", X.dtype, "y:", y)
    text_labels = [
        "t-shirt",
        "trouser",
        "pullover",
        "dress",
        "coat",
        "sandal",
        "shirt",
        "sneaker",
        "bag",
        "ankle boot",
    ]
    X, y = mnist_train[0:10]  # peek at the first ten samples (unused below)
    transformer = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(0.13, 0.31)])
    mnist_train = mnist_train.transform_first(transformer)
    train_data = gluon.data.DataLoader(mnist_train,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=4)
    mnist_valid = datasets.FashionMNIST(train=False)
    valid_data = gluon.data.DataLoader(
        mnist_valid.transform_first(transformer),
        batch_size=batch_size,
        num_workers=4)
    return train_data, valid_data
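
# A minimal usage sketch (added for illustration, not part of the original
# snippet): build the loaders with an assumed batch size of 256 and look at
# the first training batch. The __main__ guard matters because the loaders
# spawn worker processes (num_workers=4).
if __name__ == "__main__":
    train_data, valid_data = prepare_data(batch_size=256)
    for data, label in train_data:
        # expect data of shape (256, 1, 28, 28), dtype float32
        print("data:", data.shape, data.dtype, "label:", label.shape)
        break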
Example #2
from mxnet import gluon
from mxnet.gluon.data.vision import datasets, transforms


def get_data(batch_size):
    mnist_train = datasets.MNIST(train=True)
    train_data = gluon.data.DataLoader(mnist_train.transform_first(
        transforms.ToTensor()),
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=4)

    mnist_valid = datasets.MNIST(train=False)
    valid_data = gluon.data.DataLoader(mnist_valid.transform_first(
        transforms.ToTensor()),
                                       batch_size=batch_size,
                                       num_workers=4)
    return train_data, valid_data
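
# Illustrative check (not in the original snippet): ToTensor converts each
# uint8 HxWx1 image into a float32 1xHxW tensor scaled to [0, 1], so a batch
# should report dtype float32 and a maximum value no larger than 1.0.
if __name__ == "__main__":
    train_data, valid_data = get_data(batch_size=64)
    print("training batches per epoch:", len(train_data))
    for data, label in train_data:
        print(data.dtype, float(data.max().asscalar()))
        break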
Example #3
from mxnet import gluon
from mxnet.gluon.data.vision import datasets, transforms


def prepare_data_mxnet(batch_size=256):
    train_transforms = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        # RandomRotation expects a float tensor in MXNet, so it is applied
        # after ToTensor rather than to the raw uint8 image.
        transforms.RandomRotation((-30, 30)),
        transforms.Normalize(0.1307, 0.3081),
    ])
    test_transforms = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(0.1307, 0.3081),
    ])

    train_data = gluon.data.DataLoader(
        datasets.MNIST(train=True).transform_first(train_transforms),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8)
    test_data = gluon.data.DataLoader(
        datasets.MNIST(train=False).transform_first(test_transforms),
        batch_size=batch_size,
        shuffle=False,
        num_workers=8)
    return train_data, test_data
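
# Quick shape check (illustrative, not part of the original snippet): after
# Resize((32, 32)) the batches should come out as (batch_size, 1, 32, 32),
# and RandomRotation is applied to the training split only.
if __name__ == "__main__":
    train_data, test_data = prepare_data_mxnet(batch_size=256)
    for data, label in train_data:
        print(data.shape, data.dtype)  # expect (256, 1, 32, 32) float32
        break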
Example #4
import os
import numpy as np
from mxnet import nd, gluon, init, autograd
from mxnet.gluon.data.vision import datasets
from mxnet.gluon import nn
from matplotlib import pyplot as plt
print("Load Package!")

train_raw_data = datasets.MNIST(train=True)
val_raw_data = datasets.MNIST(train=False)

train_data = {}
train_data['data'] = np.array([i[0].asnumpy() for i in train_raw_data])
train_data['label'] = np.array([i[1] for i in train_raw_data])
#train_data['label'] = np.array([np.eye(1, 10, k=i[1]).squeeze(axis=0) for i in train_raw_data])

print(train_data['data'].shape)
print(train_data['label'].shape)

val_data = {}
val_data['data'] = np.array([i[0].asnumpy() for i in val_raw_data])
val_data['label'] = np.array([i[1] for i in val_raw_data])
#val_data['label'] = np.array([np.eye(1, 10, k=i[1]).squeeze(axis=0) for i in val_raw_data])

print(val_data['data'].shape)
print(val_data['label'].shape)

# %%
net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'), nn.Dense(10, activation='sigmoid'))
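
# A minimal forward-pass sketch (added for illustration): initialize the network
# and push a few raw images through it. Dense layers flatten trailing dimensions
# by default, so the (N, 28, 28, 1) input needs no manual reshape. Note that if
# training with gluon.loss.SoftmaxCrossEntropyLoss, the final sigmoid activation
# is usually omitted because the loss applies softmax itself.
net.initialize(init.Normal(sigma=0.01))
sample = nd.array(train_data['data'][:4], dtype='float32') / 255.0
print(net(sample).shape)  # expected: (4, 10)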
Example #5
from pathlib import Path
import os

from mxnet import gluon
from mxnet.gluon.data.vision import datasets, transforms

# In[2]:

M5_DATA = Path(os.getenv('DATA_DIR', '../../data'), 'module_5')
M5_IMAGES = Path(M5_DATA, 'images')
M5_MODELS = Path(M5_DATA, 'models')

# ## Prepare Dataset
#
# First, let's prepare the dataset we'll use for the training exercise. We take the popular MNIST dataset but convert the labels so that it becomes a binary classification problem: every digit greater than 0 is relabelled as 1. This leaves us with two labels, 0 for images of the handwritten digit 0 and 1 for all other digits in the MNIST dataset. A quick sanity check follows the relabelling code below.

# In[3]:

# Relabel in place through the dataset's underlying label array (a private
# NumPy attribute, _label): every non-zero digit becomes class 1.
train_data = datasets.MNIST(train=True, root=M5_IMAGES)
train_data._label[train_data._label > 0] = 1

val_data = datasets.MNIST(train=False, root=M5_IMAGES)
val_data._label[val_data._label > 0] = 1
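
# Sanity check (illustrative, not part of the original notebook): after the
# relabelling, only classes 0 and 1 should remain in either split.
print(sorted(set(train_data._label.tolist())), sorted(set(val_data._label.tolist())))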

batch_size = 128
train_data = gluon.data.DataLoader(train_data.transform_first(
    transforms.ToTensor()),
                                   batch_size=batch_size,
                                   shuffle=True)
val_data = gluon.data.DataLoader(val_data.transform_first(
    transforms.ToTensor()),
                                 batch_size=batch_size,
                                 shuffle=False)

from models.mxnet.resnet_mx_18 import ResNet18Mxnet
from mxnet import nd, gluon, init, autograd, gpu, cpu
from mxnet.gluon import nn
import mxnet as mx
from mxnet.gluon.data.vision import datasets, transforms
import matplotlib.pyplot as plt
import time

mnist_train = datasets.MNIST(train=True)
mnist_val = datasets.MNIST(train=False)

transformer = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(0.05, 0.05)])

mnist_train = mnist_train.transform_first(transformer)
mnist_val = mnist_val.transform_first(transformer)

batch_size = 32
train_data = gluon.data.DataLoader(mnist_train,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   num_workers=4)
valid_data = gluon.data.DataLoader(mnist_val,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   num_workers=4)

use_gpu = True
if use_gpu:
    # in case you have more than one GPU, you can use gpu(1), gpu(2), ...
    ctx = gpu(0)
else:
    ctx = cpu()
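
# Hypothetical continuation (not in the original snippet): move one batch to the
# chosen context and run a forward pass. ResNet18Mxnet is assumed here to be a
# no-argument gluon Block from this repository's models package; adjust the
# constructor call if its real signature differs.
net = ResNet18Mxnet()
net.initialize(init.Xavier(), ctx=ctx)
for data, label in train_data:
    out = net(data.as_in_context(ctx))
    print(out.shape)
    break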