def load_data(name):
    if name == 'cifar10':
        (X_train, y_train), (X_test, y_test), nout = load_cifar10(path=args.data_dir)
        # really 10 classes, pad to nearest power of 2 to match conv output
        nout = 16
    elif name == 'cifar100':
        (X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode='fine')
        X_train = X_train.reshape(50000, 3072)
        X_test = X_test.reshape(10000, 3072)
        # really 100 classes, pad to nearest power of 2 to match conv output
        nout = 128
    elif name == 'svhn':
        from scipy.io import loadmat
        train = loadmat('../data/svhn_train.mat')
        test = loadmat('../data/svhn_test.mat')
        (X_train, y_train), (X_test, y_test) = (train['X'], train['y']), (test['X'], test['y'])
        s = X_train.shape
        X_train = X_train.reshape(-1, s[-1]).transpose()
        s = X_test.shape
        X_test = X_test.reshape(-1, s[-1]).transpose()
        # copy the raw pixel data into uint8 arrays (image bytes in 0-255)
        temp = np.empty(X_train.shape, dtype=np.uint8)
        np.copyto(temp, X_train)
        X_train = temp
        temp = np.empty(X_test.shape, dtype=np.uint8)
        np.copyto(temp, X_test)
        X_test = temp
        nout = 16
    else:
        raise ValueError('unknown dataset: %s' % name)
    return (X_train, y_train), (X_test, y_test), nout
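
# Usage sketch (an addition, not part of the original snippet; assumes neon is
# installed and that `args` from a NeonArgparser is in scope, as above):
from neon.data import ArrayIterator
(X_tr, y_tr), (X_te, y_te), n_out = load_data('cifar10')
train_iter = ArrayIterator(X_tr, y_tr, nclass=n_out, lshape=(3, 32, 32))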
    def extract_images(self, overwrite=False):
        import os
        import numpy as np
        from neon.data import load_cifar10
        from PIL import Image
        dataset = dict()
        dataset['train'], dataset['val'], _ = load_cifar10(self.out_dir, normalize=False)

        for setn in ('train', 'val'):
            data, labels = dataset[setn]

            img_dir = os.path.join(self.out_dir, setn)
            ulabels = np.unique(labels)
            for ulabel in ulabels:
                subdir = os.path.join(img_dir, str(ulabel))
                if not os.path.exists(subdir):
                    os.makedirs(subdir)

            for idx in range(data.shape[0]):
                im = np.pad(data[idx].reshape((3, 32, 32)), self.pad_width, mode='mean')
                im = np.uint8(np.transpose(im, axes=[1, 2, 0]).copy())
                im = Image.fromarray(im)
                path = os.path.join(img_dir, str(labels[idx][0]), str(idx) + '.png')
                im.save(path, format='PNG')

            if setn == 'train':
                self.pixel_mean = list(data.mean(axis=0).reshape(3, -1).mean(axis=1))
                self.pixel_mean.reverse()  # stored in BGR order, because OpenCV reads images as BGR
Example #3
def extract_images(out_dir, padded_size):
    '''
    Save CIFAR-10 dataset as PNG files
    '''
    import os
    import numpy as np
    from neon.data import load_cifar10
    from PIL import Image
    dataset = dict()
    dataset['train'], dataset['val'], _ = load_cifar10(out_dir,
                                                       normalize=False)
    pad_size = (padded_size - 32) // 2 if padded_size > 32 else 0
    pad_width = ((0, 0), (pad_size, pad_size), (pad_size, pad_size))

    for setn in ('train', 'val'):
        data, labels = dataset[setn]

        img_dir = os.path.join(out_dir, setn)
        ulabels = np.unique(labels)
        for ulabel in ulabels:
            subdir = os.path.join(img_dir, str(ulabel))
            if not os.path.exists(subdir):
                os.makedirs(subdir)

        for idx in range(data.shape[0]):
            im = np.pad(data[idx].reshape((3, 32, 32)), pad_width, mode='mean')
            im = np.uint8(np.transpose(im, axes=[1, 2, 0]).copy())
            im = Image.fromarray(im)
            path = os.path.join(img_dir, str(labels[idx][0]),
                                str(idx) + '.png')
            im.save(path, format='PNG')
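
# Usage sketch (an addition; hypothetical output directory -- load_cifar10 will
# download CIFAR-10 there on first use):
extract_images('/tmp/cifar10-png', padded_size=40)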
Example #5

from neon.backends import gen_backend
from neon.data import ArrayIterator, load_cifar10
from neon.util.argparser import NeonArgparser

be = gen_backend(backend='cpu', batch_size=128)

# doesn't actually do anything with the doc string!
parser = NeonArgparser(__doc__)

# parse_args returns an argparse-style namespace holding the parsed
# command-line arguments.
args = parser.parse_args()

epochs = 10

# To train a deep network we need to specify the following:
# - dataset
(X_train, y_train), (X_test, y_test), nclass = load_cifar10()

# lshape tells the iterator how to reshape each flat 3072-element row back
# into a 3 x 32 x 32 image.
train_set = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))

test_set = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))

# - list of layers
# (Architecture is a project-local helper defined outside this snippet)
arch = Architecture('AlexNet', nclass)
layers = arch.layers
learning_rate = arch.learning_rate
momentum = arch.momentum

# using the code provided by neon
# init_uni = Uniform(low=-0.1, high=0.1)
"""

from neon.data import ArrayIterator, load_cifar10
from neon.initializers import Uniform
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Misclassification, CrossEntropyBinary, Logistic, Rectlin
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

(X_train, y_train), (X_test, y_test), nclass = load_cifar10(path=args.data_dir)

train = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
test = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))

init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)

# set up the model layers
layers = [
    Affine(nout=200, init=init_uni, activation=Rectlin()),
    Affine(nout=10, init=init_uni, activation=Logistic(shortcut=True))
]

cost = GeneralizedCost(costfunc=CrossEntropyBinary())
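
# A minimal continuation (an addition, not part of the original snippet),
# following neon's standard Model/Callbacks training pattern:
mlp = Model(layers=layers)
callbacks = Callbacks(mlp, eval_set=test, **args.callback_args)
mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost,
        callbacks=callbacks)
print('Misclassification error = %.1f%%'
      % (mlp.eval(test, metric=Misclassification()) * 100))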
Example #7
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# hyperparameters
batch_size = 128
num_epochs = args.epochs

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 default_dtype=args.datatype)

(X_train, y_train), (X_test, y_test), nclass = load_cifar10(path=args.data_dir)

# really 10 classes, pad to nearest power of 2 to match conv output
train_set = DataIterator(X_train, y_train, nclass=16, lshape=(3, 32, 32))
valid_set = DataIterator(X_test, y_test, nclass=16, lshape=(3, 32, 32))

init_uni = GlorotUniform()
opt_gdm = GradientDescentMomentum(learning_rate=0.5,
                                  schedule=Schedule(step_config=[200, 250, 300],
                                                    change=0.1),
                                  momentum_coef=0.9, wdecay=.0001)
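# With this schedule, the learning rate is scaled by `change` (0.1) each time
# training reaches epochs 200, 250 and 300: 0.5 -> 0.05 -> 0.005 -> 0.0005.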
relu = Rectlin()
layers = []
layers.append(Dropout(keep=.8))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu, pad=1))
Example #9
def process(inputpath):
    # process_dataset, trainimgs and testimgs are defined elsewhere in the
    # source project; only the data-loading call is shown in this snippet.
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10(inputpath,
                                                                normalize=False)
    process_dataset(X_train, y_train, inputpath, trainimgs)
    process_dataset(X_test, y_test, inputpath, testimgs)
Example #10
            "type": "int",
            "bounds": {
                "max": 500,
                "min": 50,
            }
        },
    ],
    # Sign up for an enterprise account to get more observations
    observation_budget=150,
)

DATA_DIR = "/home/ubuntu/data"

(X_train, y_train), (X_test, y_test), nclass = load_cifar10(
    path=DATA_DIR,
    normalize=False,
    contrast_normalize=True,
    whiten=False,
)

# NOTE: the original author reported an error on this command; DataIterator
# was renamed ArrayIterator in later neon releases, which is one likely cause.
train_set = DataIterator(X_train, y_train, nclass=16, lshape=(3, 32, 32))
valid_set = DataIterator(X_test, y_test, nclass=16, lshape=(3, 32, 32))

# run optimization loop
for _ in range(experiment.observation_budget):
    suggestion = conn.experiments(experiment.id).suggestions().create()
    assignments = suggestion.assignments
    print(assignments)

    num_epochs = int(assignments.get("epochs"))
    init_uni = Gaussian(scale=assignments.get("gaussian_scale"))
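
    # Sketch of closing the SigOpt loop (an addition; assumes `valid_error`
    # holds the validation error of a model trained with these assignments):
    # conn.experiments(experiment.id).observations().create(
    #     suggestion=suggestion.id, value=-valid_error)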
Example #11
def process(inputpath):
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10(inputpath,
                                                                normalize=False)
    process_dataset(X_train, y_train, inputpath, traindir)
    process_dataset(X_test, y_test, inputpath, testdir)
Example #12
from neon.data import ArrayIterator, load_cifar10
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("--learning_rate", default=0.05, help="initial learning rate")
parser.add_argument("--weight_decay", default=0.001, help="weight decay")
parser.add_argument("--deconv", action="store_true", help="save visualization data from deconvolution")
args = parser.parse_args()

# hyperparameters
num_epochs = args.epochs

(X_train, y_train), (X_test, y_test), nclass = load_cifar10(
    path=args.data_dir, normalize=False, contrast_normalize=True, whiten=True
)

# really 10 classes, pad to nearest power of 2 to match conv output
train_set = ArrayIterator(X_train, y_train, nclass=16, lshape=(3, 32, 32))
valid_set = ArrayIterator(X_test, y_test, nclass=16, lshape=(3, 32, 32))

init_uni = Gaussian(scale=0.05)
opt_gdm = GradientDescentMomentum(
    learning_rate=float(args.learning_rate),
    momentum_coef=0.9,
    wdecay=float(args.weight_decay),
    schedule=Schedule(step_config=[200, 250, 300], change=0.1),
)

relu = Rectlin()
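
# Sketch (an addition, not in the original snippet): the --deconv flag defined
# above is typically consumed via neon's deconvolution visualization callback
# once a Model and Callbacks object exist:
# callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)
# if args.deconv:
#     callbacks.add_deconv_callback(train_set, valid_set)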
Example #13
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("--learning_rate", default=0.05, help="initial learning rate")
parser.add_argument("--weight_decay", default=0.001, help="weight decay")
parser.add_argument('--deconv', action='store_true',
                    help='save visualization data from deconvolution')
args = parser.parse_args()

# hyperparameters
num_epochs = args.epochs

(X_train, y_train), (X_test, y_test), nclass = load_cifar10(path=args.data_dir,
                                                            normalize=False,
                                                            contrast_normalize=True,
                                                            whiten=True)

# really 10 classes, pad to nearest power of 2 to match conv output
train_set = ArrayIterator(X_train, y_train, nclass=16, lshape=(3, 32, 32))
valid_set = ArrayIterator(X_test, y_test, nclass=16, lshape=(3, 32, 32))

init_uni = Gaussian(scale=0.05)
opt_gdm = GradientDescentMomentum(learning_rate=float(args.learning_rate), momentum_coef=0.9,
                                  wdecay=float(args.weight_decay),
                                  schedule=Schedule(step_config=[200, 250, 300], change=0.1))

relu = Rectlin()
conv = dict(init=init_uni, batch_norm=False, activation=relu)
convp1 = dict(init=init_uni, batch_norm=False, activation=relu, padding=1)
convp1s2 = dict(init=init_uni, batch_norm=False, activation=relu, padding=1, strides=2)
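
# How these parameter dicts are consumed in the all-CNN pattern (a sketch based
# on neon's cifar10_allcnn example; Conv and Dropout come from neon.layers):
from neon.layers import Conv, Dropout
layers = [Dropout(keep=.8),
          Conv((3, 3, 96), **convp1),
          Conv((3, 3, 96), **convp1),
          Conv((3, 3, 96), **convp1s2)]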