Example #1
0
def test_inception(devs, kv_type):
    """Train the inception model on CIFAR-10 and return its validation accuracy.

    devs    : context or list of contexts to train on (e.g. mx.gpu(0))
    kv_type : kvstore type string forwarded to FeedForward.create
    """
    # guarantee the same weight init for each run
    mx.random.seed(0)
    logging.basicConfig(level=logging.DEBUG)

    (train, val) = common.cifar10(batch_size=128, input_shape=(3, 28, 28))

    # Use num_epoch (not the older num_round spelling) so this call is
    # consistent with the other FeedForward.create call sites in this file.
    model = mx.model.FeedForward.create(ctx=devs,
                                        symbol=common.inception(),
                                        X=train,
                                        eval_data=val,
                                        kvstore=kv_type,
                                        num_epoch=10,
                                        learning_rate=0.1,
                                        momentum=0.9,
                                        wd=0.00001,
                                        initializer=mx.init.Uniform(0.07))

    return common.accuracy(model, val)
Example #2
0
def test_inception(devs, kv_type):
    """Train inception on CIFAR-10 with the given contexts and kvstore,
    then return the resulting validation accuracy."""
    # Fix the RNG seed so every run starts from identical weights.
    mx.random.seed(0)
    logging.basicConfig(level=logging.DEBUG)

    train, val = common.cifar10(batch_size=128, input_shape=(3, 28, 28))

    # Collect the training configuration in one place, then splat it.
    settings = dict(ctx=devs,
                    symbol=common.inception(),
                    X=train,
                    eval_data=val,
                    kvstore=kv_type,
                    num_epoch=10,
                    learning_rate=0.1,
                    momentum=0.9,
                    wd=0.00001,
                    initializer=mx.init.Uniform(0.07))
    model = mx.model.FeedForward.create(**settings)

    return common.accuracy(model, val)
Example #3
0
    data_iter.reset()
    Y = np.concatenate([y[0].asnumpy() for _, y, _, _ in data_iter])
    data_iter.reset()
    X = np.concatenate([x[0].asnumpy() for x, _, _, _ in data_iter])
    assert X.shape[0] == Y.shape[0]
    return (X, Y)


def test_iter(data_iter):
    X, Y = get_XY(data_iter)
    print X.shape, Y.shape
    for i in range(4):
        A, B = get_XY(data_iter)
        assert (A.shape == X.shape)
        assert (B.shape == Y.shape)
        assert (np.sum(A != X) == 0)
        assert (np.sum(B != Y) == 0)


# Exercise every iterator configuration: flat and image-shaped MNIST,
# plus image-shaped CIFAR-10.
for loader, batch_size, shape in [(mnist, 100, (784, )),
                                  (mnist, 100, (1, 28, 28)),
                                  (cifar10, 128, (3, 28, 28))]:
    (train, val) = loader(batch_size=batch_size, input_shape=shape)
    test_iter(train)
    test_iter(val)
#!/usr/bin/env python
# pylint: skip-file
import common
import mxnet as mx
import logging

# Distributed async training of inception on CIFAR-10.
# Seed the RNG so each worker starts from identical weights.
mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create('dist_async')
# num_parts / part_index presumably shard the dataset across workers so
# each rank reads a distinct slice — confirm against common.cifar10.
train, val = common.cifar10(num_parts=kv.num_workers,
                            part_index=kv.rank,
                            batch_size=128,
                            input_shape=(3, 28, 28))

# Two GPUs per worker.
devs = [mx.gpu(gpu_id) for gpu_id in range(2)]
model = mx.model.FeedForward.create(ctx=devs,
                                    kvstore=kv,
                                    symbol=common.inception(),
                                    X=train,
                                    eval_data=val,
                                    num_epoch=20,
                                    learning_rate=0.05,
                                    momentum=0.9,
                                    wd=0.00001,
                                    initializer=mx.init.Uniform(0.07))

common.accuracy(model, val)
Example #5
0
def get_XY(data_iter):
    """Drain data_iter twice and return (X, Y): every data batch and every
    label batch concatenated into two full arrays."""
    def _gather(field):
        # One full pass over the iterator: pick element `field` of each
        # 4-tuple batch and stack the first array of that element.
        data_iter.reset()
        return np.concatenate([batch[field][0].asnumpy() for batch in data_iter])

    Y = _gather(1)  # labels pass first, then the data pass
    X = _gather(0)
    assert X.shape[0] == Y.shape[0]
    return (X, Y)

def test_iter(data_iter):
    X, Y = get_XY(data_iter)
    print X.shape, Y.shape
    for i in range(4):
        A, B = get_XY(data_iter)
        assert(A.shape == X.shape)
        assert(B.shape == Y.shape)
        assert(np.sum(A != X) == 0)
        assert(np.sum(B != Y) == 0)


# Run the determinism check over every dataset/shape combination.
configs = [(mnist, 100, (784,)),
           (mnist, 100, (1, 28, 28)),
           (cifar10, 128, (3, 28, 28))]
for loader, bs, shape in configs:
    train, val = loader(batch_size=bs, input_shape=shape)
    test_iter(train)
    test_iter(val)