Example #1
import common  # project helper module: mlp(), mnist(), accuracy()
import mxnet as mx
import logging


def test_mlp(devs, kv_type):
    # guarantee the same weight init for each run
    mx.random.seed(0)
    logging.basicConfig(level=logging.DEBUG)

    (train, val) = common.mnist(batch_size=100, input_shape=(784,))

    # train
    model = mx.model.FeedForward.create(symbol=common.mlp(),
                                        ctx=devs,
                                        X=train,
                                        num_epoch=4,
                                        learning_rate=0.1,
                                        wd=0.0004,
                                        momentum=0.9,
                                        kvstore=kv_type)

    return common.accuracy(model, val)
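
Examples #1, #6, #7, and #11 build the network with a `common.mlp()` helper whose definition this page does not show. As a minimal sketch of what such a symbol might look like in this era of the MXNet symbolic API (layer names and hidden sizes are assumptions, not the project's actual values):

import mxnet as mx

def mlp():
    # hypothetical reconstruction of common.mlp(); sizes are guesses
    data = mx.symbol.Variable('data')
    fc1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=128)
    act1 = mx.symbol.Activation(data=fc1, name='relu1', act_type='relu')
    fc2 = mx.symbol.FullyConnected(data=act1, name='fc2', num_hidden=64)
    act2 = mx.symbol.Activation(data=fc2, name='relu2', act_type='relu')
    fc3 = mx.symbol.FullyConnected(data=act2, name='fc3', num_hidden=10)
    # the 'softmax' output name is what FeedForward expects by default
    return mx.symbol.SoftmaxOutput(data=fc3, name='softmax')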
Example #2
File: local_mlp.py Project: arka9474/mxnet
import mxnet as mx
import logging
# `mnist`, `accuracy`, and the `softmax` network symbol are defined elsewhere
# in this snippet's source file (cf. common.mnist / common.mlp in Example #1).


def test_mlp(devs, kv_type):
    # guarantee the same weight init for each run
    mx.random.seed(0)
    logging.basicConfig(level=logging.DEBUG)

    (train, val) = mnist(batch_size=102, input_shape=(784, ))

    # train
    model = mx.model.FeedForward.create(symbol=softmax,
                                        ctx=devs,
                                        X=train,
                                        num_round=2,  # older FeedForward name for num_epoch
                                        learning_rate=0.1,
                                        wd=0.0004,
                                        momentum=0.9,
                                        kvstore=kv_type)

    return accuracy(model, val)
Example #3
import mxnet as mx
import logging
# `mnist`, `accuracy`, and the `lenet` network symbol are defined elsewhere
# in this snippet's source file.


def test_lenet(devs, kv_type):
    # guarantee the same weight init for each run
    mx.random.seed(0)
    logging.basicConfig(level=logging.DEBUG)

    # (train, val) = cifar10(batch_size = 128, input_shape=(3,28,28))
    (train, val) = mnist(batch_size=100, input_shape=(1, 28, 28))

    model = mx.model.FeedForward.create(ctx=devs,
                                        kvstore=kv_type,
                                        symbol=lenet,
                                        X=train,
                                        num_round=3,
                                        learning_rate=0.1,
                                        momentum=0.9,
                                        wd=0.00001)

    return accuracy(model, val)
Example #4
File: local_lenet.py Project: reking/mxnet
import common
import mxnet as mx
import logging


def test_lenet(devs, kv_type):
    # guarantee the same weight init for each run
    mx.random.seed(0)
    logging.basicConfig(level=logging.DEBUG)

    # (train, val) = common.cifar10(batch_size = 128, input_shape=(3,28,28))
    (train, val) = common.mnist(batch_size = 100, input_shape=(1,28,28))

    model = mx.model.FeedForward.create(
        ctx           = devs,
        kvstore       = kv_type,
        symbol        = common.lenet(),
        X             = train,
        num_round     = 3,
        learning_rate = 0.1,
        momentum      = 0.9,
        wd            = 0.00001)

    return common.accuracy(model, val)
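
The LeNet examples (#3, #4, and the distributed variants below) get their network from `common.lenet()`, also not shown on this page. A sketch assuming the classic two-convolution LeNet layout (filter counts and hidden sizes are guesses):

import mxnet as mx

def lenet():
    # hypothetical reconstruction of common.lenet()
    data = mx.symbol.Variable('data')
    conv1 = mx.symbol.Convolution(data=data, kernel=(5, 5), num_filter=20)
    tanh1 = mx.symbol.Activation(data=conv1, act_type='tanh')
    pool1 = mx.symbol.Pooling(data=tanh1, pool_type='max',
                              kernel=(2, 2), stride=(2, 2))
    conv2 = mx.symbol.Convolution(data=pool1, kernel=(5, 5), num_filter=50)
    tanh2 = mx.symbol.Activation(data=conv2, act_type='tanh')
    pool2 = mx.symbol.Pooling(data=tanh2, pool_type='max',
                              kernel=(2, 2), stride=(2, 2))
    flatten = mx.symbol.Flatten(data=pool2)
    fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
    tanh3 = mx.symbol.Activation(data=fc1, act_type='tanh')
    fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=10)
    return mx.symbol.SoftmaxOutput(data=fc2, name='softmax')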
Example #5
#!/usr/bin/env python
import common
import mxnet as mx
import logging

mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create('dist_async')

(train, val) = common.mnist(num_parts=kv.num_workers,
                            part_index=kv.rank,
                            batch_size=100,
                            input_shape=(1, 28, 28))

model = mx.model.FeedForward.create(ctx=mx.gpu(kv.rank),
                                    kvstore=kv,
                                    symbol=common.lenet(),
                                    X=train,
                                    num_epoch=10,
                                    learning_rate=0.05,
                                    momentum=0.9,
                                    wd=0.00001)

common.accuracy(model, val)
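
Examples #5 and #6 split MNIST across workers with `num_parts` and `part_index`, which map directly onto arguments of the built-in `mx.io.MNISTIter`. A sketch of what the `common.mnist()` helper presumably wraps (the data paths are assumptions):

import mxnet as mx

def mnist(batch_size, input_shape, num_parts=1, part_index=0):
    # hypothetical reconstruction of common.mnist()
    flat = len(input_shape) == 1  # (784,) -> flat vectors; (1, 28, 28) -> images
    train = mx.io.MNISTIter(image='data/train-images-idx3-ubyte',
                            label='data/train-labels-idx1-ubyte',
                            input_shape=input_shape, batch_size=batch_size,
                            shuffle=True, flat=flat,
                            num_parts=num_parts, part_index=part_index)
    val = mx.io.MNISTIter(image='data/t10k-images-idx3-ubyte',
                          label='data/t10k-labels-idx1-ubyte',
                          input_shape=input_shape, batch_size=batch_size,
                          shuffle=False, flat=flat)
    return (train, val)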
Example #6
#!/usr/bin/env python
import mxnet as mx
import logging
import common

mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create('dist_async')

(train, val) = common.mnist(num_parts = kv.num_workers,
                            part_index = kv.rank,
                            batch_size = 100,
                            input_shape = (784,))

# train
model  = mx.model.FeedForward.create(
    symbol        = common.mlp(),
    ctx           = mx.cpu(),
    X             = train,
    num_round     = 4,
    learning_rate = 0.05,
    wd            = 0.0004,
    momentum      = 0.9,
    kvstore       = kv)

common.accuracy(model, val)
Example #7
#!/usr/bin/env python

import mxnet as mx
import logging
import common

mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create('dist_sync')

# feed each machine the whole data
(train, val) = common.mnist(batch_size = 100,
                            input_shape = (784,))

# train
model  = mx.model.FeedForward.create(
    symbol        = common.mlp(),
    ctx           = mx.cpu(),
    X             = train,
    num_epoch     = 4,
    learning_rate = 0.1,
    wd            = 0.0004,
    momentum      = 0.9,
    kvstore       = kv)

common.accuracy(model, val)
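
Note that the `dist_sync` and `dist_async` scripts (Examples #5 through #7, and #9, #11, #12 below) do not run standalone: creating a distributed kvstore requires a scheduler and parameter-server processes. In the MXNet of this era these were typically started through the repo's launcher, along the lines of `python tools/launch.py -n 2 python dist_sync_mlp.py` (where `dist_sync_mlp.py` is a hypothetical name for the script above; the exact launcher path and flags depend on the MXNet version, so treat that command as an assumption). The semantic difference is that `dist_sync` aggregates gradients from all workers before each update, which is why these scripts can feed each machine the whole data set, while `dist_async` applies each worker's updates as they arrive, which is why Examples #5 and #6 partition the data with `num_parts`/`part_index`.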
Example #8
import numpy as np
# `mnist` and `cifar10` iterator helpers come from the surrounding module.


def get_XY(data_iter):
    data_iter.reset()
    Y = np.concatenate([y[0].asnumpy() for _, y, _, _ in data_iter])
    data_iter.reset()
    X = np.concatenate([x[0].asnumpy() for x, _, _, _ in data_iter])
    assert X.shape[0] == Y.shape[0]
    return (X, Y)


def test_iter(data_iter):
    X, Y = get_XY(data_iter)
    print(X.shape, Y.shape)
    for i in range(4):
        A, B = get_XY(data_iter)
        assert (A.shape == X.shape)
        assert (B.shape == Y.shape)
        assert (np.sum(A != X) == 0)
        assert (np.sum(B != Y) == 0)


(train, val) = mnist(batch_size=100, input_shape=(784, ))
test_iter(train)
test_iter(val)

(train, val) = mnist(batch_size=100, input_shape=(1, 28, 28))
test_iter(train)
test_iter(val)

(train, val) = cifar10(batch_size=128, input_shape=(3, 28, 28))
test_iter(train)
test_iter(val)
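
Each training example ends with a call to an `accuracy` helper that the page omits. Given the `get_XY` pattern above and the FeedForward `predict` API, a plausible sketch (the real helper may differ):

import numpy as np

def accuracy(model, data_iter):
    # hypothetical reconstruction of common.accuracy()
    data_iter.reset()
    prob = model.predict(data_iter)  # (num_examples, num_classes) scores
    pred = np.argmax(prob, axis=1)
    data_iter.reset()
    # same label-extraction idiom as get_XY above
    Y = np.concatenate([y[0].asnumpy() for _, y, _, _ in data_iter])
    acc = float(np.sum(pred == Y)) / len(Y)
    print('Accuracy = %f' % acc)
    return acc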
Example #9
#!/usr/bin/env python
import mxnet as mx
import logging
import common

mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create('dist_sync')

# feed each machine the whole data
(train, val) = common.mnist(batch_size=100, input_shape=(1, 28, 28))

# train, worker i uses gpu i
model = mx.model.FeedForward.create(ctx=mx.gpu(kv.rank),
                                    kvstore=kv,
                                    symbol=common.lenet(),
                                    X=train,
                                    num_round=3,
                                    learning_rate=0.1,
                                    momentum=0.9,
                                    wd=0.00001)

common.accuracy(model, val)
Example #10
import numpy as np
# `mnist` and `cifar10` come from the surrounding module, as in Example #8.


def get_XY(data_iter):
    data_iter.reset()
    Y = np.concatenate([y[0].asnumpy() for _, y, _, _ in data_iter])
    data_iter.reset()
    X = np.concatenate([x[0].asnumpy() for x, _, _, _ in data_iter])
    assert X.shape[0] == Y.shape[0]
    return (X,Y)

def test_iter(data_iter):
    X, Y = get_XY(data_iter)
    print(X.shape, Y.shape)
    for i in range(4):
        A, B = get_XY(data_iter)
        assert(A.shape == X.shape)
        assert(B.shape == Y.shape)
        assert(np.sum(A != X) == 0)
        assert(np.sum(B != Y) == 0)


(train, val) = mnist(batch_size = 100, input_shape = (784,))
test_iter(train)
test_iter(val)

(train, val) = mnist(batch_size = 100, input_shape=(1,28,28))
test_iter(train)
test_iter(val)

(train, val) = cifar10(batch_size = 128, input_shape=(3,28,28))
test_iter(train)
test_iter(val)
Example #11
#!/usr/bin/env python

import mxnet as mx
import logging
import common

mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create('dist_sync')

# feed each machine the whole data
(train, val) = common.mnist(batch_size=100, input_shape=(784, ))

# train
model = mx.model.FeedForward.create(symbol=common.mlp(),
                                    ctx=mx.cpu(),
                                    X=train,
                                    num_epoch=4,
                                    learning_rate=0.1,
                                    wd=0.0004,
                                    momentum=0.9,
                                    kvstore=kv)

common.accuracy(model, val)
Example #12
#!/usr/bin/env python
import mxnet as mx
import logging
import common

mx.random.seed(0)
logging.basicConfig(level=logging.DEBUG)

kv = mx.kvstore.create("dist_sync")

# feed each machine the whole data
(train, val) = common.mnist(batch_size=100, input_shape=(1, 28, 28))

# train, worker i uses gpu i
model = mx.model.FeedForward.create(
    ctx=mx.gpu(kv.rank),
    kvstore=kv,
    symbol=common.lenet(),
    X=train,
    num_round=3,
    learning_rate=0.1,
    momentum=0.9,
    wd=0.00001,
)

common.accuracy(model, val)