Example #1
def main():
    # per-device batch size: split the global batch across the listed GPUs
    batchsize = args.batch_size if args.gpus == '' else \
        args.batch_size // len(args.gpus.split(','))
    print('batchsize is', batchsize)

    # define network structure
    net = get_symbol(batchsize)

    # load data
    train, val = mnist_iterator(batch_size=args.batch_size,
                                input_shape=data_shape)

    # train
    print('training model ...')
    train_model.fit(args, net, (train, val), data_shape)
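Example #1 leans on a module-level args object plus helpers (get_symbol, train_model, mnist_iterator) from the example's own repo. A minimal sketch of the argument parser the snippet implies; flag names and defaults beyond batch_size and gpus are assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=128,
                    help='global batch size, split across the listed GPUs')
parser.add_argument('--gpus', type=str, default='',
                    help="comma-separated GPU ids, e.g. '0,1'; empty means CPU")
args = parser.parse_args()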
Example #2
import mxnet as mx
import numpy as np

from data import mnist_iterator
# visual_feature_space is the example's own plotting helper (sketched below)


def main():
    # load model, get embedding layer
    model = mx.model.FeedForward.load('center_loss',
                                      20,
                                      ctx=mx.cpu(0),
                                      numpy_batch_size=1)
    internals = model.symbol.get_internals()
    embedding_layer = internals['embedding_output']
    feature_extractor = mx.model.FeedForward(ctx=mx.cpu(0),
                                             symbol=embedding_layer,
                                             numpy_batch_size=1,
                                             arg_params=model.arg_params,
                                             aux_params=model.aux_params,
                                             allow_extra_params=True)
    print('feature_extractor loaded')

    # load MNIST data
    _, val = mnist_iterator(batch_size=100, input_shape=(1, 28, 28))

    # extract feature
    print('extracting feature')
    embeds = []
    labels = []
    for batch in val:
        preds = feature_extractor.predict(batch.data[0])
        embeds.append(preds)
        labels.append(batch.label[0].asnumpy())

    embeds = np.vstack(embeds)
    labels = np.hstack(labels)

    print('embeds shape is', embeds.shape)
    print('labels shape is', labels.shape)

    # prepare dict for display
    namedict = {i: str(i) for i in range(10)}

    visual_feature_space(embeds, labels, 10, namedict)
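visual_feature_space is the center-loss example's own plotting helper, not part of mxnet, and its body is not shown on this page. A minimal sketch of what such a helper could look like, assuming 2-D embeddings and matplotlib:

import matplotlib.pyplot as plt

def visual_feature_space(features, labels, num_classes, namedict):
    # scatter the 2-D embeddings, one color per digit class
    for c in range(num_classes):
        pts = features[labels == c]
        plt.scatter(pts[:, 0], pts[:, 1], s=4, label=namedict[c])
    plt.legend()
    plt.show()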
Example #3
            if self.num is None:  # single output: scalar accumulators
                self.sum_metric += (pred_label.flat == label.flat).sum()
                self.num_inst += len(pred_label.flat)
            else:
                self.sum_metric[i] += (pred_label.flat == label.flat).sum()
                self.num_inst[i] += len(pred_label.flat)


batch_size = 100
num_epochs = 100
device = mx.gpu(0)
lr = 0.01

network = build_network()
train, val = mnist_iterator(batch_size=batch_size, input_shape=(784, ))
train = Multi_mnist_iterator(train)
val = Multi_mnist_iterator(val)

model = mx.model.FeedForward(ctx=device,
                             symbol=network,
                             num_epoch=num_epochs,
                             learning_rate=lr,
                             momentum=0.9,
                             wd=0.00001,
                             initializer=mx.init.Xavier(factor_type="in",
                                                        magnitude=2.34))

model.fit(X=train,
          eval_data=val,
          eval_metric=Multi_Accuracy(num=2))
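The fragment above starts inside the update method of a custom metric, and build_network / Multi_mnist_iterator are likewise the multi-task example's own helpers. A sketch of the full metric class the fragment plausibly belongs to, assuming the old mx.metric.EvalMetric interface with its optional num argument for per-output accumulators:

import mxnet as mx

class Multi_Accuracy(mx.metric.EvalMetric):
    """Accuracy computed separately for each (output, label) pair."""

    def __init__(self, num=None):
        super(Multi_Accuracy, self).__init__('multi-accuracy', num)

    def update(self, labels, preds):
        for i in range(len(labels)):
            pred_label = mx.nd.argmax_channel(preds[i]).asnumpy().astype('int32')
            label = labels[i].asnumpy().astype('int32')
            if self.num is None:
                self.sum_metric += (pred_label.flat == label.flat).sum()
                self.num_inst += len(pred_label.flat)
            else:
                self.sum_metric[i] += (pred_label.flat == label.flat).sum()
                self.num_inst[i] += len(pred_label.flat)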
Example #4
from data import mnist_iterator
import mxnet as mx
import numpy as np
import logging

data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data=act1, name='fc2', num_hidden=64)
act2 = mx.symbol.Activation(data=fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data=act2, name='fc3', num_hidden=10)
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')

# data

train, val = mnist_iterator(batch_size=100, input_shape=(784,))

# train

logging.basicConfig(level=logging.DEBUG)

model = mx.model.FeedForward(
    ctx=mx.cpu(), symbol=mlp, num_epoch=20,
    learning_rate=0.1, momentum=0.9, wd=0.00001)

def norm_stat(d):
    # mean L2 norm per element of each monitored array
    return mx.nd.norm(d) / np.sqrt(d.size)

mon = mx.mon.Monitor(100, norm_stat)
model.fit(X=train, eval_data=val, monitor=mon,
          batch_end_callback=mx.callback.Speedometer(100, 100))
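mnist_iterator itself comes from the examples' local data.py (Example #4 shows the import), not from the mxnet package. A minimal sketch of that helper, assuming the raw MNIST files sit under data/ and the built-in mx.io.MNISTIter:

import mxnet as mx

def mnist_iterator(batch_size, input_shape):
    """Return (train, val) iterators; flat vectors unless input_shape is 3-D."""
    flat = len(input_shape) != 3
    train = mx.io.MNISTIter(
        image='data/train-images-idx3-ubyte',
        label='data/train-labels-idx1-ubyte',
        input_shape=input_shape, batch_size=batch_size,
        shuffle=True, flat=flat)
    val = mx.io.MNISTIter(
        image='data/t10k-images-idx3-ubyte',
        label='data/t10k-labels-idx1-ubyte',
        input_shape=input_shape, batch_size=batch_size, flat=flat)
    return train, val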
Example #5
import logging

import mxnet as mx

from data import mnist_iterator

# input placeholder (the snippet uses `data` without defining it)
data = mx.symbol.Variable('data')

# first conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max",
                          kernel=(2,2), stride=(2,2))
# second conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max",
                          kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=10)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')

## data
train, val = mnist_iterator(batch_size=100, input_shape=(1,28,28))

## train
logging.basicConfig(level=logging.DEBUG)
# dev = [mx.gpu(i) for i in range(2)]
dev = mx.gpu()
model = mx.model.FeedForward(
    ctx = dev, symbol = lenet, num_epoch = 20,
    learning_rate = 0.05, momentum = 0.9, wd = 0.00001)
model.fit(X=train, eval_data=val,
          batch_end_callback=mx.callback.Speedometer(100))
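Once fit returns, the same FeedForward object can score data directly; a short usage sketch:

# class probabilities for the whole validation set, shape (N, 10)
prob = model.predict(val)
print(prob.shape)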
Example #6
# earlier layers (data -> conv1 -> tanh1 -> pool1 -> conv2) as in Example #5
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2,
                          pool_type="max",
                          kernel=(2, 2),
                          stride=(2, 2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=10)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')

## data
train, val = mnist_iterator(batch_size=100, input_shape=(1, 28, 28))

## train
logging.basicConfig(level=logging.DEBUG)
# dev = [mx.gpu(i) for i in range(2)]
dev = mx.gpu()
model = mx.model.FeedForward(ctx=dev,
                             symbol=lenet,
                             num_epoch=20,
                             learning_rate=0.05,
                             momentum=0.9,
                             wd=0.00001)
model.fit(X=train,
          eval_data=val,
          batch_end_callback=mx.callback.Speedometer(100))
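Checkpoints like the 'center_loss' prefix loaded in Example #2 come from an epoch-end callback; a hedged variant of the fit call above that writes them:

# also write lenet-NNNN.params after each epoch, so that
# mx.model.FeedForward.load('lenet', epoch) can restore the run later
model.fit(X=train,
          eval_data=val,
          batch_end_callback=mx.callback.Speedometer(100),
          epoch_end_callback=mx.callback.do_checkpoint('lenet'))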
Example #7
# `act1` is the shared hidden layer from the top of the file (not shown here)
fc3 = mx.symbol.FullyConnected(data=act1, name='fc3', num_hidden=20)
fc4 = mx.symbol.FullyConnected(data=act1, name='fc4', num_hidden=2)
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='mlp')
domin = mx.symbol.SoftmaxOutput(data=fc4, name='domin')

# draw network

batch_size = 100
data_shape = (batch_size, 784)
dot = mx.viz.plot_network(mlp, shape={"data": data_shape})
#dot.render('test-output/round-table.gv', view=True)

# data

train_iter, val_iter = mnist_iterator(batch_size=100, input_shape=(784, ))

# train by model

logging.basicConfig(level=logging.DEBUG)

model = mx.model.FeedForward(ctx=mx.cpu(),
                             symbol=mlp,
                             num_epoch=3,
                             learning_rate=0.1,
                             momentum=0.9,
                             wd=0.00001)

# model.fit(X=train_iter, eval_data=val_iter)

# ==================train by simple_bind==============
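The banner above announces a simple_bind training section that the snippet cuts off. A minimal sketch of that style of manual loop for the mlp head, assuming its weights have already been initialized in executor.arg_dict:

# bind the symbol to concrete shapes; the executor owns all NDArrays
executor = mlp.simple_bind(ctx=mx.cpu(), data=data_shape)

for batch in train_iter:
    executor.arg_dict['data'][:] = batch.data[0]
    executor.arg_dict['mlp_label'][:] = batch.label[0]
    executor.forward(is_train=True)
    executor.backward()
    # plain SGD step on every learnable argument
    for name, grad in zip(mlp.list_arguments(), executor.grad_arrays):
        if name in ('data', 'mlp_label') or grad is None:
            continue
        weight = executor.arg_dict[name]
        weight[:] = weight - 0.1 * grad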
Example #8
import argparse

import mxnet as mx
from mxnet.gluon import nn

from data import mnist_iterator

# the snippet opens mid-way through argument parsing; the flags below are a
# minimal reconstruction of what the rest of the code relies on
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                    help='batch size for training')
parser.add_argument(
    '--log-interval',
    type=int,
    default=100,
    metavar='N',
    help='how many batches to wait before logging training status')
opt = parser.parse_args()

# define network

net = nn.Sequential()
with net.name_scope():
    net.add(nn.Dense(128, activation='relu'))
    net.add(nn.Dense(64, activation='relu'))
    net.add(nn.Dense(10))

# data

train_data, val_data = mnist_iterator(batch_size=opt.batch_size,
                                      input_shape=(28 * 28, ))

# train


def test(ctx):
    metric = mx.metric.Accuracy()
    val_data.reset()
    for batch in val_data:
        data = batch.data[0].as_in_context(ctx)
        label = batch.label[0].as_in_context(ctx)
        output = net(data)
        metric.update([label], [output])

    return metric.get()
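test() only reads back whatever training produced; a sketch of the Gluon training loop that typically pairs with it (hyperparameters are hard-coded here, and the final call's epoch count is an assumption):

from mxnet import autograd, gluon

def train(epochs, ctx):
    net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.1, 'momentum': 0.9})
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
    metric = mx.metric.Accuracy()

    for epoch in range(epochs):
        train_data.reset()
        metric.reset()
        for batch in train_data:
            data = batch.data[0].as_in_context(ctx)
            label = batch.label[0].as_in_context(ctx)
            with autograd.record():
                output = net(data)
                loss = loss_fn(output, label)
            loss.backward()
            trainer.step(data.shape[0])
            metric.update([label], [output])
        name, acc = metric.get()
        print('[Epoch %d] training %s=%f' % (epoch, name, acc))
        print('[Epoch %d] validation %s=%f' % ((epoch,) + test(ctx)))

if __name__ == '__main__':
    train(epochs=10, ctx=mx.cpu())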