Example 1
 def check_fn(weights):
     return layers.softmax_loss(f(x=x, fc_weight=weights), fake_y)
Example 2
 def loss(self, predict, y):
     return layers.softmax_loss(predict, y)
Example 3
 def check_gamma(g):
     y, _, _ = layers.batchnorm(x, g, beta)
     return layers.softmax_loss(y, fake_y)
Example 4
 def check_beta(b):
     y, _, _ = layers.batchnorm(x, gamma, b)
     return layers.softmax_loss(y, fake_y)
Example 5
 def check_fn(w):
     return layers.softmax_loss(layers.affine(x, w, b), fake_y)
Example 6
 def check_fn(x):
     return layers.softmax_loss(layers.relu(x), fake_y)
Example 7
 def check_fn(x):
     return layers.softmax_loss(x, lbl)
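
The `check_*` closures above wrap a forward pass into a scalar softmax loss of a single argument, which is exactly the kind of function a finite-difference gradient check expects. Below is a minimal sketch of such a checker in plain NumPy; `numerical_gradient` is an illustrative helper and is not part of the `layers` API.

import numpy as np

def numerical_gradient(f, x, eps=1e-6):
    """Central-difference estimate of df/dx for a scalar-valued f(x)."""
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        plus = f(x)          # loss with the entry nudged up
        x[idx] = orig - eps
        minus = f(x)         # loss with the entry nudged down
        x[idx] = orig        # restore the perturbed entry
        grad[idx] = (plus - minus) / (2 * eps)
        it.iternext()
    return grad

# Usage sketch (names taken from the examples above):
# num_grad = numerical_gradient(check_fn, weights)
# compare num_grad against the gradient produced by the framework's autograd.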
Example 8
 def loss(self, prediction, Y):
     return layers.softmax_loss(prediction, Y)
Example 9
 def loss(self, predict, y):
     # Compute softmax loss between the output and the label.
     return layers.softmax_loss(predict, y)
Example 10
# `network`, `convolution_module`, `bind`, `forward`, `backward`, `softmax_loss`,
# and `context` are defined earlier in the source this snippet comes from.
import numpy as np
import mxnet as mx
from mxnet.symbol import Variable, Pooling, Flatten, FullyConnected, SoftmaxOutput

network.extend(convolution_module(num_filter=64))
network.extend([
    bind(lambda data: Pooling(data=data, pool_type='avg', kernel=(8, 8), stride=(1, 1), pad=(0, 0))),
    bind(lambda data: Flatten(data=data)),
    bind(lambda data: FullyConnected(data=data, num_hidden=10)),
])

# Softmax loss over the network's 10-way scores.
data = Variable('data', shape=(128, 10))
labels = Variable('labels', shape=(128,))
loss_symbol = SoftmaxOutput(data=data, label=labels)
loss = loss_symbol.simple_bind(context)

# Random data standing in for a CIFAR-10-sized dataset (50000 images, 10 classes).
X = mx.nd.random_normal(0, 1, (50000, 3, 32, 32), context)
Y = mx.nd.array(np.random.choice(np.arange(10), (50000,)), context)
iterator = mx.io.NDArrayIter(data=X, label=Y, batch_size=128)

# Loop over epochs indefinitely.
while True:
    iterator.reset()
    for i, batch in enumerate(iterator):
        data, labels = batch.data[0], batch.label[0]
        scores = forward(network, data)
        results = loss.forward(data=scores, labels=labels, is_train=True)
        softmax_loss(results[0].asnumpy(), labels.asnumpy())
        loss.backward()
        backward(network, loss.grad_dict['data'])

        if (i + 1) % 100 == 0:
            print(i + 1)
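
The loop reports training progress through a `softmax_loss` helper defined elsewhere in the original source. As a rough idea of what such a monitor typically computes, here is a NumPy sketch (an assumption, not the original implementation): it treats the executor's output as softmax probabilities and returns the mean cross-entropy over the batch.

import numpy as np

def softmax_loss(probs, labels):
    """Average cross-entropy, assuming `probs` are the class probabilities
    emitted by SoftmaxOutput and `labels` are integer class ids."""
    n = probs.shape[0]
    correct = probs[np.arange(n), labels.astype(int)]
    return -np.log(correct + 1e-12).mean()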