"""Run a pretrained 'simple2' model on the MNIST CSV validation set.

First sanity-checks the model by classifying a handful of single
validation images, then predicts labels for the whole validation set.
`Image` / `Label` are laid out for a submission-style CSV (pandas is
imported for that purpose).
"""
import mxnet as mx
from mxnet import nd  # FIX: `nd` was used below but never imported (NameError)
import numpy as np
import pandas as pd

from dataset import get_data
from symbols import symbols

batch_size = 1
num_test_images = 10

# Load the MNIST CSV data (labels one-hot encoded).
mnistcsv = get_data.read(one_hot=True)

# Load the pretrained model from the local symbols package.
net = symbols.get_model('simple2', pretrained=True)

# Sanity check: classify the first few validation images one at a time.
for i in range(num_test_images):
    X, _ = mnistcsv.validation.next_batch(batch_size)
    X = nd.array(X)
    y = net(X)
    ans = y.argmax(axis=1).asnumpy()
    # FIX: %d-formatting a numpy array is deprecated; extract the scalar.
    print("%d-th type %d" % (i, int(ans[0])))

# Predict labels for the full validation set in one forward pass.
img_data = nd.array(mnistcsv.validation.data)
y = net(img_data)
# int8 is sufficient: MNIST labels are 0-9.
Label = y.argmax(axis=1).asnumpy().astype(np.int8)
# 1-based row ids, as expected by the submission format.
Image = np.arange(len(Label)) + 1
"""Classify MNIST validation images with a pretrained 'mobilenet0.25'.

Each 784-vector grayscale image is reshaped to NCHW and the single
channel is tiled to 3 so it matches the RGB input the pretrained network
expects.  NOTE(review): the pretrained weights are ImageNet weights, so
the printed "types" are ImageNet class ids, not digit labels — the
recorded sample output below confirms the mismatch.
"""
import mxnet as mx  # FIX: `mx` was used below but never imported (NameError)
from mxnet import nd  # FIX: `nd` was used below but never imported (NameError)
import pandas as pd
from gluoncv import model_zoo as mzoo

from dataset import get_data  # FIX: `get_data` was used but never imported
from symbols import symbols

batch_size = 1
num_test_images = 10

# Device context; switch to [mx.gpu(i) for i in range(num_gpus)] for GPUs.
ctx = mx.cpu()

# Load the MNIST CSV data (labels one-hot encoded).
mnistcsv = get_data.read(one_hot=True)

# Pretrained model (resolved through the local symbols package).
modelname = 'mobilenet0.25'
net = symbols.get_model(modelname, pretrained=True)

for i in range(num_test_images):
    X, _ = mnistcsv.validation.next_batch(batch_size)
    X = nd.array(X)
    # 784-vector -> NCHW (N, 1, 28, 28), then tile gray to a fake RGB input.
    X = X.reshape((-1, 1, 28, 28))
    X = nd.concat(X, X, X, dim=1)
    y = net(X)
    ans = y.argmax(axis=1).asnumpy()
    # FIX: %d-formatting a numpy array is deprecated; extract the scalar.
    print("%d-th type %d" % (i, int(ans[0])))

# FIX: the sample output below sat in an unterminated ''' literal
# (SyntaxError); recorded as comments instead.
# 0-th type 2
# 1-th type 0
# 2-th type 9
# 3-th type 0
# 4-th type 3
# --- Semi-supervised Pi-model setup: data, network, augmentation, losses ---


def transform(data, label):
    """Flatten a 28x28 MNIST image to a float32 784-vector in [0, 1].

    The label passes through unchanged.  (FIX: was an assigned lambda,
    PEP 8 E731 — a named `def` is the idiomatic form.)
    """
    return data.reshape(784,).astype(np.float32) / 255, label


# Batches of 100; training shuffles and drops the ragged final batch so
# every batch has an identical shape.
train_data = gluon.data.DataLoader(
    dataset=gluon.data.vision.MNIST(train=True, transform=transform),
    batch_size=100,
    shuffle=True,
    last_batch='discard')
val_data = gluon.data.DataLoader(
    dataset=gluon.data.vision.MNIST(train=False, transform=transform),
    batch_size=100,
    shuffle=False)

# Network: Pi-model semi-supervised training on the 'simple2' backbone.
modelname = 'semi_pi_simple2'
basemodel_zoo = 'simple2'
net = symbols.get_model(basemodel_zoo)
net.initialize(mx.init.Xavier(magnitude=2.24))


def g(x):
    """Stochastic input augmentation: add zero-mean Gaussian noise.

    Noise scale comes from the module-level `stochastic_ratio`
    (defined elsewhere in this file — TODO confirm it is set before
    the first call).
    """
    return x + nd.random.normal(0, stochastic_ratio, shape=x.shape)


# Losses and metric: cross-entropy for the supervised term, L2 for the
# Pi-model consistency term between two stochastic forward passes.
l_logistic = gloss.SoftmaxCrossEntropyLoss()
l_l2loss = gloss.L2Loss()
metric = mx.metric.Accuracy()