    def gradients_check(self, func, param, autograds, h=0.0005, df=1):
        # param: PyTensor
        # autograds: numpy_tensor
        p = tensor.to_numpy(param)
        it = np.nditer(p, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            diff = np.zeros_like(p)
            diff[idx] += h
            diff = tensor.from_numpy(diff)
            diff.to_device(gpu_dev)

            param += diff
            pos = func()
            pos = tensor.to_numpy(pos)

            param -= diff
            param -= diff
            neg = func()
            neg = tensor.to_numpy(neg)

            param += diff  # restore param to its original value before the next index

            numerical_grad = np.sum((pos - neg) * df) / (2 * h)
            #print((autograds[idx] - numerical_grad)/numerical_grad)
            # threshold set as -5% to +5%
            #self.assertAlmostEqual((autograds[idx] - numerical_grad)/(numerical_grad+0.0000001), 0., places=1)
            self.assertAlmostEqual(
                autograds[idx] - numerical_grad, 0., places=2)

            it.iternext()
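
The RNN and LSTM tests near the end of this page drive this checker; a minimal usage sketch, where `forward_fn` is any closure returning the loss tensor and `model.params` stands in for your own parameter list:

# usage sketch (mirrors the numerical-gradient tests below); `forward_fn`
# and `model.params` are placeholders, not names from the original snippet
loss = forward_fn()
auto_grads = autograd.gradients(loss)
for param in model.params:
    auto_grad = tensor.to_numpy(auto_grads[param])
    self.gradients_check(forward_fn, param, auto_grad)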
Example 2
def onnx_to_singa(niter, use_cpu=False):
    if use_cpu:
        print("Using CPU")
        dev = device.get_default_device()
    else:
        print("Using GPU")
        dev = device.create_cuda_gpu()
    model = sonnx.load("mlp.onnx")
    backend = sonnx.prepare(model, device=dev)
    sgd = opt.SGD(0.1)
    inputs = Tensor(
        data=data,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="input",
    )
    target = Tensor(
        data=label,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="target",
    )

    for i in range(100):
        y = backend.run([inputs])[0]
        loss = autograd.softmax_cross_entropy(y, target)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)
        loss_rate = tensor.to_numpy(loss)[0]
        accuracy_rate = accuracy(tensor.to_numpy(y), label)

        print("Iter {}, accurate={}, loss={}".format(i, accuracy_rate, loss_rate))
Example 3
 def test_copy_data(self):
     t = self.t
     t += 1.23
     s = self.s
     s += 5.43
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
     tensor.copy_data_to_from(t, s, 2)
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
Example 4
    def test_numpy_convert(self):
        a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int32)
        t = tensor.from_numpy(a)
        b = tensor.to_numpy(t)
        self.assertEqual(np.sum(a-b), 0)

        a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
        t = tensor.from_numpy(a)
        b = tensor.to_numpy(t)
        self.assertEqual(np.sum(a-b), 0.)
Example 5
 def test_slice(self):
     t = np.zeros((3, 3))
     t[:, :2] = float(2)
     t[:, 2] = float(1)
     lyr = layer.Slice('slice', 1, [2], t.shape)
     out = lyr.forward(model_pb2.kTrain, [tensor.from_numpy(t)])
     t1 = tensor.to_numpy(out[0])
     t2 = tensor.to_numpy(out[1])
     self.assertEqual(np.average(t1), 2)
     self.assertEqual(np.average(t2), 1)
Example 6
 def test_unary_operators(self):
     t = self.t
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
     t += 1.23
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
     t -= 0.23
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23-0.23)
     t *= 2.5
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23-0.23)*2.5)
     t /= 2
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23-0.23)*2.5/2)
Example 7
 def test_binary_operators(self):
     t = self.t
     t += 3.2
     s = self.s
     s += 2.1
     a = t + s
     self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2+2.1, 5)
     a = t - s
     self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2-2.1, 5)
     a = t * s
     self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2*2.1, 5)
     # not implemented yet
Example 8
def singa_to_onnx(epochs, use_cpu=False, batchsize=32):
    sgd = opt.SGD(lr=0.1)

    # operations initialization
    conv1 = autograd.Conv2d(1, 8, 3, 2, padding=1) # 28 - 14
    conv2 = autograd.Conv2d(8, 4, 3, 2, padding=1) # 14 - 7
    pooling = autograd.MaxPool2d(3, 2, padding=1) # 7 - 4
    linear = autograd.Linear(64, 10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.relu(y)
        y = conv2(y)
        y = autograd.relu(y)
        y = pooling(y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y

    autograd.training = True
    (x_train, y_train), (x_test, y_test), dev = common(use_cpu)

    niter = 1 # x_train.shape[0] // batchsize
    for epoch in range(epochs):
        accuracy_rate = 0.0
        loss_rate = 0.0
        for i in range(niter):
            inputs = tensor.Tensor(
                device=dev,
                data=x_train[i * batchsize : (i + 1) * batchsize],
                stores_grad=False,
                name="input",
            )
            targets = tensor.Tensor(
                device=dev,
                data=y_train[i * batchsize : (i + 1) * batchsize],
                requires_grad=False,
                stores_grad=False,
                name="target",
            )
            loss, y = forward(inputs, targets)
            accuracy_rate += accuracy(
                tensor.to_numpy(y), y_train[i * batchsize : (i + 1) * batchsize]
            )
            loss_rate += tensor.to_numpy(loss)[0]
            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)
        print( "accuracy is {}, loss is {}".format( accuracy_rate / niter, loss_rate / niter))
    model = sonnx.to_onnx_model([inputs], [y])
    sonnx.save(model, "cnn.onnx")
Example 9
 def test_concat(self):
     t1 = tensor.Tensor((2, 3))
     t2 = tensor.Tensor((1, 3))
     t1.set_value(1)
     t2.set_value(2)
     lyr = layer.Concat('concat', 0, [(3,), (3,)])
     t = lyr.forward(model_pb2.kTrain, [t1, t2])
     tnp = tensor.to_numpy(t)
     self.assertEqual(np.sum(tnp), 12)
     t3 = tensor.Tensor((3, 3))
     t3.set_value(1.5)
     grads, _ = lyr.backward(model_pb2.kTrain, [t3])
     gnp = tensor.to_numpy(grads[0])
     self.assertEqual(np.sum(gnp), 6 * 1.5)
Example 10
    def test_transpose(self):
        a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
        a = np.reshape(a,(2,3,2))
        ta = tensor.from_numpy(a)

        A1 = np.transpose(a)
        tA1 = tensor.transpose(ta)
        TA1 = tensor.to_numpy(tA1)
        A2 = np.transpose(a,[0,2,1])
        tA2 = tensor.transpose(ta,[0,2,1])
        TA2 = tensor.to_numpy(tA2)

        self.assertAlmostEqual(np.sum(TA1 - A1), 0.,places=3)
        self.assertAlmostEqual(np.sum(TA2 - A2), 0.,places=3)
Example 11
    def test_tensordot(self):
        a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
        a = np.reshape(a,(2,3,2))

        ta = tensor.from_numpy(a)

        res1 = np.tensordot(a, a, axes = 1)
        tres1 = tensor.tensordot(ta, ta, axes = 1)
        Tres1 = tensor.to_numpy(tres1)
        res2 = np.tensordot(a, a, axes = ([0,1],[2,1]))
        tres2 = tensor.tensordot(ta, ta, axes = ([0,1],[2,1]))
        Tres2 = tensor.to_numpy(tres2)

        self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
        self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
Example 12
    def test_repeat(self):

        a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
        a = np.reshape(a,(2,3,2))
        ta = tensor.from_numpy(a)

        ta_repeat1 = tensor.repeat(ta,2,axis = None)
        a_repeat1 = np.repeat(a,2,axis = None)
        Ta_repeat1 = tensor.to_numpy(ta_repeat1)
        ta_repeat2 = tensor.repeat(ta, 4, axis = 1)
        a_repeat2 = np.repeat(a, 4, axis = 1)
        Ta_repeat2 = tensor.to_numpy(ta_repeat2)

        self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
        self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
Example 13
    def test_einsum(self):

        a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
        a = np.reshape(a,(2,3,2))
        ta = tensor.from_numpy(a)

        res1 = np.einsum('kij,kij->kij', a, a)
        tres1 = tensor.einsum('kij,kij->kij', ta, ta)
        Tres1 = tensor.to_numpy(tres1)
        res2 = np.einsum('kij,kih->kjh', a, a)
        tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
        Tres2 = tensor.to_numpy(tres2)
        
        self.assertAlmostEqual(np.sum(Tres1 - res1), 0.,places=3)
        self.assertAlmostEqual(np.sum(Tres2 - res2), 0.,places=3)
Example 14
def predict(net, dev, synset_list, topk=5):
    '''Predict the label of each image.

    Args:
        net, a pretrained neural net
        dev, the device on which to run the net
        synset_list, the synset of labels
        topk, return the topk labels for each image.
    '''
    while True:
        img_path = input("Enter input image path('quit' to exit): ")
        if img_path == 'quit':
            return
        if not os.path.exists(img_path):
            print('Path is invalid')
            continue
        img = read_image(img_path)
        x = tensor.from_numpy(img.astype(np.float32)[np.newaxis, :])
        x.to_device(dev)
        y = net.predict(x)
        y.to_host()
        prob = tensor.to_numpy(y)
        lbl = np.argsort(-prob[0])  # sort prob in descending order
        print([synset_list[lbl[i]] for i in range(topk)])
Example 15
 def test_sgd(self):
     lr = 0.1
     sgd = opt.SGD(lr)
     sgd.apply(0, self.g, self.W, 'w')
     w = tensor.to_numpy(self.W)
     for i in range(self.W.size()):
         self.assertAlmostEqual(w[i], self.np_W[i] - lr * self.np_g[i])
Example 16
 def test_regularizer(self):
     coefficient = 0.0001
     reg = opt.L2Regularizer(coefficient)
     reg.apply(0, self.W, self.g)
     g = tensor.to_numpy(self.g)
     for i in range(g.size):
         self.assertAlmostEqual(g[i],
                                self.np_g[i] + coefficient * self.np_W[i])
Example 17
 def test_constraint(self):
     threshold = 0.02
     cons = opt.L2Constraint(threshold)
     cons.apply(0, self.W, self.g)
     g = tensor.to_numpy(self.g)
     nrm = np.linalg.norm(self.np_g) / self.np_g.size
     for i in range(g.size):
         self.assertAlmostEqual(g[i], self.np_g[i] * threshold / nrm)
Example 18
 def test_slice(self):
     t = np.zeros((3, 3))
     t[:, :2] = float(2)
     t[:, 2] = float(1)
     lyr = layer.Slice('slice', 1, [2], (3,))
     out = lyr.forward(model_pb2.kTrain, [tensor.from_numpy(t)])
     t1 = tensor.to_numpy(out[0])
     t2 = tensor.to_numpy(out[1])
     self.assertEqual(np.average(t1), 2)
     self.assertEqual(np.average(t2), 1)
     t1 = tensor.Tensor((3, 2))
     t2 = tensor.Tensor((3, 1))
     t1.set_value(1)
     t2.set_value(2)
     grad, _ = lyr.backward(model_pb2.kTrain, [t1, t2])
     gnp = tensor.to_numpy(grad)
     self.assertEqual(np.sum(gnp), 12)
Example 19
def serve(agent, use_cpu, parameter_file, topk=5):
    if use_cpu:
        print('running with cpu')
        dev = device.get_default_device()
        layer.engine = 'singacpp'
    else:
        print("runing with gpu")
        dev = device.create_cuda_gpu()
    agent = agent

    print('Start intialization............')
    net = create_net((3, 224, 224), parameter_file)
    net.to_device(dev)
    print('End intialization............')

    labels = np.loadtxt('synset_words.txt', str, delimiter='\t ')
    while True:
        key, val = agent.pull()
        if key is None:
            time.sleep(0.1)
            continue
        msg_type = MsgType.parse(key)
        if msg_type.is_request():
            try:
                response = ""
                img = imread(val['image'], mode='RGB').astype(np.float32)
                height,width = img.shape[:2]
                img[:, :, 0] -= 123.68
                img[:, :, 1] -= 116.779
                img[:, :, 2] -= 103.939
                img[:,:,[0,1,2]] = img[:,:,[2,1,0]]
                img = img.transpose((2, 0, 1))
                img = img[:, (height-224)//2:(height+224)//2,\
                          (width-224)//2:(width+224)//2]
                images = np.expand_dims(img, axis=0)

                x = tensor.from_numpy(images.astype(np.float32))
                x.to_device(dev)
                y = net.predict(x)
                prob = np.average(tensor.to_numpy(y), 0)
                # sort and reverse
                idx = np.argsort(-prob)[0:topk]
                for i in idx:
                    response += "%s:%s<br/>" % (labels[i], prob[i])
            except Exception:
                traceback.print_exc()
                response = "Sorry, system error during prediction."
            agent.push(MsgType.kResponse, response)
        elif MsgType.kCommandStop.equal(msg_type):
            print('get stop command')
            agent.push(MsgType.kStatus, "success")
            break
        else:
            print('get unsupported message %s' % str(msg_type))
            agent.push(MsgType.kStatus, "Unknown command")
            break
        # while loop
    print("server stop")
Example 20
 def test_concat(self):
     t1 = tensor.Tensor((2, 3))
     t2 = tensor.Tensor((1, 3))
     t1.set_value(1)
     t2.set_value(2)
     lyr = layer.Concat('concat', 0, [t1.shape, t2.shape])
     t = lyr.forward(model_pb2.kTrain, [t1, t2])
     tnp = tensor.to_numpy(t[0])
     self.assertEqual(np.sum(tnp), 12)
Example 21
    def test_sum(self):
        a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
        a = np.reshape(a,(2,3,2))
        ta = tensor.from_numpy(a)

        a_sum0 = np.sum(a)
        ta_sum0 = tensor.sum(ta)
        Ta_sum0 = tensor.to_numpy(ta_sum0)
        a_sum1 = np.sum(a, axis = 1)
        ta_sum1 = tensor.sum(ta, axis = 1)
        Ta_sum1 = tensor.to_numpy(ta_sum1)
        a_sum2 = np.sum(a, axis = 2)
        ta_sum2 = tensor.sum(ta, axis = 2)
        Ta_sum2 = tensor.to_numpy(ta_sum2)

        self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
        self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
        self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
Example 22
 def forward(self, flag, x):
     '''pad zeros'''
     tmp = tensor.to_numpy(x)
     shape = add_to_tuple(x.shape)
     ret = np.zeros(shape)
     ret[:,:,:-1, :-1] = tmp
     y = tensor.from_numpy(ret)
     y.to_device(x.device)
     return y
Example 23
def serve(net, label_map, dev, agent, topk=5):
    '''Serve to predict image labels.

    It prints the topk food names for each image.

    Args:
        label_map: a list of food names, corresponding to the index in meta_file
    '''

    images = tensor.Tensor((num_augmentation, 3, crop_size, crop_size), dev)
    while True:
        msg, val = agent.pull()
        if msg is None:
            time.sleep(0.1)
            continue
        msg = MsgType.parse(msg)
        if msg.is_request():
            try:
                # process images
                img = imread(val['image'], mode='RGB').astype(np.float32) / 255
                height,width = img.shape[:2]
                img -= mean
                img /= std
                img = img.transpose((2, 0, 1))
                img = img[:,\
                (height-224)//2:(height+224)//2,(width-224)//2:(width+224)//2]
                images.copy_from_numpy(img)
                print("input: ", images.l1())
                # do prediction
                y = net.predict(images)
                prob = np.average(tensor.to_numpy(y), 0)
                idx = np.argsort(-prob)
                # prepare results
                response = ""
                for i in range(topk):
                    response += "%s:%f <br/>" % (label_map[idx[i]],
                                                 prob[idx[i]])
            except Exception:
                traceback.print_exc()
                response = "sorry, system error during prediction."
            agent.push(MsgType.kResponse, response)
        elif msg.is_command():
            if MsgType.kCommandStop.equal(msg):
                print('get stop command')
                agent.push(MsgType.kStatus, "success")
                break
            else:
                print('get unsupported command %s' % str(msg))
                agent.push(MsgType.kStatus, "Unknown command")
        else:
            print('get unsupported message %s' % str(msg))
            agent.push(MsgType.kStatus, "unsupported msg; going to shutdown")
            break
    print("server stop")
Example 24
 def test_mult_inputs(self):
     ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
     s1 = ffn.add(layer.Activation('relu1', input_sample_shape=(2,)), [])
     s2 = ffn.add(layer.Activation('relu2', input_sample_shape=(2,)), [])
     ffn.add(layer.Merge('merge', input_sample_shape=(2,)), [s1, s2])
     x1 = tensor.Tensor((2, 2))
     x1.set_value(1.1)
     x2 = tensor.Tensor((2, 2))
     x2.set_value(0.9)
     out = ffn.forward(False, {'relu1':x1, 'relu2':x2})
     out = tensor.to_numpy(out)
     self.assertAlmostEqual(np.average(out), 2)
Example 25
def predict(net, images, num=10):
    '''predict probability distribution for one net.

    Args:
        net: neural net (vgg or resnet)
        images: a batch of augmented images (type numpy)
        num: num of augmentations
    '''
    prob = net.predict(images)
    prob = tensor.to_numpy(prob)
    prob = prob.reshape(((images.shape[0] // num), num, -1))
    prob = np.average(prob, 1)
    return prob
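
A hypothetical call, assuming `images` stacks `num` augmented copies of each original image as the docstring describes:

# hypothetical usage: average probabilities over 10 augmentations per image
prob = predict(net, images, num=10)
pred_labels = np.argmax(prob, axis=1)  # final label per original image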
Example 26
    def test_MeanSquareError(self):
        X = np.array([4.3, 5.4, 3.3, 3.6, 5.7, 6.0]).reshape(3, 2).astype(np.float32)
        T = np.array([4.4, 5.3, 3.2, 3.7, 5.4, 6.3]).reshape(3, 2).astype(np.float32)
        x = tensor.from_numpy(X)
        t = tensor.from_numpy(T)
        x.to_device(gpu_dev)
        t.to_device(gpu_dev)

        loss = autograd.mse_loss(x, t)
        dx = loss.creator.backward()[0]

        loss_np = tensor.to_numpy(loss)
        self.assertAlmostEqual(loss_np, 0.0366666, places=4)
        self.check_shape(dx.shape(), (3, 2))
Example 27
def onnx_to_singa(epochs, use_cpu=False, batchsize=32):
    (x_train, y_train), (x_test, y_test), dev = common(use_cpu)
    model = sonnx.load("cnn.onnx")
    backend = sonnx.prepare(model, dev)
    autograd.training = True
    sgd = opt.SGD(lr=0.01)
    niter = x_train.shape[0] // batchsize
    for epoch in range(epochs):
        accuracy_rate = 0.0
        loss_rate = 0.0
        for i in range(niter):
            inputs = tensor.Tensor(
                device=dev,
                data=x_train[i * batchsize : (i + 1) * batchsize],
                stores_grad=False,
                name="input",
            )
            targets = tensor.Tensor(
                device=dev,
                data=y_train[i * batchsize : (i + 1) * batchsize],
                requires_grad=False,
                stores_grad=False,
                name="target",
            )
            y = backend.run([inputs])[0]
            loss = autograd.softmax_cross_entropy(y, targets)

            accuracy_rate += accuracy(
                tensor.to_numpy(y), y_train[i * batchsize : (i + 1) * batchsize]
            )
            loss_rate += tensor.to_numpy(loss)[0]

            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)

        print("accuracy is {}, loss is {}".format(accuracy_rate / niter, loss_rate / niter))
Example 28
    def _concat_helper(self, dev):
        np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
        np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
        np3 = np.concatenate((np1, np2), axis=3)

        t1 = tensor.Tensor(device=dev, data=np1)
        t2 = tensor.Tensor(device=dev, data=np2)

        ctensors = singa_api.VecTensor()
        ctensors.append(t1.data)
        ctensors.append(t2.data)

        t3_ct = singa_api.ConcatOn(ctensors, 3)

        np.testing.assert_array_almost_equal(
            tensor.to_numpy(_cTensor_to_pyTensor(t3_ct)), np3)
Example 29
    def test_dist_opt_spars_value(self):
        # Test the C++ value based sparsification operation for all reduce

        param.set_value(10)
        grad.set_value(1)

        sgd.sparsification(grad.data,
                           accumulation=None,
                           spars=0.05,
                           topK=False)
        sgd.wait()
        sgd.update(param, grad)

        np.testing.assert_array_almost_equal(tensor.to_numpy(param),
                                             expected,
                                             decimal=5)
Example 30
    def test_transpose_and_mul(self):
        s1 = [3, 2, 1, 1]
        s2 = [3, 2, 1, 1]
        x_0 = np.random.random(s1).astype(np.float32)
        y_0 = np.random.random(s2).astype(np.float32)

        x0 = tensor.Tensor(device=gpu_dev, data=x_0)
        y0 = tensor.Tensor(device=gpu_dev, data=y_0)
        x1 = x0.transpose([3, 2, 1, 0])

        #print(x1.shape)
        #print(y0.shape)

        z0 = x1 * y0
        np.testing.assert_array_almost_equal(tensor.to_numpy(z0),
                                             x_0.transpose() * y_0)
Example 31
    def test_MeanSquareError(self):
        X = np.array([4.3, 5.4, 3.3, 3.6, 5.7,
                      6.0]).reshape(3, 2).astype(np.float32)
        T = np.array([4.4, 5.3, 3.2, 3.7, 5.4,
                      6.3]).reshape(3, 2).astype(np.float32)
        x = tensor.from_numpy(X)
        t = tensor.from_numpy(T)
        x.to_device(gpu_dev)
        t.to_device(gpu_dev)

        loss = autograd.mse_loss(x, t)
        dx = loss.creator.backward()[0]

        loss_np = tensor.to_numpy(loss)
        self.assertAlmostEqual(loss_np, 0.0366666, places=4)
        self.check_shape(dx.shape(), (3, 2))
Example 32
    def test_optimizer(self, dev):
        o1 = opt.Optimizer(0.1)

        # test step
        o1.step()
        o1.step()

        # test get states
        s1 = o1.get_states()
        self.assertAlmostEqual(s1['step_counter'], 2)

        # test set states
        s2 = {'step_counter': 5}
        o1.set_states(s2)
        np.testing.assert_array_almost_equal(tensor.to_numpy(o1.step_counter),
                                             [5])
Example 33
        def _test(s1, s2, axis1, axis2, s3):
            x_0 = np.random.random(s1).astype(np.float32)
            y_0 = np.random.random(s2).astype(np.float32)

            x0 = tensor.Tensor(device=gpu_dev, data=x_0)
            y0 = tensor.Tensor(device=gpu_dev, data=y_0)

            x1 = x0.transpose(axis1)
            y1 = y0.transpose(axis2)
            #print(x1.shape)
            #print(y1.shape)

            z0 = x1 * y1
            np.testing.assert_array_almost_equal(
                tensor.to_numpy(z0),
                x_0.transpose(axis1) * y_0.transpose(axis2))
            np.testing.assert_array_almost_equal(z0.shape, s3)
Example 34
        def _run_test(org_shape, axis, aft_shape):
            x_0 = np.random.random(org_shape).astype(np.float32)
            x_0 = x_0 + 1000
            x0 = tensor.Tensor(device=dev, data=x_0)

            # test with axis
            y0 = tensor._call_singa_func(singa_api.SoftMax, x0.data, axis)

            # test with numpy
            x_0 = x_0.reshape(aft_shape)
            x_0 = x_0 - np.max(x_0)
            y1 = np.divide(np.exp(x_0),
                           np.sum(np.exp(x_0), axis=1).reshape(x_0.shape[0],
                                                               1))  # 2d softmax
            y1 = y1.reshape(org_shape)

            np.testing.assert_array_almost_equal(tensor.to_numpy(y0), y1)
Example 35
 def _kint_kint_bc(self, dev=gpu_dev):
     a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
                       [1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
                      [[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
                       [1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
                      [[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
                       [-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
                     dtype=np.int32)
     b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
                      [7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
                     dtype=np.int32)
     ta = tensor.from_numpy(a_np)
     tb = tensor.from_numpy(b_np)
     ta.to_device(dev)
     tb.to_device(dev)
     y = ta - tb
     np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
Example 36
def singa_to_onnx(niter, use_cpu=False):
    if use_cpu:
        print("Using CPU")
        dev = device.get_default_device()
    else:
        print("Using GPU")
        dev = device.create_cuda_gpu()
    inputs = Tensor(
        data=data,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="input",
    )
    target = Tensor(
        data=label,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="target",
    )

    w0 = Tensor(shape=(2, 3), device=dev, requires_grad=True, stores_grad=True)
    w0.gaussian(0.0, 0.1)
    b0 = Tensor(shape=(3,), device=dev, requires_grad=True, stores_grad=True)
    b0.set_value(0.0)

    w1 = Tensor(shape=(3, 2), device=dev, requires_grad=True, stores_grad=True)
    w1.gaussian(0.0, 0.1)
    b1 = Tensor(shape=(2,), device=dev, requires_grad=True, stores_grad=True)
    b1.set_value(0.0)

    sgd = opt.SGD(0.1)
    # training process
    for i in range(100):
        x = autograd.matmul(inputs, w0)
        x = autograd.add_bias(x, b0)
        x = autograd.relu(x)
        x = autograd.matmul(x, w1)
        x = autograd.add_bias(x, b1)
        loss = autograd.softmax_cross_entropy(x, target)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)

        print("training loss = ", tensor.to_numpy(loss)[0])
    sonnx.export([inputs], [x], file_path="mlp.onnx")
Example 37
 def test_tensor_copy(self):
     t = tensor.Tensor((2, 3))
     t += 1.23
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
     tc = t.copy()
     tdc = t.deepcopy()
     self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
     self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
     t += 1.23
     self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
     self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
     self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
Example 39
 def test_comparison_operators(self):
     t = self.t
     t += 3.45
     a = t < 3.45
     self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
     a = t <= 3.45
     self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
     a = t > 3.45
     self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
     a = t >= 3.45
     self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
     a = tensor.lt(t, 3.45)
     self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
     a = tensor.le(t, 3.45)
     self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
     a = tensor.gt(t, 3.45)
     self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
     a = tensor.ge(t, 3.45)
     self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
Example 41
    def matmul_high_dim_helper(self, dev):
        configs = [
            [(1, 12, 7, 64), (1, 12, 64, 7)],
            [(1, 7, 768), (768, 768)],
        ]
        print()
        for config in configs:
            X = np.random.random(config[0]).astype(np.float32)
            x = tensor.from_numpy(X)
            x.to_device(dev)

            W = np.random.random(config[1]).astype(np.float32)
            w = tensor.from_numpy(W)
            w.to_device(dev)

            y_t = np.matmul(X, W)
            y = autograd.matmul(x, w)
            np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)
Example 42
    def combine_node(model, modeldic):
        '''
        Combine MatMul + Add operator pairs into Linear layers.
        '''

        for idx, i in enumerate(model.graph.node):
            if (i.op_type == 'MatMul'):
                addlist = Backend.find_add(model,i.output[0])
                if (len(addlist) == 0): continue
                if (len(addlist) > 1): continue
                addidx = addlist[0]
                if (i.name == "not_requires_grad" and model.graph.node[addidx].name == "not_requires_grad"): continue
                model.graph.node[idx].output[0] = model.graph.node[addidx].output[0]
                model.graph.node[idx].input.append(model.graph.node[addidx].input[1])
                model.graph.node[idx].op_type = 'Linear'
                model.graph.node[addidx].op_type = 'removed'

        layer = {}
        for i in model.graph.node:
            if (i.op_type == 'Linear'):
                shape = Backend.find_shape(model,i.input[1])
                layer[str(i.output[0])] = autograd.Linear(shape[0], shape[1])
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])]))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])]))


        for i in model.graph.node:
            if (i.op_type == 'Conv'):
                shape = Backend.find_shape(model,i.input[1])
                layer[str(i.output[0])] = autograd.Conv2d(shape[1], shape[0], shape[2],
                                                          padding=int(i.attribute[0].ints[0]))
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        for i in model.graph.node:
            if (i.op_type == 'MaxPool'):
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.MaxPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))
        for i in model.graph.node:
            if (i.op_type == 'AveragePool'):
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.AvgPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))
        for i in model.graph.node:
            if (i.op_type == 'BatchNormalization'):
                shape = Backend.find_shape(model,i.input[1])
                layer[str(i.output[0])] = autograd.BatchNorm2d(shape[0])
                layer[str(i.output[0])].set_params(scale=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(bias=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        return model, modeldic, layer
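
Judging by the `Backend.find_add` and `Backend.find_shape` calls, this reads as a static method on the ONNX `Backend` class; a hypothetical invocation after loading a model:

# hypothetical usage: fuse MatMul+Add pairs into Linear layers
model, modeldic, layers = Backend.combine_node(model, modeldic)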
Example 43
        def _test(s1, s2, axis1, axis2, s3, s_op, n_op, dev):
            x_0 = np.random.random(s1).astype(np.float32)
            y_0 = np.random.random(s2).astype(np.float32)

            x0 = tensor.Tensor(device=dev, data=x_0)
            y0 = tensor.Tensor(device=dev, data=y_0)

            x1 = x0.transpose(axis1)
            y1 = y0.transpose(axis2)

            z0 = tensor._call_singa_func(s_op, x1.data, y1.data)
            z0.to_host()

            np.testing.assert_array_almost_equal(
                tensor.to_numpy(z0),
                n_op(x_0.transpose(axis1), y_0.transpose(axis2)))
            np.testing.assert_array_almost_equal(z0.shape, s3)
            return
Example 44
def predict(net, images, dev, topk=5):
    '''Predict the label of each image.

    Args:
        net, a pretrained neural net
        images, a batch of images [batch_size, 3, 32, 32], which have been
            pre-processed
        dev, the training device
        topk, return the topk labels for each image.
    '''
    x = tensor.from_numpy(images.astype(np.float32))
    x.to_device(dev)
    y = net.predict(x)
    y.to_host()
    prob = tensor.to_numpy(y)
    # prob = np.average(prob, 0)
    labels = np.argsort(-prob, axis=1)  # sort prob in descending order per image
    return labels[:, 0:topk]
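
A minimal call sketch, assuming `images` is a pre-processed numpy batch shaped [batch_size, 3, 32, 32] as the docstring states:

# hypothetical usage: top-5 class indices for each image in the batch
topk_labels = predict(net, images, dev, topk=5)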
Example 45
    def predict(self, queries):  # queries: parsed JSON payload
        print("Get queries")

        bs = self.batch_size

        res = []
        input_ids, input_mask, segment_ids, extra_data, eval_examples = self._preprocess(
            queries)
        n = len(input_ids) // bs
        all_results = []

        tmp_dict = {}
        for idx in range(0, n):
            inputs = [
                np.array([eval_examples[j].qas_id for j in range(
                    idx, idx + bs)], dtype=np.int32),
                segment_ids[idx:idx + bs].astype(np.int32),
                input_mask[idx:idx + bs].astype(np.int32),
                input_ids[idx:idx + bs].astype(np.int32),
            ]

            x_batch = []
            for inp in inputs:
                tmp_tensor = tensor.from_numpy(inp)
                tmp_tensor.to_device(self.dev)
                x_batch.append(tmp_tensor)

            outputs = self._model.forward(*x_batch)

            result = []
            for outp in outputs:
                result.append(tensor.to_numpy(outp))

            in_batch = result[1].shape[0]
            start_logits = [float(x) for x in result[1][0].flat]
            end_logits = [float(x) for x in result[0][0].flat]
            for i in range(0, in_batch):
                unique_id = len(all_results)
                all_results.append(
                    RawResult(unique_id=unique_id,
                              start_logits=start_logits,
                              end_logits=end_logits))

        return self._postprocess(eval_examples, extra_data, all_results)
Example 46
    def test_batchnorm_backward_dnnl(self):
        dev = cpu_dev
        N = 1
        C = 3
        H = 2
        W = 2

        data_shape = [N, C, H, W]
        param_shape = [1, C, 1, 1]
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

        x_0 = np.array(data, dtype=np.float32).reshape(data_shape)
        y_0 = np.array(data, dtype=np.float32).reshape(data_shape)
        dy_0 = np.array(data, dtype=np.float32).reshape(data_shape)
        scale_0 = np.array([1] * C, dtype=np.float32).reshape(param_shape)
        bias_0 = np.array([0] * C, dtype=np.float32).reshape(param_shape)

        mean_0 = x_0.mean(axis=(0, 2, 3), keepdims=True)
        var_0 = x_0.var(axis=(0, 2, 3), keepdims=True)

        hndl = singa_api.BatchNormHandle(
            0.1,
            tensor.Tensor(device=dev, data=x_0).data)
        (dx_2_c, _, _) = singa_api.CpuBatchNormBackwardx(
            hndl,
            tensor.Tensor(device=dev, data=y_0).data,
            tensor.Tensor(device=dev, data=dy_0).data,
            tensor.Tensor(device=dev, data=x_0).data,
            tensor.Tensor(device=dev, data=scale_0).data,
            tensor.Tensor(device=dev, data=bias_0).data,
            tensor.Tensor(device=dev, data=mean_0).data,
            tensor.Tensor(device=dev, data=var_0).data,
        )

        dx_truth = np.array([[[[-1.0769e-05, -3.5985e-06],
                               [3.5985e-06, 1.0769e-05]],
                              [[-1.0769e-05, -3.5985e-06],
                               [3.5985e-06, 1.0769e-05]],
                              [[-1.0769e-05, -3.5985e-06],
                               [3.5985e-06, 1.0769e-05]]]])
        np.testing.assert_array_almost_equal(
            tensor.to_numpy(_cTensor_to_pyTensor(dx_2_c)), dx_truth)

        return
Example 47
	def save_image(self, epoch):
		rows = 5
		cols = 5
		channels = self.channels
		noise = tensor.Tensor((rows*cols*channels, self.noise_size))
		noise.uniform(-1, 1)
		noise.to_device(self.dev)
		gen_imgs = self.gen_net.forward(flag=False, x=noise)
		gen_imgs = tensor.to_numpy(gen_imgs)
		show_imgs = np.reshape(gen_imgs, (gen_imgs.shape[0], self.rows, self.cols, self.channels))
		fig, axs = plt.subplots(rows, cols)
		cnt = 0
		for r in range(rows):
			for c in range(cols):
				axs[r,c].imshow(show_imgs[cnt, :, :, 0], cmap='gray')
				axs[r,c].axis('off')
				cnt += 1
		fig.savefig("{}{}.png".format(self.file_dir, epoch))
		plt.close()
Example 48
    def test_gpu_6d_transpose(self, dev=gpu_dev):
        s0 = (2, 3, 4, 5, 6, 7)
        axes1 = [5, 4, 3, 2, 1, 0]
        s1 = (2, 7, 6, 5, 4, 3)
        s2 = (2, 4, 3, 5, 7, 6)
        a = np.random.random(s1)

        ta = tensor.from_numpy(a)
        ta.to_device(dev)

        ta = tensor.reshape(ta, s1)
        ta = tensor.transpose(ta, axes1)
        ta = tensor.reshape(ta, s2)

        a = np.reshape(a, s1)
        a = np.transpose(a, axes1)
        a = np.reshape(a, s2)

        np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)
Example 49
        def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
            # np api
            y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)

            # singa api
            hndl = singa_api.CudnnBatchNormHandle(m_0,
                                                  _np_to_pyTensor(x_0).data)
            y_2_c = singa_api.GpuBatchNormForwardInference(
                hndl,
                _np_to_pyTensor(x_0).data,
                _np_to_pyTensor(s_0).data,
                _np_to_pyTensor(b_0).data,
                _np_to_pyTensor(rm_0).data,
                _np_to_pyTensor(rv_0).data)
            #print(y_1)
            #print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))

            np.testing.assert_array_almost_equal(
                y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
            return
Example 50
 def save_image(self, iteration):
     demo_row = 5
     demo_col = 5
     if not hasattr(self, "demo_noise"):
         self.demo_noise = tensor.Tensor(
             (demo_col * demo_row, self.noise_size), dev, tensor.float32)
     self.demo_noise.uniform(-1, 1)
     gen_imgs = self.model.forward_gen(self.demo_noise)
     gen_imgs = tensor.to_numpy(gen_imgs)
     show_imgs = np.reshape(
         gen_imgs, (gen_imgs.shape[0], self.rows, self.cols, self.channels))
     fig, axs = plt.subplots(demo_row, demo_col)
     cnt = 0
     for r in range(demo_row):
         for c in range(demo_col):
             axs[r, c].imshow(show_imgs[cnt, :, :, 0], cmap='gray')
             axs[r, c].axis('off')
             cnt += 1
     fig.savefig("{}{}.png".format(self.file_dir, iteration))
     plt.close()
Example 51
def predict(img, model, index2label, args=None):

    autograd.training = False
    img_array = image2array(img)

    inputs = tensor.Tensor(device=dev,
                           data=img_array,
                           requires_grad=False,
                           stores_grad=False)

    x = model(inputs)
    y = autograd.soft_max(x)

    y_np = tensor.to_numpy(y)[0]

    prediction = {}
    for idx in index2label:
        prediction[index2label[idx]] = float(y_np[idx])

    return prediction
Example 52
    def step(self, indices, weights, grads, states):
        """Performs w += rescale_grad * grad."""
        if isinstance(indices, int):
            indices = [indices]
            weights = [weights]
            grads = [grads]

        for index, weight, grad in zip(indices, weights, grads):
            p = tensor.Tensor(
                shape=weight.shape,
                #device=weight.context,
                #dtype=weight.dtype,
                data=weight.asnumpy())
            g = tensor.Tensor(
                shape=grad.shape,
                #device=grad.context,
                #dtype=grad.dtype,
                data=grad.asnumpy())
            self.sgd.update(p, g)
            weight[:] = tensor.to_numpy(p)
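
A hypothetical single-weight call, with `optimizer` standing in for the enclosing object and MXNet NDArrays for the weight and gradient (note the `asnumpy()` conversions above):

# hypothetical usage: update one weight in place via SINGA's SGD
optimizer.step(0, weight_nd, grad_nd, None)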
Example 53
def backward_and_update(kv, loss):
    global is_kvInitial
    model_pairs = []
    key_list = []
    p_list = []
    if not is_kvInitial:
        #Initial kv store for workers of ps-architecture
        key = 0
        for p, g in autograd.backward(loss):
            mxnd_p = mx.nd.from_numpy(tensor.to_numpy(p), zero_copy=True)
            kv.init(key, mxnd_p)
            model_pairs.append((key, p, g))
            key += 1
        is_kvInitial = True
    else:
        #push
        key = 0
        # the following push and pull can be optimized based on performance
        for p, g in autograd.backward(loss):
            #create NDarray from p
            #the created NDarray is used to receive pulled parameters with zero copy
            np_p = tensor2numpy_nocopy(p)
            mxnd_p = mx.nd.from_numpy_nocopy(np_p,
                                             device_id=p.device.id(),
                                             zero_copy=True)
            #copy g to CPU and create NDarray from CPU
            #this can avoid creating memory on GPU0
            g.to_host()
            mxnd_g = mx.nd.from_numpy(tensor2numpy_nocopy(g), zero_copy=True)
            kv.push(key, mxnd_g)
            key_list.append(key)
            p_list.append(mxnd_p)
            model_pairs.append((key, p, g))
            key += 1
        #pull
        kv.pull(key_list, out=p_list)
        mx.nd.waitall()
        del model_pairs
        del key_list
        del p_list
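
A hypothetical training iteration driving this helper, assuming an MXNet kvstore created beforehand and `y`/`target` produced by the surrounding training code:

# hypothetical usage inside a training loop
kv = mx.kv.create('dist_sync')   # assumed kvstore setup
loss = autograd.softmax_cross_entropy(y, target)
backward_and_update(kv, loss)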
Example 54
def train():
    """Start the training procedure 
    """
    num_epochs = 1
    learning_rate = 0.05
    batch_size = 8

    data_loader = DataLoader(os.path.join("data", "fetal_health.csv"))
    data_loader.standardize_column("baseline value")
    x_train, y_train = data_loader.load_data(subset="train")
    x_valid, y_valid = data_loader.load_data(subset="valid")

    num_classes = len(np.unique(y_train))
    num_samples, num_features = x_train.shape

    assert x_train.shape[1] == x_valid.shape[
        1], "Number of features should be equal!"
    assert x_train.shape[0] == y_train.shape[
        0], "Number of training samples should be equal!"
    assert x_valid.shape[0] == y_valid.shape[
        0], "Number of validation samples should be equal!"

    dev = get_default_device()
    tx = tensor.Tensor((num_samples, num_features), dev, tensor.float32)
    ty = tensor.Tensor((num_samples, ), dev, tensor.int32)

    sgd = opt.SGD(learning_rate)
    model = create_MLP_model(perceptron_size=10, num_classes=num_classes)
    model.set_optimizer(sgd)
    model.compile([tx], is_train=True, use_graph=True, sequential=False)
    model.train()

    for i in range(num_epochs):
        tx.copy_from_numpy(x_train.astype(np.float32))
        ty.copy_from_numpy(y_train.astype(np.int32))
        out, loss = model(tx, ty, 'fp32', spars=None)

        # TODO: Add metric evaluation on validation data
        if i % 10 == 0:
            print("training loss = {:.3f}".format(tensor.to_numpy(loss)[0]))
Example 55
    def test_numerical_gradients_check_for_vallina_rnn(self):
        inputs, target, h0 = prepare_inputs_targets_for_rnn_test()

        rnn = autograd.RNN(3, 2)

        def valinna_rnn_forward():
            hs, _ = rnn(inputs, h0)

            loss = autograd.softmax_cross_entropy(hs[0], target[0])
            for i in range(1, len(hs)):
                l = autograd.softmax_cross_entropy(hs[i], target[i])
                loss = autograd.add(loss, l)
            #grads = autograd.gradients(loss)
            return loss

        loss1 = valinna_rnn_forward()
        auto_grads = autograd.gradients(loss1)

        for param in rnn.params:
            auto_grad = tensor.to_numpy(auto_grads[param])

            self.gradients_check(valinna_rnn_forward, param, auto_grad)
Example 56
def create_net(shape, weight_path='bvlc_googlenet.pickle'):
    net = ffnet.FeedForwardNet()
    net.add(Conv2D('conv1/7x7_s2', 64, 7, 2, pad=3, input_sample_shape=shape))
    c1 = net.add(Activation('conv1/relu_7x7'))
    pool1 = pool(net, c1, 'pool1/3x3_s2', 3, 2)
    norm1 = net.add(LRN('pool1/norm1', 5, 0.0001, 0.75))
    c3x3r = conv(net, norm1, 'conv2', 64, 1, suffix='3x3_reduce')
    c3x3 = conv(net, c3x3r, 'conv2', 192, 3, pad=1, suffix='3x3')
    norm2 = net.add(LRN('conv2/norm2', 5, 0.0001, 0.75))
    pool2 = pool(net, norm2, 'pool2/3x3_s2', 3, 2)

    i3a = inception(net, pool2, 'inception_3a', 64, 96, 128, 16, 32, 32)
    i3b = inception(net, i3a, 'inception_3b', 128, 128, 192, 32, 96, 64)
    pool3 = pool(net, i3b, 'pool3/3x3_s2', 3, 2)
    i4a = inception(net, pool3, 'inception_4a', 192, 96, 208, 16, 48, 64)
    i4b = inception(net, i4a, 'inception_4b', 160, 112, 224, 24, 64, 64)
    i4c = inception(net, i4b, 'inception_4c', 128, 128, 256, 24, 64, 64)
    i4d = inception(net, i4c, 'inception_4d', 112, 144, 288, 32, 64, 64)
    i4e = inception(net, i4d, 'inception_4e', 256, 160, 320, 32, 128, 128)
    pool4 = pool(net, i4e, 'pool4/3x3_s2', 3, 2)
    i5a = inception(net, pool4, 'inception_5a', 256, 160, 320, 32, 128, 128)
    i5b = inception(net, i5a, 'inception_5b', 384, 192, 384, 48, 128, 128)
    pool5 = net.add(AvgPooling2D('pool5/7x7_s1', 7, 1, pad=0))
    drop5 = net.add(Dropout('drop', 0.4))
    flat = net.add(Flatten('flat'))
    dense = net.add(Dense('loss3/classifier', 1000))
    # prob=net.add(Softmax('softmax'))

    net.load(weight_path, use_pickle=True)
    print('total num of params %d' % (len(net.param_names())))
    # SINGA and Caffe have different layout for the weight matrix of the dense
    # layer
    for key, val in zip(net.param_names(), net.param_values()):
        # print key
        if key == 'loss3/classifier_weight' or key == 'loss3/classifier/weight':
            tmp = tensor.to_numpy(val)
            tmp = tmp.reshape(tmp.shape[::-1])
            val.copy_from_numpy(np.transpose(tmp))
    return net
Example 57
    def predict(self, queries: List[str]):
        print("Get queries")

        res = []
        queries = [self._preprocess(ele) for ele in queries]

        for input_ids in queries:
            x = tensor.Tensor(device=self.dev, data=input_ids)
            out = []
            for i in range(self.length):
                y = self._model.forward(x)
                y = autograd.reshape(y, y.shape[-2:])[-1, :]
                y = tensor.softmax(y)
                y = tensor.to_numpy(y)[0]
                y = np.argsort(y)[-1]
                out.append(y)
                y = np.array([y]).reshape([1, 1, -1]).astype(np.float32)
                y = tensor.Tensor(device=self.dev, data=y)
                x = tensor.concatenate([x, y], 2)
            result = self._postprocess(out)
            res.append(result)
        return res
Example 58
    def test_numerical_gradients_check_for_lstm(self):
        inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
        c_0 = np.zeros((2, 2)).astype(np.float32)
        c0 = tensor.Tensor(device=gpu_dev, data=c_0)

        rnn = autograd.LSTM(3, 2)

        def lstm_forward():
            hs, _, _ = rnn(inputs, (h0, c0))

            loss = autograd.softmax_cross_entropy(hs[0], target[0])
            for i in range(1, len(hs)):
                l = autograd.softmax_cross_entropy(hs[i], target[i])
                loss = autograd.add(loss, l)
            return loss

        loss1 = lstm_forward()
        auto_grads = autograd.gradients(loss1)

        for param in rnn.params:
            auto_grad = tensor.to_numpy(auto_grads[param])

            self.gradients_check(lstm_forward, param, auto_grad)
Example 59
    def _train_one_batch_helper(self, dev, is_train, use_graph, sequential):
        self.generate_data(dev)
        model = MLP(num_classes=2)
        model.set_optimizer(self.sgd)
        model.compile([self.inputs],
                      is_train=is_train,
                      use_graph=use_graph,
                      sequential=sequential)

        self.get_params(model)

        out, loss = model(self.inputs, self.target)
        np_out, np_loss = self.numpy_train_one_batch(self.data, self.label)

        np.testing.assert_array_almost_equal(tensor.to_numpy(out), np_out)
        np.testing.assert_array_almost_equal(tensor.to_numpy(loss), np_loss)
        np.testing.assert_array_almost_equal(tensor.to_numpy(self.w0), self.W0)
        np.testing.assert_array_almost_equal(tensor.to_numpy(self.b0), self.B0)
        np.testing.assert_array_almost_equal(tensor.to_numpy(self.w1), self.W1)
        np.testing.assert_array_almost_equal(tensor.to_numpy(self.b1), self.B1)