Example #1
def test_download_embed():
    @TokenEmbedding.register
    class Test(TokenEmbedding):
        pretrained_file_name_sha1 = \
            {'embedding_test.vec': '29b9a6511cf4b5aae293c44a9ec1365b74f2a2f8'} # 33 bytes
        namespace = 'test'

        def __init__(self,
                     embedding_root='embeddings',
                     init_unknown_vec=nd.zeros,
                     **kwargs):
            pretrained_file_name = 'embedding_test.vec'
            Test._check_pretrained_file_names(pretrained_file_name)

            super(Test, self).__init__(**kwargs)

            pretrained_file_path = Test._get_pretrained_file(
                embedding_root, pretrained_file_name)

            self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)

    test_embed = TokenEmbedding.create('test')
    assert test_embed.token_to_idx['hello'] == 1
    assert test_embed.token_to_idx['world'] == 2
    assert_almost_equal(test_embed.idx_to_vec[1].asnumpy(),
                        (nd.arange(5) + 1).asnumpy())
    assert_almost_equal(test_embed.idx_to_vec[2].asnumpy(),
                        (nd.arange(5) + 6).asnumpy())
    assert_almost_equal(test_embed.idx_to_vec[0].asnumpy(),
                        nd.zeros((5, )).asnumpy())
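Note: the asserts above pin down the test fixture exactly. A hypothetical reconstruction of embedding_test.vec (not quoted from the source) is the two space-delimited rows

hello 1 2 3 4 5
world 6 7 8 9 10

which, with trailing newlines, is exactly 33 bytes, matching the size comment; index 0 is reserved for the unknown token and initialized by init_unknown_vec (nd.zeros).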
Example #2
def test_download_embed():
    @nlp.embedding.register
    class Test(nlp.embedding.TokenEmbedding):
        # 33 bytes.
        source_file_hash = \
                {'embedding_test': ('embedding_test.vec',
                                    '29b9a6511cf4b5aae293c44a9ec1365b74f2a2f8')}
        namespace = 'test'

        def __init__(self,
                     embedding_root='embedding',
                     init_unknown_vec=nd.zeros,
                     **kwargs):
            source = 'embedding_test'
            Test._check_source(self.source_file_hash, source)

            super(Test, self).__init__(**kwargs)

            file_path = Test._get_file_path(self.source_file_hash,
                                            embedding_root, source)

            self._load_embedding(file_path, ' ')

    test_embed = nlp.embedding.create('test',
                                      embedding_root='tests/data/embedding')
    assert_almost_equal(test_embed['hello'].asnumpy(),
                        (nd.arange(5) + 1).asnumpy())
    assert_almost_equal(test_embed['world'].asnumpy(),
                        (nd.arange(5) + 6).asnumpy())
    assert_almost_equal(test_embed['<unk>'].asnumpy(),
                        nd.zeros((5, )).asnumpy())
Example #3
def _test_crop_resize_with_diff_type(dtype):
    # test normal case
    data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
    out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
    out_np = out_nd.asnumpy()
    assert(out_np.sum() == 180)
    assert((out_np[0:2, 1, 1].flatten() == [4, 16]).all())
    # test 4D input
    data_bath_in = nd.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
    out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_bath_in)
    out_batch_np = out_batch_nd.asnumpy()
    assert(out_batch_np.sum() == 7524)
    assert((out_batch_np[0:2, 0:4, 1, 1].flatten() == [37, 52, 67, 82, 127, 142, 157, 172]).all())
    # test normal case with resize
    data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
    out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 2)(data_in)
    data_expected = image.imresize(nd.slice(data_in, (0, 0, 0), (50, 100, 3)), 25, 25, 2)
    assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
    # test 4D input with resize
    data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
    out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 2)(data_bath_in)
    for i in range(len(out_batch_nd)):
        assert_almost_equal(image.imresize(nd.slice(data_bath_in[i], (0, 0, 0), (50, 100, 3)), 25, 25, 2).asnumpy(),
                            out_batch_nd[i].asnumpy())
    # resize height and width must be greater than 0
    transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 2)
    assertRaises(MXNetError, transformer, data_in)
    # height and width must be greater than 0
    transformer = transforms.CropResize(0, 0, -100, -50)
    assertRaises(MXNetError, transformer, data_in)
    # the cropped area must not exceed the input data
    transformer = transforms.CropResize(150, 200, 200, 500)
    assertRaises(MXNetError, transformer, data_in)
    assertRaises(MXNetError, transformer, data_bath_in)
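For context: these CropResize test snippets appear to come from MXNet's own test suite and assume its test environment. A sketch of the imports they would need (an assumption, not quoted from the source; assertRaises is the helper defined in MXNet's tests/python/unittest/common.py):

import mxnet as mx
from mxnet import nd, image
from mxnet.base import MXNetError
from mxnet.gluon.data.vision import transforms
from mxnet.test_utils import assert_almost_equal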
Example #4
def convert_xy(XY):
    B,H,W,A,N = XY.shape
    dy = nd.tile( nd.arange(0,H,repeat=(W*A), ctx = XY.context).reshape((1,H,W,A,1)), (B,1,1,1,1) )
    dx = nd.tile( nd.arange(0,W,repeat=(A),ctx = XY.context).reshape((1,1,W,A,1)), (B,H,1,1,1) )
    x,y = XY.split(num_outputs=2,axis=-1)
    x = (x + dx) / W
    y = (y + dy) / H
    return x,y
Example #5
def convert_xy(XY):
    B, H, W, A, N = XY.shape
    dy = nd.tile(
        nd.arange(0, H, repeat=(W * A), ctx=XY.context).reshape(
            (1, H, W, A, 1)), (B, 1, 1, 1, 1))
    dx = nd.tile(
        nd.arange(0, W, repeat=(A), ctx=XY.context).reshape((1, 1, W, A, 1)),
        (B, H, 1, 1, 1))
    x, y = XY.split(num_outputs=2, axis=-1)
    x = (x + dx) / W
    y = (y + dy) / H
    return x, y
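A minimal sketch of what the offset grids do (toy shapes assumed; XY holds raw per-cell (x, y) offsets and the last axis has N = 2):

from mxnet import nd

B, H, W, A = 1, 2, 3, 1
XY = nd.zeros((B, H, W, A, 2))
x, y = convert_xy(XY)
print(x.reshape((H, W)).asnumpy())  # each column j maps to j / W
print(y.reshape((H, W)).asnumpy())  # each row i maps to i / H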
Example #6
def sparse_matrix(data, index, shape, force_format=False):
    fmt = index[0]
    if fmt == 'coo':
        if force_format:
            raise TypeError('MXNet backend only supports CSR format,'
                            ' but COO format is forced.')
        coord = index[1]
        # generate convert idx
        # FIXME: cannot use int64
        tmp_data = nd.arange(len(coord[0]),
                             dtype=data.dtype,
                             ctx=coord[0].context)
        tmp_spmat = nd.sparse.csr_matrix((tmp_data, (coord[0], coord[1])),
                                         tuple(shape),
                                         ctx=data.context)
        convert_idx = nd.cast(tmp_spmat.data, dtype='int64')
        # shuffle the data
        data = data[convert_idx]
        spmat = nd.sparse.csr_matrix(
            (data, tmp_spmat.indices, tmp_spmat.indptr),
            tuple(shape),
            ctx=data.context)
        return spmat, convert_idx
    elif fmt == 'csr':
        indices = index[1]
        indptr = index[2]
        spmat = nd.sparse.csr_matrix((data, indices, indptr),
                                     tuple(shape),
                                     ctx=data.context)
        # No conversion is required.
        return spmat, None
    else:
        raise TypeError('Invalid format: %s.' % fmt)
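A quick sketch exercising the COO branch (index layouts follow the code above: ('coo', (row, col)) or ('csr', indices, indptr)):

from mxnet import nd

data = nd.array([2., 1., 3.])
row = nd.array([1, 0, 1], dtype='int64')
col = nd.array([0, 1, 2], dtype='int64')
spmat, convert_idx = sparse_matrix(data, ('coo', (row, col)), (2, 3))
print(spmat.asnumpy())        # [[0. 1. 0.], [2. 0. 3.]]
print(convert_idx.asnumpy())  # [1 0 2], the COO-to-CSR permutation applied to data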
Example #7
    def __init__(self, data, label=None, batch_size=1, shuffle=True,
                 last_batch_handle='pad', data_name='data',
                 label_name='softmax_label'):
        super(mnistIter, self).__init__(batch_size)

        print(data.shape, label.shape)
        self.data = _init_data(data, allow_empty=False, default_name=data_name)
        self.label = _init_data(label, allow_empty=True, default_name=label_name)

        # shuffle data
        if shuffle:
            tmp_idx = arange(self.data[0][1].shape[0], dtype=np.int32)
            self.idx = random_shuffle(tmp_idx, out=tmp_idx).asnumpy()
            self.data = _shuffle(self.data, self.idx)
            self.label = _shuffle(self.label, self.idx)
        else:
            self.idx = np.arange(self.data[0][1].shape[0])

        self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
        #print('data_list :', self.data_list)
        self.num_source = len(self.data_list)
        #print('num_source: ', self.num_source)
        self.num_data = self.idx.shape[0]
        #print('num_data: ', self.num_data)
        self.cursor = -batch_size
        self.batch_size = batch_size
        self.last_batch_handle = last_batch_handle
        data_shape = (1, 28, 28)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        self.provide_label = [(label_name, (batch_size,))]
Example #8
def get_output(name, RAW_DATA):
    batch_size = 1
    train_data = DataLoader(root_path,
                            RAW_DATA,
                            batch_size,
                            shuffle=True,
                            for_show=False,
                            pic_size=(224, 224))

    f = open("output/" + name + ".txt", "w")
    f_label = open("output/" + name + "_label.txt", "w")

    cnt = 1
    for data, label in train_data:
        print(name, cnt)
        cnt = cnt + 1
        seNet.forward(Batch([data]))
        seNet_out = nd.array(seNet.get_outputs()[0].asnumpy())

        dpn.forward(Batch([data]))
        dpn_out = nd.array(dpn.get_outputs()[0].asnumpy())

        net_in = nd.concat(*[seNet_out, dpn_out])
        #    net_in = nd.stack(*[seNet_out, dpn_out])
        #    print(net_in)
        for i in nd.arange(len(net_in[0])):
            f.write("%f " % (net_in[0][i].asscalar()))
        f.write("\n")
        f_label.write("%d\n" % (label.argmax(axis=1).asscalar()))
    f.close()
    f_label.close()
Example #9
def test_crop_resize():
    def _test_crop_resize_with_diff_type(dtype):
        # test normal case
        data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
        out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
        out_np = out_nd.asnumpy()
        assert(out_np.sum() == 180)
        assert((out_np[0:2,1,1].flatten() == [4, 16]).all())
        # test 4D input
        data_bath_in = nd.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
        out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_bath_in)
        out_batch_np = out_batch_nd.asnumpy()
        assert(out_batch_np.sum() == 7524)
        assert((out_batch_np[0:2,0:4,1,1].flatten() == [37,  52,  67,  82, 127, 142, 157, 172]).all())
        # test normal case with resize
        data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
        out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_in)
        data_expected = transforms.Resize(size=25, interpolation=1)(nd.slice(data_in, (0, 0, 0), (50, 100, 3)))
        assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
        # test 4D input with resize
        data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
        out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_bath_in)
        for i in range(len(out_batch_nd)):
            actual = transforms.Resize(size=25, interpolation=1)(nd.slice(data_bath_in[i], (0, 0, 0), (50, 100, 3))).asnumpy()
            expected = out_batch_nd[i].asnumpy()
            assert_almost_equal(expected, actual)
        # resize height and width must be greater than 0
        transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 1)
        assertRaises(MXNetError, transformer, data_in)
        # height and width must be greater than 0
        transformer = transforms.CropResize(0, 0, -100, -50)
        assertRaises(MXNetError, transformer, data_in)
        # the cropped area must not exceed the input data
        transformer = transforms.CropResize(150, 200, 200, 500)
        assertRaises(MXNetError, transformer, data_in)
        assertRaises(MXNetError, transformer, data_bath_in)

    for dtype in ['uint8', 'float32', 'float64']:
        _test_crop_resize_with_diff_type(dtype)

    # test nd.image.crop backward
    def test_crop_backward(test_nd_arr, TestCase):
        a_np = test_nd_arr.asnumpy()
        b_np = a_np[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))]

        data = mx.sym.Variable('data')
        crop_sym = mx.sym.image.crop(data, TestCase.x, TestCase.y, TestCase.width, TestCase.height)

        expected_in_grad = np.zeros_like(a_np)
        expected_in_grad[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))] = b_np
        check_symbolic_backward(crop_sym, [a_np], [b_np], [expected_in_grad])

    TestCase = namedtuple('TestCase', ['x', 'y', 'width', 'height'])
    test_list = [TestCase(0, 0, 3, 3), TestCase(2, 1, 1, 2), TestCase(0, 1, 3, 2)]

    for dtype in ['uint8', 'float32', 'float64']:
        data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
        for test_case in test_list:
            test_crop_backward(data_in, test_case)
Example #10
    def reset(self):
        logging.debug("reset")
        tmp_idx = arange(self.data[0][1].shape[0], dtype=np.int32)
        self.idx = random_shuffle(tmp_idx, out=tmp_idx).asnumpy()
        self.data = _shuffle(self.data, self.idx)
        self.label = _shuffle(self.label, self.idx)
        self.cursor = 0
        logging.debug("self.cursor: %s", self.cursor)
Example #11
def map_anchors(ref_anchors, target_shape, scale_h, scale_w, ctx):
    ref_anchors = ref_anchors.as_in_context(ctx)
    ref_anchors = ref_anchors.reshape((1, -1, 1, 1))
    ref_anchors = ref_anchors.broadcast_to(target_shape)
    _n, _c, h, w = ref_anchors.shape
    ref_x = nd.arange(w).as_in_context(ctx).reshape((1, w)) / w
    ref_x = ref_x * scale_w
    ref_x = ref_x.broadcast_to((h, w))
    ref_y = nd.arange(h).as_in_context(ctx).reshape((h, 1)) / h
    ref_y = ref_y * scale_h
    ref_y = ref_y.broadcast_to((h, w))
    for anchor_i in range(_c // 4):
        ref_anchors[0, anchor_i * 4] += ref_x
        ref_anchors[0, anchor_i * 4 + 1] += ref_y
        ref_anchors[0, anchor_i * 4 + 2] += ref_x
        ref_anchors[0, anchor_i * 4 + 3] += ref_y
    return ref_anchors
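A shape-level smoke test (a sketch; the values depend on the anchors you pass in):

import mxnet as mx
from mxnet import nd

ref = nd.arange(16)                                # 4 anchors x 4 coordinates
out = map_anchors(ref, (1, 16, 2, 3), 2.0, 2.0, mx.cpu())
print(out.shape)                                   # (1, 16, 2, 3): anchors shifted per cell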
Example #12
def test_crop_resize():
    def _test_crop_resize_with_diff_type(dtype):
        # test normal case
        data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
        out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
        out_np = out_nd.asnumpy()
        assert(out_np.sum() == 180)
        assert((out_np[0:2,1,1].flatten() == [4, 16]).all())
        # test 4D input
        data_bath_in = nd.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
        out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_bath_in)
        out_batch_np = out_batch_nd.asnumpy()
        assert(out_batch_np.sum() == 7524)
        assert((out_batch_np[0:2,0:4,1,1].flatten() == [37,  52,  67,  82, 127, 142, 157, 172]).all())
        # test normal case with resize
        data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
        out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 2)(data_in)
        data_expected = image.imresize(nd.slice(data_in, (0, 0, 0), (50, 100, 3)), 25, 25, 2)
        assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
        # test 4D input with resize
        data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
        out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 2)(data_bath_in)
        for i in range(len(out_batch_nd)):
            assert_almost_equal(image.imresize(nd.slice(data_bath_in[i], (0, 0, 0), (50, 100, 3)), 25, 25, 2).asnumpy(),
                out_batch_nd[i].asnumpy())
        # resize height and width must be greater than 0
        transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 2)
        assertRaises(MXNetError, transformer, data_in)
        # height and width must be greater than 0
        transformer = transforms.CropResize(0, 0, -100, -50)
        assertRaises(MXNetError, transformer, data_in)
        # the cropped area must not exceed the input data
        transformer = transforms.CropResize(150, 200, 200, 500)
        assertRaises(MXNetError, transformer, data_in)
        assertRaises(MXNetError, transformer, data_bath_in)

    for dtype in ['uint8', 'float32', 'float64']:
        _test_crop_resize_with_diff_type(dtype)

    # test nd.image.crop backward
    def test_crop_backward(test_nd_arr, TestCase):
        a_np = test_nd_arr.asnumpy()
        b_np = a_np[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))]

        data = mx.sym.Variable('data')
        crop_sym = mx.sym.image.crop(data, TestCase.x, TestCase.y, TestCase.width, TestCase.height)

        expected_in_grad = np.zeros_like(a_np)
        expected_in_grad[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))] = b_np
        check_symbolic_backward(crop_sym, [a_np], [b_np], [expected_in_grad])

    TestCase = namedtuple('TestCase', ['x', 'y', 'width', 'height'])
    test_list = [TestCase(0, 0, 3, 3), TestCase(2, 1, 1, 2), TestCase(0, 1, 3, 2)]

    for dtype in ['uint8', 'float32', 'float64']:
        data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
        for test_case in test_list:
            test_crop_backward(data_in, test_case)
Example #13
def convert_xy(XY):
    B,H,W,A,N = XY.shape
    dy = nd.tile( nd.arange(0,H,repeat=(W*A), ctx = XY.context).reshape((1,H,W,A,1)), (B,1,1,1,1) )
    dx = nd.tile( nd.arange(0,W,repeat=(A),ctx = XY.context).reshape((1,1,W,A,1)), (B,H,1,1,1) )
    x,y = XY.split(num_outputs=2,axis=-1)
    x = (x + dx) / W
    y = (y + dy) / H
    if 0:
        for b in range(B):
            for h in range(H):
                for w in range(W):
                    for a in range(A):
                        for n in range(1):
                            xx = dx[b,h,w,a,n].asnumpy()[0]
                            yy = dy[b,h,w,a,n].asnumpy()[0]
                            #pdb.set_trace()
                            print('(%.3f,%.3f)' % (xx, yy))
                    print('')
    return x,y
Example #14
def plot_predictions(preds, labels):
    """Plot predictions vs ground truth.

    """
    T = len(preds)
    time = nd.arange(0, T)
    plt.plot(time.asnumpy(), labels, label='labels')
    plt.plot(time.asnumpy(), preds, label='predictions')
    plt.legend()
    return plt
Example #15
def test_jitter_synthetic(
    jitter_method, float_type, ctx=mx.Context('cpu')
) -> None:
    # Initialize problem parameters
    batch_size = 1
    prediction_length = 50
    context_length = 5
    num_samples = 3

    # Initialize test data to generate Gaussian Process from
    lb = -5
    ub = 5
    dx = (ub - lb) / (prediction_length - 1)
    x_test = nd.arange(lb, ub + dx, dx, ctx=ctx, dtype=float_type).reshape(
        -1, 1
    )
    x_test = nd.tile(x_test, reps=(batch_size, 1, 1))

    # Define the GP hyper parameters
    amplitude = nd.ones((batch_size, 1, 1), ctx=ctx, dtype=float_type)
    length_scale = math.sqrt(0.4) * nd.ones_like(amplitude)
    sigma = math.sqrt(1e-5) * nd.ones_like(amplitude)

    # Instantiate desired kernel object and compute kernel matrix
    rbf_kernel = RBFKernel(amplitude, length_scale)

    # Generate samples from 0 mean Gaussian process with RBF Kernel and plot it
    gp = GaussianProcess(
        sigma=sigma,
        kernel=rbf_kernel,
        prediction_length=prediction_length,
        context_length=context_length,
        num_samples=num_samples,
        ctx=ctx,
        float_type=float_type,
        jitter_method=jitter_method,
        sample_noise=False,  # Returns sample without noise
    )

    # Generate training set on subset of interval using the sine function
    x_train = nd.array([-4, -3, -2, -1, 1], ctx=ctx, dtype=float_type).reshape(
        context_length, 1
    )
    x_train = nd.tile(x_train, reps=(batch_size, 1, 1))
    y_train = nd.sin(x_train.squeeze(axis=2))

    # Predict exact GP using the GP predictive mean and covariance using the same fixed hyper-parameters
    samples, predictive_mean, predictive_std = gp.exact_inference(
        x_train, y_train, x_test
    )

    assert (
        np.sum(np.isnan(samples.asnumpy())) == 0
    ), 'NaNs in predictive samples!'
Example #16
def _test_crop_resize_with_diff_type(dtype):
    # test normal case
    data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
    out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
    out_np = out_nd.asnumpy()
    assert (out_np.sum() == 180)
    assert ((out_np[0:2, 1, 1].flatten() == [4, 16]).all())
    # test 4D input
    data_bath_in = nd.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
    out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_bath_in)
    out_batch_np = out_batch_nd.asnumpy()
    assert (out_batch_np.sum() == 7524)
    assert ((out_batch_np[0:2, 0:4, 1, 1].flatten() == [
        37, 52, 67, 82, 127, 142, 157, 172
    ]).all())
    # test normal case with resize
    data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
    out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_in)
    data_expected = transforms.Resize(size=25, interpolation=1)(nd.slice(
        data_in, (0, 0, 0), (50, 100, 3)))
    assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
    # test 4D input with resize
    data_bath_in = nd.random.uniform(0, 255,
                                     (3, 300, 200, 3)).astype(dtype)
    out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25),
                                         1)(data_bath_in)
    for i in range(len(out_batch_nd)):
        actual = transforms.Resize(size=25, interpolation=1)(nd.slice(
            data_bath_in[i], (0, 0, 0), (50, 100, 3))).asnumpy()
        expected = out_batch_nd[i].asnumpy()
        assert_almost_equal(expected, actual)
    # resize height and width must be greater than 0
    transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 1)
    assertRaises(MXNetError, transformer, data_in)
    # height and width must be greater than 0
    transformer = transforms.CropResize(0, 0, -100, -50)
    assertRaises(MXNetError, transformer, data_in)
    # the cropped area must not exceed the input data
    transformer = transforms.CropResize(150, 200, 200, 500)
    assertRaises(MXNetError, transformer, data_in)
    assertRaises(MXNetError, transformer, data_bath_in)
Example #17
def test_download_embed():
    @nlp.embedding.register
    class Test(nlp.embedding.TokenEmbedding):
        # 33 bytes.
        source_file_hash = \
                {'embedding_test': ('embedding_test.vec',
                                    '29b9a6511cf4b5aae293c44a9ec1365b74f2a2f8')}
        namespace = 'test'

        def __init__(self, embedding_root='embedding', init_unknown_vec=nd.zeros, **kwargs):
            source = 'embedding_test'
            Test._check_source(source)

            super(Test, self).__init__(**kwargs)

            file_path = Test._get_file_path(embedding_root, source)

            self._load_embedding(file_path, ' ', init_unknown_vec)

    test_embed = nlp.embedding.create('test', embedding_root='tests/data/embedding')
    assert_almost_equal(test_embed['hello'].asnumpy(), (nd.arange(5) + 1).asnumpy())
    assert_almost_equal(test_embed['world'].asnumpy(), (nd.arange(5) + 6).asnumpy())
    assert_almost_equal(test_embed['<unk>'].asnumpy(), nd.zeros((5,)).asnumpy())
Example #18
    def cvt_output_for_predict(self, pred):
        # how to interpret the net output, matching format_groundtruth()
        predCls, predObj, XYWH = self.format_net_output(pred)
        batchSize, height, width, boxNum, _ = XYWH.shape
        X, Y, W, H = XYWH.split(num_outputs=4, axis=-1)
        #pdb.set_trace()
        DY = nd.tile(nd.arange(0, height, repeat=width * boxNum, ctx=XYWH.context)
                     .reshape((1, height, width, boxNum, 1)),
                     (batchSize, 1, 1, 1, 1))
        DX = nd.tile(nd.arange(0, width, repeat=boxNum, ctx=XYWH.context)
                     .reshape((1, 1, width, boxNum, 1)),
                     (batchSize, height, 1, 1, 1))
        X = (X + DX) / width
        Y = (Y + DY) / height
        #pdb.set_trace()
        W = nd.exp(W) - 1
        H = nd.exp(H) - 1

        W = nd.clip(W, 0, 1)
        H = nd.clip(H, 0, 1)
        X = nd.clip(X, 0, 1)
        Y = nd.clip(Y, 0, 1)
        left = X
        top = Y
        right = nd.clip(left + W, 0, 1)
        bottom = nd.clip(top + H, 0, 1)
        corners = nd.concat(left, top, right, bottom, dim=-1)  # NMS requires corner format
        return predCls, predObj, corners
Example #19
def test_download_embed():
    @text.embedding.register
    class Test(text.embedding._TokenEmbedding):
        # 33 bytes.
        pretrained_file_name_sha1 = \
            {'embedding_test.vec': '29b9a6511cf4b5aae293c44a9ec1365b74f2a2f8'}
        namespace = 'test'

        def __init__(self, embedding_root='embeddings', init_unknown_vec=nd.zeros, **kwargs):
            pretrained_file_name = 'embedding_test.vec'
            Test._check_pretrained_file_names(pretrained_file_name)

            super(Test, self).__init__(**kwargs)

            pretrained_file_path = Test._get_pretrained_file(embedding_root, pretrained_file_name)

            self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)

    test_embed = text.embedding.create('test')
    assert test_embed.token_to_idx['hello'] == 1
    assert test_embed.token_to_idx['world'] == 2
    assert_almost_equal(test_embed.idx_to_vec[1].asnumpy(), (nd.arange(5) + 1).asnumpy())
    assert_almost_equal(test_embed.idx_to_vec[2].asnumpy(), (nd.arange(5) + 6).asnumpy())
    assert_almost_equal(test_embed.idx_to_vec[0].asnumpy(), nd.zeros((5,)).asnumpy())
Example #20
    def _sync_params_from_devices(self):
        """Synchronizes parameters from devices to CPU. This function should be called after
        calling `update` that updates the parameters on the devices, before one can read the
        latest parameters from ``self._arg_params`` and ``self._aux_params``.

        For row_sparse parameters on devices, they are pulled from KVStore with all row ids.

        """
        self._exec_group.get_params(self._arg_params, self._aux_params)
        if self._kvstore and self._update_on_kvstore:
            for param_name, param_val in sorted(self._arg_params.items()):
                if param_val.stype == 'row_sparse':
                    row_ids = nd.arange(0, param_val.shape[0], dtype='int64')
                    self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_ids)
        self._params_dirty = False
Example #21
def unsorted_1d_segment_sum(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment sum on first dimension'

    # Use SPMV to simulate segment sum
    ctx = input.context
    n_inputs = input.shape[0]
    input_shape_suffix = input.shape[1:]
    input = input.reshape(n_inputs, -1)
    n_range = nd.arange(n_inputs, dtype='int64').as_in_context(input.context)
    w_nnz = nd.ones(n_inputs).as_in_context(input.context)
    w_nid = nd.stack(seg_id, n_range, axis=0)
    w = nd.sparse.csr_matrix((w_nnz, (seg_id, n_range)), (n_segs, n_inputs))
    w = w.as_in_context(input.context)
    y = nd.dot(w, input)
    y = nd.reshape(y, (n_segs, ) + input_shape_suffix)
    return y
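A small check of the SPMV trick (sketch): four rows summed into two segments. (Note that the w_nid stack above appears unused; the CSR matrix is built directly from seg_id and n_range.)

from mxnet import nd

x = nd.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
seg = nd.array([0, 0, 1, 1], dtype='int64')
print(unsorted_1d_segment_sum(x, seg, 2, 0).asnumpy())  # [[3. 3.], [7. 7.]]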
Example #22
def generate_anchors(base_size=16,
                     ratios=nd.array([0.5, 1, 2]),
                     scales=2**nd.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    This implementation matches the original Faster-RCNN RPN generate_anchors().
    But all calculations are on mxnet.ndarray.NDArray.

    Refer to 
    https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/rpn/generate_anchors.py
    """

    base_anchor = nd.array([1, 1, base_size, base_size])
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = nd.concatenate([
        _scale_enum(ratio_anchors[i, :], scales)
        for i in range(ratio_anchors.shape[0])
    ])
    return anchors
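With the defaults (3 ratios x 3 scales), this should reproduce the familiar 9 Faster-RCNN base anchors (sketch; _ratio_enum and _scale_enum come from the same module):

anchors = generate_anchors()
print(anchors.shape)  # (9, 4)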
Example #23
import mxnet.ndarray as nd
import mxnet.autograd as ag

#------------------------------ Examples ------------------------------------
## Create a variable
x = nd.arange(4).reshape((4,1))
nd.arange(4)
x
## Allocate storage for the gradient: attach_grad
x.attach_grad()

## Record the computation for taking gradients: record()
y = nd.arange(3)
with ag.record():
    y = 2*nd.dot(x.T,x)
## Take the gradient. Gradients are only taken of scalars; if y is not a scalar, all of its entries are summed first and the gradient is taken of that sum.
y.backward()

## Verify the gradient is correct
assert(x.grad - 4*x).norm().asscalar() == 0
x.grad

## Inference mode by default? And training mode by default inside record()?
print(ag.is_training())
with ag.record():
    print(ag.is_training())

#------------------------------ Differentiating through control flow ------------------------------------
## Define a function with control flow (record the actual execution path, then differentiate?)

def fun(a):
Example #24
from mxnet import ndarray as nd
import numpy as np
import os

x = nd.ones((3, 4))

y = nd.random_normal(0, 1, shape=(3, 4))

print(x.shape)
print(y.shape)
print(y.size)
#print(y)

print(x * y)

# broadcast
a = nd.arange(3).reshape((3, 1))
b = nd.arange(2).reshape((1, 2))
print('a:', a)
print('b:', b)
print('a + b: ', a + b)

x = np.ones((2, 3))
y = nd.array(x)  # numpy -> mxnet
z = y.asnumpy()

print([z, y])

x = nd.ones((3, 4))
y = nd.ones((3, 4))
before = id(y)
y = x + y
Example #25
def arange(start, stop):
    return nd.arange(start, stop, dtype=np.int64)
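Usage (sketch): the wrapper simply pins the dtype to int64, e.g.

idx = arange(2, 5)
print(idx.dtype, idx.asnumpy())  # int64 dtype, [2 3 4]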
Example #26
def _simulate(sym, params, graph, inputs_ext, self):
    logger = logging.getLogger('log.mrt.simulate')
    name, op_name = sym.attr('name'), sym.attr('op_name')
    childs, attr = sym_iter(sym.get_children()), sym.list_attr()
    infer_shapes, th_dict = self.shpes, self.th_dict
    precs, scales = self.precs, self.scales
    op_input_precs = self._op_input_precs

    cns = [c.attr('name') for c in childs] if childs else []

    def _requant_parameter(pname, def_prec, oscale=None):
        P_name = _uniq_name(pname)
        P_prec = precs[pname].get(name, PREC(def_prec))
        xs = oscale if oscale else scale(th_dict[pname], P_prec.p)
        params[P_name] = params[pname] * xs
        P_attr = {'precision': str(P_prec.p)}
        graph[P_name] = mx.sym.var(P_name,
                                   shape=params[P_name].shape,
                                   attr=P_attr)
        logger.debug(
            "Parameter th_dict=%-12.8f name=%-40s requantize with scale=%-16.8f to prec=%s",
            th_dict[pname], pname, xs, P_prec)
        return graph[P_name], P_prec, xs

    def _requant_operator(X, def_prec, oscale=None):
        xopn, xn = X.attr('op_name'), X.attr('name')
        X_name = _uniq_name(xn)
        oprec = precs[xn].get(name, PREC(def_prec))
        exactly = True if oscale else False
        oscale = oscale if oscale else scale(th_dict[xn], oprec.p)
        iscale = scales[xn]
        iprec = precs[xn][out_key]
        if exactly:
            in_prec = _get_bit(th_dict[xn] * iscale)
            out_prec = oprec.p
            sb = in_prec - out_prec if in_prec > out_prec else 0
            if sb > 1:
                iprec = PREC(iprec.p - sb)
                X = _mrt_sim_quantize(X, sb, params, graph, iprec.p)
                iscale = iscale / (2**sb)
                logger.debug(
                    "Operator  %-20s name=%-40s exactly quantize with sb=%s" +
                    " scale=%s, prec=%s", xopn, xn, sb, iscale, iprec)

        if exactly or (iprec > oprec and iscale > oscale):
            rescale = oscale / iscale
            frac, exp = sim.cvm_float(rescale, MAX_BIT - iprec.p)
            sim_scale = frac * (2**exp)
            scale_err = abs((sim_scale - rescale) / rescale)
            if exactly and scale_err > 0.001:
                logger.warn(
                    "Operator  %-20s name=%-40s requantize to scale=%s " +
                    "with <%s, %d, %d>, error=%s", xopn, xn, rescale,
                    sim_scale, frac, exp, scale_err)
            oscale = iscale * frac * (2**exp)
            if frac > 1:
                X = _mrt_sim_quantize(X, 0, params, graph, iprec.p)
                var = mx_const(frac, graph, params)
                mul_name = _uniq_name("mrt_quantize_scale")
                X = mx.sym.broadcast_mul(X, var, name=mul_name)
            X = _mrt_sim_quantize(X, (-exp), params, graph, oprec.p)
            logger.debug(
                "Operator  %-20s name=%-40s requantize with scale=%-16.8f<%d, %d>"
                + " iprec=%s, iscale=%-10.5f, oprec=%s, oscale=%-10.5f", xopn,
                xn, rescale, frac, exp, iprec, iscale, oprec, oscale)
        else:
            X = _mrt_sim_quantize(X, 0, params, graph, oprec.p)
            oscale = iscale
            logger.debug(
                "Operator  %-20s name=%-40s clip with iprec=%s, oprec=%s",
                xopn, xn, iprec, oprec)
        return X, oprec, oscale

    def _requant(X, def_prec, oscale=None):
        if is_params(X, params):
            return _requant_parameter(X.attr('name'), def_prec, oscale)
        else:
            return _requant_operator(X, def_prec, oscale)

    # Update four attributes: th_dict, precs, scales, sym
    if is_inputs(sym, params):
        prec = precs[name][out_key]
        scales[name] = scale(th_dict[name], prec.p)
        attr = {'precision': str(prec.p)}
        sym = mx.sym.var(name, attr=attr)
        return sym, params
    elif is_params(sym, params):
        return sym, params
    elif op_name in disable_requant_ops:
        # TODO: pass through thresholds
        # th_dict[name] = th_dict[cns[0]]
        precs[name][out_key] = PREC(precs[cns[0]][out_key])
        scales[name] = scales[cns[0]]
    elif op_name in ['sigmoid', 'exp']:
        iprec = op_input_precs[op_name]
        xs = scale(th_dict[cns[0]], iprec.p)
        X, xprec, xs = _requant_operator(childs[0], iprec, xs)
        alpha = _get_range(xprec.p)

        data = nd.arange(-alpha, alpha + 1)
        out = get_nd_op(op_name)(data / xs)
        oprec = precs[name].get(out_key, PREC(16, L0))
        opt = out.abs().max().asscalar()
        # opt = th_dict[name]
        oscale = scales[name] = scale(opt, oprec.p)

        W_name = _uniq_name("cvm_lut_weight")
        weight = (out * oscale).round().reshape(2 * alpha + 1, 1)
        params[W_name] = weight
        wattr = {'precision': str(oprec.p)}
        W = graph[W_name] = mx.sym.var(W_name, shape=weight.shape, attr=wattr)
        var = mx_const(alpha, graph, params)
        add_name = _uniq_name(op_name + "_offset")
        X = mx.sym.broadcast_add(X, var, name=add_name)
        sym = mx.sym.Custom(X,
                            W,
                            in_dim=2 * alpha + 1,
                            name=name,
                            op_type='cvm_lut')
        precs[name][out_key] = oprec
    elif op_name in ['softmax']:
        """  Softmax Quantization
        ::math
            y(i) = e ^ i \over {\sum_j^K {e ^ j}}
        ::quantize
            1. Keep value in range [max(input) - lambd, max(input)),
                otherwise set zero to ignore for tiny probability.
            2. Embedding e ^ i for input scale. ie. calculate the value
                of e ^ i for i in range [0, lambd* input scale],
                E(i) = Embedding(e ^ i).
            3. Do math for interger computation.
                sum = \sum_j^K { E(j) }
                \hat_{y}(i) = {E(i) * 2 ^ 14 + sum - 1} \over sum

        """
        iprec = op_input_precs[op_name]
        xs = scale(th_dict[cns[0]], iprec.p)
        axis = get_attr(attr, 'axis', -1)
        X, xprec, xs = _requant_operator(childs[0], iprec, xs)
        lambd = 10
        alpha = int(lambd * xs)
        max_axis = mx.sym.max(X, axis=axis, keepdims=True)
        var = mx_const(alpha, graph, params)
        offset = mx.sym.broadcast_sub(max_axis,
                                      var,
                                      name=_uniq_name("softmax_offset"))
        offset = _mrt_sim_quantize(offset, 0, params, graph, xprec.p)
        norm = mx.sym.relu(mx.sym.broadcast_sub(
            X, offset, name=_uniq_name("softmax_normalize")),
                           name=_uniq_name("softmax_filter"))
        norm = _mrt_sim_quantize(norm, 0, params, graph, xprec.p)

        data = nd.arange(0, alpha + 1)
        table = nd.exp(data / xs)

        tprec = _get_bit(math.exp(lambd))
        table = nd.clip(table, a_min=0, a_max=_get_range(tprec))
        W_name = _uniq_name("cvm_lut_weight")
        params[W_name] = weight = table.round().reshape(alpha + 1, 1)
        wattr = {'precision': str(tprec)}
        W = graph[W_name] = mx.sym.var(W_name, shape=weight.shape, attr=wattr)
        lut = mx.sym.Custom(norm,
                            W,
                            in_dim=alpha + 1,
                            name=name,
                            op_type='cvm_lut')
        sum_lut = mx.sym.sum(lut,
                             axis=axis,
                             keepdims=True,
                             name=_uniq_name("softmax_sum"))

        oprec = min(15, 31 - tprec)
        assert oprec > 8, "operator softmax(%s) lambda(%d) is too large" \
            % (name, lambd)
        oscale = _get_range(oprec)
        var_scale = mx_const(oscale, graph, params)
        prob = mx.sym.broadcast_mul(lut,
                                    var_scale,
                                    name=_uniq_name("softmax_output_scale"))
        var_one = mx_const(1, graph, params)
        half_lut = _mrt_sim_quantize(sum_lut, 1, params, graph, 31)
        prob = mx.sym.broadcast_add(prob,
                                    half_lut,
                                    name=_uniq_name("softmax_round"))
        sym = mx.sym.broadcast_div(prob,
                                   sum_lut,
                                   name=_uniq_name("softmax_prob"))
        sym = sym.astype('int32').astype('float32')
        #  sym = mx.sym.floor(sym) # simulate integer division
        sym = _mrt_sim_quantize(sym, 0, params, graph, oprec)
        precs[name][out_key] = PREC(oprec)
        scales[name] = oscale
    elif op_name in ['Convolution', 'FullyConnected']:
        iprec = op_input_precs[op_name]
        X, xprec, xs = _requant_operator(childs[0], iprec)
        W, wprec, ws = _requant_parameter(cns[1], iprec)
        B, bprec = None, PREC()
        if not get_attr(attr, 'no_bias', False):
            bs = ws * xs
            bias_prec = PREC(_get_bit(th_dict[cns[2]] * bs))
            B, bprec, _ = _requant_parameter(cns[2], bias_prec, bs)
        oscale = scales[name] = ws * xs
        sym = get_mxnet_op(op_name)(X, W, B, **attr, name=name)
        precs[name][out_key] = PREC(_get_bit(th_dict[name] * oscale))
    elif op_name in ['broadcast_mul']:
        iprec = op_input_precs[op_name]
        X, xprec, xs = _requant(childs[0], iprec)
        B, bprec, bs = _requant(childs[1], iprec)
        oscale = scales[name] = xs * bs
        sym = get_mxnet_op(op_name)(X, B, **attr, name=name)
        precs[name][out_key] = PREC(_get_bit(th_dict[name] * oscale))
    elif op_name in ['sum']:
        iprec = op_input_precs[op_name]
        X, xprec, xs = _requant_operator(childs[0], iprec)
        oscale = scales[name] = xs
        sym = get_mxnet_op(op_name)(X, **attr, name=name)
        precs[name][out_key] = PREC(_get_bit(th_dict[name] * oscale))
    elif op_name in [
            'elemwise_add', 'elemwise_sub', 'broadcast_add', 'broadcast_sub',
            'Concat'
    ]:
        iprec = op_input_precs[op_name]
        in_th = max([th_dict[n] for n in cns])
        oscale = scales[name] = scale(in_th, iprec.p)
        new_childs = []
        for c in childs:
            c, cprec, _ = _requant(c, iprec, oscale=oscale)
            new_childs.append(c)
        sym = get_mxnet_op(op_name)(*new_childs, **attr, name=name)
        precs[name][out_key] = PREC(_get_bit(th_dict[name] * oscale))
    elif op_name in ['Embedding']:
        iprec = op_input_precs[op_name]
        X, xs = childs[0], scales[cns[0]]
        if xs != 1:
            X, xprec, _ = _requant_operator(childs[0], PREC(32), 1 / xs)
        W, wprec, ws = _requant_parameter(cns[1], iprec)
        th_dict[name] = th_dict[cns[1]]
        oscale = scales[name] = ws
        sym = get_mxnet_op(op_name)(X, W, **attr, name=name)
        precs[name][out_key] = PREC(_get_bit(th_dict[name] * oscale))
    else:
        print(name, op_name, attr)
        assert False

    logger.debug("operator  %-20s name=%-40s oscale=%s, iscale=%s", op_name,
                 name, scales[name], cns)

    oname = sym.attr('name')
    infer_shapes[oname] = infer_shapes[name]
    th_dict[oname] = th_dict[name]
    precs[oname] = precs[name]
    scales[oname] = scales[name]

    # Requantize output symbol
    if name in precs[name]:
        oprec = precs[name][name]
        os = scale(th_dict[name], oprec.p)
        sym, oprec, os = _requant_operator(sym, PREC(oprec), os)

        oname = sym.attr('name')
        scales[oname] = os
        infer_shapes[oname] = infer_shapes[name]
        th_dict[oname] = th_dict[name]
        precs[oname] = oprec
        scales[oname] = os

    return sym, params
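The sigmoid/exp branch above swaps the nonlinear op for an integer lookup table. Conceptually, stripped of the MRT machinery (a standalone sketch with assumed scales, not MRT's API):

import numpy as np

xs, oscale, alpha = 16.0, 127.0, 100           # assumed input scale, output scale, input range
grid = np.arange(-alpha, alpha + 1) / xs       # dequantized integer inputs
table = np.round(1.0 / (1.0 + np.exp(-grid)) * oscale)  # quantized sigmoid values

def qsigmoid(xq):                              # xq: integer in [-alpha, alpha]
    return table[xq + alpha]                   # the broadcast_add shifts xq into a table index

print(qsigmoid(0), oscale / 2)                 # sigmoid(0) = 0.5, quantized to 64.0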
Example #27
y = nd.random_normal(0, 1, shape=(3, 4))
print(y)
print(y.shape)
print(y.size)

x = nd.random_normal(0, 1, shape=(3, 4))
print(x)
print(x + y)
print(x * y)
# Exponentiation.
print(nd.exp(y))
# Transpose
print(nd.dot(x, y.T))

# Broadcasting
a = nd.arange(3).reshape((3, 1))
b = nd.arange(2).reshape((1, 2))
print('a:', a)
print('b:', b)
print('a+b:', a + b)

# Converting to and from NumPy
x = np.ones((2, 3))
y = nd.array(x)
z = y.asnumpy()
print([z, y])

# In-place operations
x = nd.ones((3, 4))
y = nd.ones((3, 4))
before = id(y)
Example #28
def _simulate_layer(sym, params, graph, inputs_ext, scales, precs):
    logger = logging.getLogger('log.calib.sym.sim.requant')
    name, op_name = sym.attr('name'), sym.attr('op_name')
    childs, attr = sym_iter(sym.get_children()), sym.list_attr()

    node = sym
    cscales = [scales[c.attr('name')] for c in childs] if childs else []

    def _restore():
        new_childs = []
        for idx, c in enumerate(childs):
            tmp = c / cscales[idx]
            new_childs.append(tmp)
        out = get_mxnet_op(op_name)(*new_childs, **attr, name=name)
        out = out * scales[name]
        return out

    if _is_annotate_op(sym):
        X_name = childs[0].attr('name')
        requant_scale = scales[name] / scales[childs[0].attr('name')]
        in_prec, out_prec = precs[X_name][out_key], precs[name][out_key]
        node = _simulate(childs[0], requant_scale, in_prec, out_prec, name)
        logger.debug("layer %-40s requant scale=%-16.8f  out=%-16.8f in=%s",
                     name, requant_scale, scales[name],
                     [scales[c.attr('name')]
                      for c in childs] if childs else [])
    elif op_name in [
            'broadcast_add', 'broadcast_sub', 'elemwise_add', 'elemwise_sub',
            'Concat'
    ]:
        cscales = [scales[c.attr('name')] for c in childs]
        new_childs = []
        out_scale = min(cscales)
        for idx, c in enumerate(childs):
            relative_scale = out_scale / cscales[idx]
            if relative_scale != 1:
                cname = c.attr('name')
                in_prec, out_prec = precs[cname][out_key], precs[cname][
                    out_key]
                c = _simulate(c, relative_scale, in_prec, out_prec,
                              "%s_in%d_squeeze" % (name, idx))
                logger.debug("layer %-40s  adjust scale=%-16.8f orig=%-16.8f" + \
                        " for requant %-40s input scale %-16.8f",
                        c.attr('name'), relative_scale,
                        cscales[idx], name, out_scale)
            new_childs.append(c)
        node = get_mxnet_op(op_name)(*new_childs, **attr, name=name)
    elif op_name in ['sigmoid', 'exp']:
        cname = childs[0].attr('name')
        in_prec = precs[cname][name]
        alpha = (2**(in_prec - 1)) - 1
        data = nd.arange(-alpha, alpha + 1)
        out = get_nd_op(op_name)(data / cscales[0])
        weight = (out * scales[name]).round().reshape(2 * alpha + 1, 1)
        W_name = name + '_weight'
        assert W_name not in graph
        W = graph[W_name] = mx.sym.var(W_name, shape=weight.shape)
        params[W_name] = weight
        precs[W_name] = {out_key: precs[name][out_key]}
        alpha_sym, alpha_name = op_const(alpha, graph, var=mx.sym.var)
        precs[alpha_name] = {out_key: in_prec}
        params[alpha_name] = nd.array([alpha])
        X = mx.sym.broadcast_add(childs[0], alpha_sym)
        node = mx.sym.Custom(X,
                             W,
                             in_dim=2 * alpha + 1,
                             name=name,
                             op_type='cvm_lut')

    scales[node.attr('name')] = scales[name]
    precs[node.attr('name')] = precs[name]
    return node, params
Example #29
def arange(start, stop, dtype=np.int64, ctx=None):
    if start >= stop:
        return nd.array([], dtype=dtype, ctx=ctx)
    else:
        return nd.arange(start, stop, dtype=dtype, ctx=ctx)
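The start >= stop guard exists presumably because nd.arange itself cannot express an empty range in the MXNet versions these wrappers target, so the empty case is built by hand:

print(arange(0, 3).asnumpy())  # [0 1 2]
print(arange(3, 3).shape)      # (0,)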
Example #30
import matplotlib
matplotlib.rcParams['figure.dpi'] = 120
import matplotlib.pyplot as plt
from mxnet import gluon, nd


class SmoothL1Loss(gluon.loss.Loss):
    def __init__(self, scale=1.0, batch_axis=0, **kwargs):
        super(SmoothL1Loss, self).__init__(None, batch_axis, **kwargs)
        self._scale = scale

    def hybrid_forward(self, F, output, label, mask):
        loss = F.smooth_l1((output - label) * mask, scalar=self._scale)
        return loss.mean(axis=self._batch_axis, exclude=True)


if __name__ == '__main__':
    colors = ['blue', 'red', 'green', 'black']
    scales = [.5, 1, 10]

    x = nd.arange(-2, 2, .01)
    for i, s in enumerate(scales):
        y = nd.smooth_l1(x, scalar=s)
        plt.plot(x.asnumpy(), y.asnumpy(), colors[i])

    y = x ** 2
    plt.plot(x.asnumpy(), y.asnumpy(), 'black')

    plt.legend(['scale='+str(s) for s in scales] + ['Square loss'])
    plt.show()
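For reference, nd.smooth_l1 with scalar sigma computes the standard smooth L1 (a note on the standard definition, not quoted from this source):

# f(x) = 0.5 * (sigma * x)^2    if |x| < 1 / sigma^2
# f(x) = |x| - 0.5 / sigma^2    otherwise
#
# so a larger `scale` narrows the quadratic region around zero, which is
# what the plot compares against the plain square loss.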

Example #31
print("x3是:%s" % x3)

x4 = nd.random_normal(0, 1, shape=(3, 4))
print("x3是:%s \n x4是:%s" % (x3, x4))

print(x4.shape)
print(x4.size)

x5 = nd.dot(x1, x2.T)
print(x5)

# Broadcasting
# When the ndarrays on either side of a binary operator differ in shape,
# the system tries to copy them to a common shape.
# E.g. if a's dim 0 is 3 and b's dim 0 is 1, a + b copies b 3 times along dim 0:
a = nd.arange(3).reshape((3, 1))
b = nd.arange(2).reshape((1, 2))
print('a:', a)
print('b:', b)
print('a+b:', a + b)

c1 = nd.arange(4)
print(c1)
c2 = nd.arange(4).reshape((2, 2))
print(c2)

# NDArray converts to and from NumPy conveniently
import numpy as np
x = np.ones((2, 3))
y = nd.array(x)  # numpy -> mxnet
z = y.asnumpy()  # mxnet -> numpy
Example #32
def arange(start, stop, dtype=np.int64):
    if start >= stop:
        return nd.array([], dtype=dtype)
    else:
        return nd.arange(start, stop, dtype=dtype)
Example #33
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 19:17:04 2020

@author: DER
"""
# 1
from mxnet import ndarray as nd

x = nd.arange(12)
print(x)

print(x.shape)

print(x.size)

X = x.reshape((3,4))
print(X)

print(nd.zeros((2, 3, 4)))

print(nd.ones((3, 4)), end="\n\n")

Y = nd.array([[12, 11, 10, 9], [8, 7, 6, 5], [4, 3, 2, 1]])
print(Y)

print(nd.random.normal(0, 1, shape=(3, 4)))

print(Y.exp())

print(nd.dot(X, Y.T))
Example #34
def arange(start, stop, dtype="int64"):
    if start >= stop:
        return nd.array([], dtype=data_type_dict()[dtype])
    else:
        return nd.arange(start, stop, dtype=data_type_dict()[dtype])
Example #35
def nd_arange(*args, **kwargs):
    return nd.arange(*args, dtype="float64", **kwargs)