Example 1
 def test_to_dense(self):
     if self.nbatch > 0:
         x_shape = (self.nbatch, self.shape[0], self.shape[1])
     else:
         x_shape = self.shape
     x0 = _setup_tensor(.5, 1, x_shape, self.dtype, .75)
     if self.use_ldnz:
         ldnz = self.shape[0] * self.shape[1]
         sp_x = utils.to_coo(x0, ldnz=ldnz)
     else:
         sp_x = utils.to_coo(x0)
     assert sp_x.data.shape == sp_x.row.shape == sp_x.col.shape
     if self.nbatch > 0:
         assert sp_x.data.ndim == 2
         assert sp_x.data.shape[0] == self.nbatch
         if self.use_ldnz:
             assert sp_x.data.shape[1] == ldnz
         else:
             max_nnz = 0
             for i in range(self.nbatch):
                 max_nnz = max(max_nnz, numpy.count_nonzero(x0[i]))
             assert sp_x.data.shape[1] == max_nnz
     else:
         assert sp_x.data.ndim == 1
         if self.use_ldnz:
             assert sp_x.data.shape[0] == ldnz
         else:
             max_nnz = numpy.count_nonzero(x0)
             assert sp_x.data.shape[0] == max_nnz
     x1 = sp_x.to_dense()
     numpy.testing.assert_array_equal(x0, x1)
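This test exercises the full round trip: utils.to_coo packs the nonzeros of a dense array into data/row/col vectors (optionally padded to a fixed leading dimension ldnz), and to_dense reconstructs the original array. A minimal sketch of the same round trip, assuming the API exactly as used above:

    import numpy
    from chainer import utils

    x = numpy.array([[0., 1.], [2., 0.]], dtype=numpy.float32)
    sp_x = utils.to_coo(x)
    # data holds the nonzero values; row/col hold their coordinates.
    assert sp_x.data.shape == (2,)  # two nonzeros, no ldnz padding
    numpy.testing.assert_array_equal(sp_x.to_dense(), x)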
Example 2
 def test_invalid_inputs(self):
     a = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
     b = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
     sp_a = utils.to_coo(a)
     sp_b = utils.to_coo(b)
     with self.assertRaises(ValueError):
         F.sparse_matmul(sp_a, sp_b, self.transa, self.transb)
     with self.assertRaises(ValueError):
         F.sparse_matmul(a, b, self.transa, self.transb)
Example 3
 def test_invalid_shape(self):
     a = _setup_tensor(.5, 1, (1, 2, 3), numpy.float32, .75)
     b = _setup_tensor(.5, 1, (1, 4, 5), numpy.float32, .75)
     sp_a = utils.to_coo(a)
     sp_b = utils.to_coo(b)
     with self.assertRaises(type_check.InvalidType):
         F.sparse_matmul(sp_a, b, self.transa, self.transb)
     with self.assertRaises(type_check.InvalidType):
         F.sparse_matmul(a, sp_b, self.transa, self.transb)
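Taken together, the two error tests imply that F.sparse_matmul expects exactly one COO operand and shape-compatible matrices. A hedged sketch of the supported call patterns, assuming the same API:

    import numpy
    import chainer.functions as F
    from chainer import utils

    a = numpy.random.rand(3, 4).astype(numpy.float32)
    b = numpy.random.rand(4, 2).astype(numpy.float32)

    c1 = F.sparse_matmul(utils.to_coo(a), b)  # sparse x dense
    c2 = F.sparse_matmul(a, utils.to_coo(b))  # dense x sparse
    # Two sparse or two dense operands raise ValueError; mismatched
    # inner dimensions fail the type check with InvalidType.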
Example 4
    def check_DNSP_double_backward(
            self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
            atol, rtol):
        sp_b = utils.to_coo(b_data)
        sp_ggb = utils.to_coo(b_grad_grad)
        # A dense-times-sparse product is computed as the transposed
        # sparse-times-dense product, so transa/transb are swapped and
        # negated, and the result is transposed via transc=True.
        func = F.math.sparse_matmul.CooMatMul(
            sp_b.row, sp_b.col, sp_b.shape, sp_b.order,
            transa=not self.transb, transb=not self.transa, transc=True)

        def op(b, a):
            return func.apply((b, a))[0]
        gradient_check.check_double_backward(
            op, (sp_b.data.data, a_data),
            c_grad, (sp_ggb.data.data, a_grad_grad),
            atol=atol, rtol=rtol, dtype=numpy.float32)
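The flag handling above relies on the identity (A B)^T = B^T A^T: the dense-times-sparse product is evaluated with the sparse operand first and the result transposed (transc=True), which is why transa/transb are swapped and negated relative to the caller's flags. The SPDN variant in the next example passes the flags through unchanged because its sparse operand is already on the left. A quick numpy check of the identity:

    import numpy
    A = numpy.random.rand(2, 3).astype(numpy.float32)
    B = numpy.random.rand(3, 4).astype(numpy.float32)
    numpy.testing.assert_allclose((A @ B).T, B.T @ A.T, rtol=1e-6)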
Example 5
    def check_SPDN_double_backward(
            self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
            atol, rtol):
        sp_a = utils.to_coo(a_data)
        sp_gga = utils.to_coo(a_grad_grad)
        func = F.math.sparse_matmul.CooMatMul(
            sp_a.row, sp_a.col, sp_a.shape, sp_a.order,
            transa=self.transa, transb=self.transb, transc=False)

        def op(a, b):
            return func.apply((a, b))[0]
        gradient_check.check_double_backward(
            op, (sp_a.data.data, b_data),
            c_grad, (sp_gga.data.data, b_grad_grad),
            atol=atol, rtol=rtol, dtype=numpy.float32)
Example 6
def edge_adj_to_device_batch(edge_list, adj_list, device):
    xp = backend.get_array_module(adj_list[0])
    # Number of vertices in each graph of the batch.
    v_num = xp.array([i.shape[0] for i in adj_list])
    # Allocate one big adjacency matrix and edge tensor covering all graphs.
    adj = xp.zeros([v_num.sum(), v_num.sum()], dtype=np.float32)
    edge = xp.zeros(
        [v_num.sum(), v_num.sum(), edge_list[0].shape[2]], dtype=np.float32)
    v_num = xp.cumsum(v_num)
    # Copy each graph into its own diagonal block; off-diagonal blocks stay
    # zero, so vertices of different graphs remain disconnected.
    for o, n, a, e in zip(xp.array([0, *v_num[:-1]]), v_num, adj_list,
                          edge_list):
        adj[o:n, o:n] = a
        edge[o:n, o:n, :] = e
    edge = edge.reshape([edge.shape[0]**2, edge.shape[2]])
    # Per-vertex degree, used for normalization; substitute one for isolated
    # vertices to avoid division by zero.
    ones = xp.ones(adj.shape[0], dtype=np.float32)
    num_array = xp.sum(adj, axis=1)
    num_array = xp.where(num_array == 0, ones, num_array)
    num_array = num_array.reshape([num_array.shape[0], 1])
    num_array = _to_device(device=device, x=num_array)
    edge = _to_device(device=device, x=edge)
    adj = _to_device(device=device, x=adj)
    # Convert the mostly-zero batched tensors to sparse COO form.
    edge, adj = to_coo(edge), to_coo(adj)

    return edge, adj, num_array
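The helper merges a batch of graphs into a single block-diagonal graph before sparsifying it. A small numpy-only sketch of the block-diagonal layout it builds (names hypothetical):

    import numpy as np

    adj_list = [np.ones((2, 2), np.float32), np.ones((3, 3), np.float32)]
    n = sum(a.shape[0] for a in adj_list)
    adj = np.zeros((n, n), np.float32)
    offset = 0
    for a in adj_list:
        k = a.shape[0]
        adj[offset:offset + k, offset:offset + k] = a  # one block per graph
        offset += k
    assert adj[0, 2] == 0  # no edges between different graphs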
Example 7
 def test_get_order(self):
     if self.nbatch > 0:
         x_shape = (self.nbatch, self.shape[0], self.shape[1])
     else:
         x_shape = self.shape
     x0 = _setup_tensor(.5, 1, x_shape, self.dtype, .75)
     x0 = numpy.ascontiguousarray(x0)
     if self.nbatch > 0:
         x0[0, 0, self.shape[1] - 1] = 1.
         x0[0, self.shape[0] - 1, 0] = 1.
     else:
         x0[0, self.shape[1] - 1] = 1.
         x0[self.shape[0] - 1, 0] = 1.
     sp_x = utils.to_coo(x0)
     row = sp_x.row
     col = sp_x.col
     assert utils.get_order(row, col) == 'C'
     assert utils.get_order(col, row) == 'F'
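utils.get_order evidently reports how the coordinate vectors are sorted: 'C' when the entries are lexicographically ordered by the (first, second) argument pair, 'F' when the roles are reversed, which is why swapping row and col flips the answer. A minimal sketch under that assumption, mirroring the corner-entry trick used in the test:

    import numpy
    from chainer import utils

    x = numpy.array([[0., 1.], [2., 0.]], dtype=numpy.float32)
    sp_x = utils.to_coo(x)  # scanned row by row: entries (0, 1), (1, 0)
    assert utils.get_order(sp_x.row, sp_x.col) == 'C'
    assert utils.get_order(sp_x.col, sp_x.row) == 'F'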
Example 8
 def check_DNSP_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
     a = chainer.Variable(a_data)
     sp_b = utils.to_coo(b_data, requires_grad=True)
     c = F.sparse_matmul(a, sp_b, transa=self.transa, transb=self.transb)
     testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
Example 9
 def check_SPDN_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
     sp_a = utils.to_coo(a_data, requires_grad=True)
     b = chainer.Variable(b_data)
     c = F.sparse_matmul(sp_a, b, transa=self.transa, transb=self.transb)
     testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
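In both forward checks the sparse operand is built with requires_grad=True, which presumably makes its data vector a differentiable chainer.Variable, so gradients can flow back into the nonzero values. A hedged end-to-end sketch (shapes chosen freely):

    import numpy
    import chainer.functions as F
    from chainer import utils

    a = numpy.random.rand(3, 4).astype(numpy.float32)
    b = numpy.random.rand(4, 2).astype(numpy.float32)
    sp_a = utils.to_coo(a, requires_grad=True)
    c = F.sparse_matmul(sp_a, b)
    c.grad = numpy.ones(c.shape, dtype=numpy.float32)  # seed output gradient
    c.backward()
    assert sp_a.data.grad is not None  # gradient w.r.t. the nonzero values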