def check_backward(self, x_data, y_data, z_grad):
    if self.right_const:
        op = lambda x: operator.matmul(x, y_data)
        data = (x_data,)
    elif self.left_const:
        op = lambda y: operator.matmul(x_data, y)
        data = (y_data,)
    else:
        op = operator.matmul
        data = x_data, y_data
    if self.dtype == numpy.float16:
        options = {"atol": 1e-3, "rtol": 1e-3}
    else:
        options = {"atol": 1e-4, "rtol": 1e-4}
    gradient_check.check_backward(
        op, data, z_grad, dtype=numpy.float64, **options)
def check_forward(self, x_data, y_data):
    if self.left_const:
        x = x_data
    else:
        x = chainer.Variable(x_data)
    if self.right_const:
        y = y_data
    else:
        y = chainer.Variable(y_data)
    z = operator.matmul(x, y)
    if self.dtype == numpy.float16:
        options = {'atol': 1e-3, 'rtol': 1e-3}
    else:
        options = {'atol': 1e-7, 'rtol': 1e-7}
    testing.assert_allclose(
        operator.matmul(self.x, self.y), z.data, **options)
def test_operator_matmul(self, xp, dtype1, dtype2):
    if dtype1 == numpy.float16 and dtype2 == numpy.int8:
        return xp.array([])
    if dtype2 == numpy.float16 and dtype1 == numpy.int8:
        return xp.array([])
    x1 = testing.shaped_arange(self.shape_pair[0], xp, dtype1)
    x2 = testing.shaped_arange(self.shape_pair[1], xp, dtype2)
    return operator.matmul(x1, x2)
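# Side note (illustrative, not part of the original test): the float16/int8
# guard above presumably exists because NumPy promotes that pair to float16,
# whose low precision makes comparison against a wider-precision reference
# flaky. A quick stand-alone check of the promotion rule:
import numpy

assert numpy.result_type(numpy.float16, numpy.int8) == numpy.float16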
def test_matmul(self):
    D = {'shape': self.A.shape,
         'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
         'rmatvec': lambda x: np.dot(self.A.T.conj(),
                                     x).reshape(self.A.shape[1]),
         'rmatmat': lambda x: np.dot(self.A.T.conj(), x),
         'matmat': lambda x: np.dot(self.A, x)}
    A = interface.LinearOperator(**D)
    B = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    b = B[0]

    assert_equal(operator.matmul(A, b), A * b)
    assert_equal(operator.matmul(A, B), A * B)
    assert_raises(ValueError, operator.matmul, A, 2)
    assert_raises(ValueError, operator.matmul, 2, A)
def test_matmul(self):
    self.assertRaises(TypeError, operator.matmul)
    self.assertRaises(TypeError, operator.matmul, 42, 42)

    class M:
        def __matmul__(self, other):
            return other - 1

    self.assertEqual(operator.matmul(M(), 42), 41)
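# Companion sketch (not part of the original test): the binary-op protocol
# also falls back to the right operand's __rmatmul__ when the left operand
# does not implement __matmul__. Runnable stand-alone:
import operator

class OnlyR:
    def __rmatmul__(self, other):
        return ("rmatmul", other)

# int has no __matmul__, so operator.matmul defers to OnlyR.__rmatmul__
assert operator.matmul(42, OnlyR()) == ("rmatmul", 42)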
def test_matmul(self):
    if not TEST_MATMUL:
        raise nose.SkipTest("matmul is only tested in Python 3.5+")

    D = {"shape": self.A.shape,
         "matvec": lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
         "rmatvec": lambda x: np.dot(self.A.T.conj(),
                                     x).reshape(self.A.shape[1]),
         "matmat": lambda x: np.dot(self.A, x)}
    A = interface.LinearOperator(**D)
    B = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    b = B[0]

    assert_equal(operator.matmul(A, b), A * b)
    assert_equal(operator.matmul(A, B), A * B)
    assert_raises(ValueError, operator.matmul, A, 2)
    assert_raises(ValueError, operator.matmul, 2, A)
def testSparseMatmul(self):
    X = jnp.arange(16).reshape(4, 4)
    Xsp = BCOO.fromdense(X)
    Y = jnp.ones(4)
    Ysp = BCOO.fromdense(Y)

    # dot_general
    result_sparse = self.sparsify(operator.matmul)(Xsp, Y)
    result_dense = operator.matmul(X, Y)
    self.assertAllClose(result_sparse, result_dense)

    # rdot_general
    result_sparse = self.sparsify(operator.matmul)(Y, Xsp)
    result_dense = operator.matmul(Y, X)
    self.assertAllClose(result_sparse, result_dense)

    # spdot_general
    result_sparse = self.sparsify(operator.matmul)(Xsp, Ysp)
    result_dense = operator.matmul(X, Y)
    self.assertAllClose(result_sparse.todense(), result_dense)
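# Hedged aside: outside the sparsify transform, BCOO matrices also support
# the @ operator directly against dense operands (BCOO defines __matmul__).
# A minimal stand-alone sketch, assuming jax is installed:
import jax.numpy as jnp
from jax.experimental.sparse import BCOO

X = jnp.arange(16.0).reshape(4, 4)
Xsp = BCOO.fromdense(X)
y = jnp.ones(4)
assert jnp.allclose(Xsp @ y, X @ y)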
def test_dot(a_shape, b_shape, a_format, b_format, a_comp_axes, b_comp_axes):
    if a_format == "coo" or len(a_shape) == 1:
        a_comp_axes = None
    if b_format == "coo" or len(b_shape) == 1:
        b_comp_axes = None
    sa = sparse.random(
        a_shape, density=0.5, format=a_format, compressed_axes=a_comp_axes
    )
    sb = sparse.random(
        b_shape, density=0.5, format=b_format, compressed_axes=b_comp_axes
    )
    a = sa.todense()
    b = sb.todense()

    assert_eq(a.dot(b), sa.dot(sb))
    assert_eq(np.dot(a, b), sparse.dot(sa, sb))
    assert_eq(sparse.dot(sa, b), sparse.dot(a, sb))

    # Basic equivalences
    assert_eq(operator.matmul(a, b), operator.matmul(sa, sb))
def check_forward(self, x_data, y_data):
    if self.left_const:
        x = x_data
    else:
        x = chainer.Variable(x_data)
    if self.right_const:
        y = y_data
    else:
        y = chainer.Variable(y_data)
    z = operator.matmul(x, y)
    if self.dtype == numpy.float16:
        options = {'atol': 1e-3, 'rtol': 1e-3}
    else:
        options = {'atol': 1e-7, 'rtol': 1e-7}
    testing.assert_allclose(self.x.dot(self.y), z.data, **options)
def test_nb_ops_binary(self):
    import operator
    mod = self.make_module(r"""
        @DEFINE_PointObject

        #define MYSLOT(NAME)                                            \
            HPyDef_SLOT(p_##NAME, NAME##_impl, HPy_nb_##NAME);          \
            static HPy NAME##_impl(HPyContext ctx, HPy self, HPy other) \
            {                                                           \
                HPy s = HPyUnicode_FromString(ctx, #NAME);              \
                HPy res = HPyTuple_Pack(ctx, 3, self, s, other);        \
                HPy_Close(ctx, s);                                      \
                return res;                                             \
            }

        MYSLOT(add)
        MYSLOT(and)
        MYSLOT(divmod)
        MYSLOT(floor_divide)
        MYSLOT(lshift)
        MYSLOT(multiply)
        MYSLOT(or)
        MYSLOT(remainder)
        MYSLOT(rshift)
        MYSLOT(subtract)
        MYSLOT(true_divide)
        MYSLOT(xor)
        MYSLOT(matrix_multiply)

        @EXPORT_POINT_TYPE(&p_add, &p_and, &p_divmod, &p_floor_divide,
                           &p_lshift, &p_multiply, &p_or, &p_remainder,
                           &p_rshift, &p_subtract, &p_true_divide, &p_xor,
                           &p_matrix_multiply)
        @INIT
    """)
    p = mod.Point()
    assert p + 42 == (p, "add", 42)
    assert p & 42 == (p, "and", 42)
    assert divmod(p, 42) == (p, "divmod", 42)
    assert p // 42 == (p, "floor_divide", 42)
    assert p << 42 == (p, "lshift", 42)
    assert p * 42 == (p, "multiply", 42)
    assert p | 42 == (p, "or", 42)
    assert p % 42 == (p, "remainder", 42)
    assert p >> 42 == (p, "rshift", 42)
    assert p - 42 == (p, "subtract", 42)
    assert p / 42 == (p, "true_divide", 42)
    assert p ^ 42 == (p, "xor", 42)
    # we can't use '@' because we want to be importable on py27
    assert operator.matmul(p, 42) == (p, "matrix_multiply", 42)
def matmul(
    self, y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"]
) -> "ReplicatedSharedTensor":
    """Apply the "matmul" operation between "self" and "y".

    Args:
        y (Union[int, float, torch.Tensor, "ReplicatedSharedTensor"]): self@y

    Returns:
        ReplicatedSharedTensor: Result of the operation.

    Raises:
        ValueError: Raised when a private matmul is attempted with a
            number of parties other than 3.
    """
    y_tensor, session = self.sanity_checks(self, y)
    is_private = isinstance(y, ReplicatedSharedTensor)
    op_str = "matmul"

    if is_private:
        if session.nr_parties == 3:
            from sympc.protocol import Falcon

            result = [Falcon.multiplication_protocol(self, y_tensor, op_str)]
        else:
            raise ValueError(
                "Private matmul between ReplicatedSharedTensors is allowed "
                "only for 3 parties"
            )
    else:
        # Public operand: apply the matmul to each local share.
        result = [
            operator.matmul(share, y_tensor.shares[0]) for share in self.shares
        ]

    tensor = ReplicatedSharedTensor(
        ring_size=self.ring_size,
        session_uuid=self.session_uuid,
        config=self.config,
    )
    tensor.shares = result
    return tensor
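# Dependency-free sketch of why the public branch above is sound: with
# additive secret sharing, reconstruction (summing the shares) commutes with
# a matmul by a public operand, so multiplying each local share suffices.
# (numpy stands in for torch tensors here; real shares live in a ring.)
import operator
import numpy as np

shares = [np.ones((2, 2)), 2.0 * np.ones((2, 2))]  # stand-in local shares
public = np.arange(4.0).reshape(2, 2)
result_shares = [operator.matmul(s, public) for s in shares]
assert np.allclose(sum(result_shares), sum(shares) @ public)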
def test_operator_matmul(self, xp, dtype1, dtype2):
    x1 = testing.shaped_arange(self.shape_pair[0], xp, dtype1)
    x2 = testing.shaped_arange(self.shape_pair[1], xp, dtype2)
    return operator.matmul(x1, x2)
def op(x, y):
    z = operator.matmul(x, y)
    return z * z
def op(x):
    z = operator.matmul(x, y_data)
    return z * z
def op(x):
    return operator.matmul(x, y_data)
def _get_immediate_energies_available_jit(spins, matrix, bias):
    return -(2 * spins * (matmul(matrix, spins) + bias))
def _calculate_energy_change(new_spins, matrix, bias, action):
    return 2 * new_spins[action] * (
        matmul(new_spins.T, matrix[:, action]) + bias[action])
def _get_immediate_energies_available_jit(spins, matrix):
    return 2 * spins * matmul(matrix, spins)
def op(y):
    z = operator.matmul(x_data, y)
    return z * z
def op(y):
    return operator.matmul(x_data, y)
def matmul_usecase(x, y):
    return operator.matmul(x, y)
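# Possible usage of the usecase above (hedged: assumes Numba is installed,
# which in turn needs SciPy's BLAS bindings for matmul in nopython mode):
import numpy as np
from numba import njit

jitted = njit(matmul_usecase)
a = np.ones((2, 3))
b = np.ones((3, 4))
np.testing.assert_allclose(jitted(a, b), a @ b)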
def test_matmul(self):
    other = mock.MagicMock()
    self.assertEqual(operator.matmul(self.proxy, other),
                     operator.matmul(self.obj, other))
    self.assertEqual(self.proxy.__matmul__(other),
                     operator.matmul(self.obj, other))
def op(x):
    z = operator.matmul(x, y_data.astype(x.dtype))
    return z * z
def _calculate_energy_jit(spins, matrix):
    return -matmul(spins.T, matmul(matrix, spins)) / 2
def _get_immediate_cuts_available_jit(spins, matrix):
    return spins * matmul(matrix, spins)
def test_operator_matmul3(self, xp):
    x1 = testing.shaped_arange(self.shape_pair[0], xp, numpy.int8)
    x2 = testing.shaped_arange(self.shape_pair[1], xp, numpy.float16)
    return operator.matmul(x1, x2)
def _calculate_energy_jit(spins, matrix, bias):
    return matmul(spins.T, matmul(matrix, spins)) / 2 + matmul(spins.T, bias)
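# Consistency check (illustrative; assumes ``from numpy import matmul`` and
# the convention these helpers imply: a symmetric coupling matrix with zero
# diagonal and spins in {-1, +1}). Flipping one spin via the incremental
# _calculate_energy_change above matches recomputing _calculate_energy_jit:
import numpy as np

rng = np.random.default_rng(0)
n = 8
m = rng.normal(size=(n, n))
matrix = (m + m.T) / 2
np.fill_diagonal(matrix, 0.0)            # zero self-coupling
bias = rng.normal(size=n)
spins = rng.choice([-1.0, 1.0], size=n)

action = 3
new_spins = spins.copy()
new_spins[action] *= -1                  # flip one spin

direct = (_calculate_energy_jit(new_spins, matrix, bias)
          - _calculate_energy_jit(spins, matrix, bias))
incremental = _calculate_energy_change(new_spins, matrix, bias, action)
assert np.isclose(direct, incremental)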
def op(y):
    return operator.matmul(x_data.astype(y.dtype), y)
def matmul(a, b):
    if bpy.app.version < (2, 80):
        # Blender < 2.80: Matrix/Vector multiplication uses '*'
        return a * b
    else:
        return operator.matmul(a, b)  # a @ b
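# Design note (illustrative): since bpy.app.version is fixed for the life of
# the process, the branch could also be taken once at import time instead of
# on every call. A hedged, bpy-free stand-in showing the pattern:
import operator

LEGACY_BLENDER = False  # stand-in for: bpy.app.version < (2, 80)
mat_mul = operator.mul if LEGACY_BLENDER else operator.matmul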
def op(x):
    return operator.matmul(x, y_data.astype(x.dtype))
def __rmatmul__(self, other):
    proxiee = _get_proxiee(self)
    _logger.debug("__rmatmul__ on proxiee (%r)", proxiee)
    # NOTE: this is equivalent to ``other @ proxiee`` but we cannot use
    # this syntax as long as 3.4 and earlier have to be supported.
    return operator.matmul(other, proxiee)
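# Minimal stand-alone sketch of the reflected dispatch this method relies on
# (ArrayProxy here is hypothetical, not the library's class):
import operator
import numpy as np

class ArrayProxy:
    def __init__(self, proxiee):
        self._proxiee = proxiee

    def __rmatmul__(self, other):
        # ``other @ proxy`` resolves here when ``other`` has no __matmul__
        return operator.matmul(other, self._proxiee)

identity = [[1.0, 0.0], [0.0, 1.0]]      # plain lists have no __matmul__
proxy = ArrayProxy(np.arange(4.0).reshape(2, 2))
assert np.array_equal(operator.matmul(identity, proxy), proxy._proxiee)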
def test_matmul(self):
    # matmul test is for GH #10259
    a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
    b = DataFrame(
        np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
    ).T

    # Series @ DataFrame -> Series
    result = operator.matmul(a, b)
    expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
    tm.assert_series_equal(result, expected)

    # DataFrame @ Series -> Series
    result = operator.matmul(b.T, a)
    expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
    tm.assert_series_equal(result, expected)

    # Series @ Series -> scalar
    result = operator.matmul(a, a)
    expected = np.dot(a.values, a.values)
    tm.assert_almost_equal(result, expected)

    # GH 21530: vector (1D np.array) @ Series (__rmatmul__)
    result = operator.matmul(a.values, a)
    expected = np.dot(a.values, a.values)
    tm.assert_almost_equal(result, expected)

    # GH 21530: vector (1D list) @ Series (__rmatmul__)
    result = operator.matmul(a.values.tolist(), a)
    expected = np.dot(a.values, a.values)
    tm.assert_almost_equal(result, expected)

    # GH 21530: matrix (2D np.array) @ Series (__rmatmul__)
    result = operator.matmul(b.T.values, a)
    expected = np.dot(b.T.values, a.values)
    tm.assert_almost_equal(result, expected)

    # GH 21530: matrix (2D nested lists) @ Series (__rmatmul__)
    result = operator.matmul(b.T.values.tolist(), a)
    expected = np.dot(b.T.values, a.values)
    tm.assert_almost_equal(result, expected)

    # mixed dtype DataFrame @ Series
    a["p"] = int(a.p)
    result = operator.matmul(b.T, a)
    expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
    tm.assert_series_equal(result, expected)

    # different dtypes DataFrame @ Series
    a = a.astype(int)
    result = operator.matmul(b.T, a)
    expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
    tm.assert_series_equal(result, expected)

    msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
    # exception raised is of type Exception
    with pytest.raises(Exception, match=msg):
        a.dot(a.values[:3])
    msg = "matrices are not aligned"
    with pytest.raises(ValueError, match=msg):
        a.dot(b.T)
def test_invalid_type(self):
    x = chainer.Variable(self.x)
    y = chainer.Variable(self.y)
    with pytest.raises(type_check.InvalidType):
        operator.matmul(x, y)
def cos_sim(v1, v2):
    norm = np.linalg.norm(v1) * np.linalg.norm(v2)
    return matmul(v1, v2) / norm if norm else -1
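# Usage sketch for cos_sim (assumes the bare ``matmul`` above is numpy's,
# e.g. ``from numpy import matmul``):
import numpy as np

print(cos_sim(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071
print(cos_sim(np.array([1.0, 0.0]), np.zeros(2)))           # -1 sentinel for zero-norm input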
def test_operator_matmul(self, xp, dtype1, dtype2):
    if numpy.result_type(dtype1, dtype2) != numpy.float32:
        return xp.array([])
    x1 = xp.array(self.x1, dtype=dtype1)
    x2 = xp.array(self.x2, dtype=dtype2)
    return operator.matmul(x1, x2)
def test_matmul(self):
    # matmul test is for GH#10259
    a = DataFrame(
        np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
    )
    b = DataFrame(
        np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"]
    )

    # DataFrame @ DataFrame
    result = operator.matmul(a, b)
    expected = DataFrame(
        np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
    )
    tm.assert_frame_equal(result, expected)

    # DataFrame @ Series
    result = operator.matmul(a, b.one)
    expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"])
    tm.assert_series_equal(result, expected)

    # np.array @ DataFrame
    result = operator.matmul(a.values, b)
    assert isinstance(result, DataFrame)
    assert result.columns.equals(b.columns)
    assert result.index.equals(Index(range(3)))
    expected = np.dot(a.values, b.values)
    tm.assert_almost_equal(result.values, expected)

    # nested list @ DataFrame (__rmatmul__)
    result = operator.matmul(a.values.tolist(), b)
    expected = DataFrame(
        np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
    )
    tm.assert_almost_equal(result.values, expected.values)

    # mixed dtype DataFrame @ DataFrame
    a["q"] = a.q.round().astype(int)
    result = operator.matmul(a, b)
    expected = DataFrame(
        np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
    )
    tm.assert_frame_equal(result, expected)

    # different dtypes DataFrame @ DataFrame
    a = a.astype(int)
    result = operator.matmul(a, b)
    expected = DataFrame(
        np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
    )
    tm.assert_frame_equal(result, expected)

    # unaligned
    df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4))
    df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3])
    with pytest.raises(ValueError, match="aligned"):
        operator.matmul(df, df2)
def _calculate_energy_change(new_spins, matrix, action):
    return -2 * new_spins[action] * matmul(new_spins.T, matrix[:, action])
def _calculate_cut_change(new_spins, matrix, action):
    return -1 * new_spins[action] * matmul(new_spins.T, matrix[:, action])
def op(y):
    z = operator.matmul(x_data.astype(y.dtype), y)
    return z * z