Example No. 1
    def forward(self, input_x):

        # input dims: batch x 1 x feature_size x time
        if input_x.shape[1] != 1:
            raise ValueError("expected input of shape (batch, 1, feature_size, time)")
        # drop the singleton channel dim -> (batch, feature_size, time)
        x = input_x.view(input_x.shape[0], input_x.shape[2], input_x.shape[3])

        x = x.transpose(1, 2)
        for layer in self.layer[:-1]:
            x = layer(x)
        xb = self.layer[-1](x)

        stack = []
        for b_idx in range(xb.shape[0]):
            M = self.M_base.clone()
            for t_idx in range(xb.shape[2]):
                M = torch.addr(M, x[b_idx, :, t_idx], xb[b_idx, :, t_idx])
            stack.append(M.view(-1))
        x = torch.stack(stack)

        for layer in self.bottleneck_layer:
            x = layer(x)
        x = self.bottle_neck(x)

        self.features = self.l2_norm(x)
        # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
        alpha = 10
        self.features = self.features * alpha

        return self.features
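Note: the per-batch double loop accumulates sum_t outer(x[b, :, t], xb[b, :, t]), which is a single batched matrix product. A minimal equivalent sketch, assuming x and xb are shaped (batch, dim, time) as the indexing above suggests:

import torch

batch, d1, d2, T = 2, 3, 4, 5
x = torch.randn(batch, d1, T)
xb = torch.randn(batch, d2, T)
M_base = torch.zeros(d1, d2)

# one bmm replaces the nested addr loop
vectorized = (M_base + torch.bmm(x, xb.transpose(1, 2))).view(batch, -1)

stack = []
for b in range(batch):
    M = M_base.clone()
    for t in range(T):
        M = torch.addr(M, x[b, :, t], xb[b, :, t])
    stack.append(M.view(-1))
looped = torch.stack(stack)

print(torch.allclose(vectorized, looped, atol=1e-5))  # True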
Example No. 2
 def forward(ctx, add_matrix, vector1, vector2, alpha=1, beta=1, inplace=False):
     ctx.alpha = alpha
     ctx.beta = beta
     ctx.add_matrix_size = add_matrix.size()
     ctx.save_for_backward(vector1, vector2)
     output = _get_output(ctx, add_matrix, inplace=inplace)
     # legacy scalar-first overload: torch.addr(beta, mat, alpha, vec1, vec2);
     # here `alpha` scales add_matrix and `beta` scales the outer product
     return torch.addr(alpha, add_matrix, beta,
                       vector1, vector2, out=output)
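The scalar-first positional overload used above was removed in later PyTorch releases. A minimal sketch of the keyword-argument equivalent, assuming the old signature torch.addr(beta, mat, alpha, vec1, vec2) bound the first scalar to the matrix coefficient (note the swapped roles of alpha and beta in this Function):

import torch

add_matrix = torch.randn(3, 4)
vector1, vector2 = torch.randn(3), torch.randn(4)
alpha, beta = 2.0, 3.0

# old: torch.addr(alpha, add_matrix, beta, vector1, vector2)
# computed alpha * add_matrix + beta * outer(vector1, vector2)
modern = torch.addr(add_matrix, vector1, vector2, beta=alpha, alpha=beta)
reference = alpha * add_matrix + beta * torch.outer(vector1, vector2)
print(torch.allclose(modern, reference))  # True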
Example No. 3
def unbind_role_att(role_vec, att_vec, r, att):
    # dim, memory_tensor, arr_af_arrs_of_commands and pure_arr_of_commands
    # are assumed to be defined at module level
    a = torch.addr(torch.zeros(dim, dim), role_vec, att_vec)  # outer product of role and attribute vectors
    un_att = torch.mul(memory_tensor, a)  # element-wise product with the memory tensor
    fil = torch.sum(torch.sum(un_att, 2), 1)  # reduce over the last two dimensions
    orig = arr_af_arrs_of_commands[att][r]
    diff = orig - fil
    print(pure_arr_of_commands[att][r])
    print(diff.sum())
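torch.addr against a zero matrix is just an outer product, so the binding step above can equally be written with torch.outer. A minimal sketch:

import torch

dim = 4
role_vec, att_vec = torch.randn(dim), torch.randn(dim)

a1 = torch.addr(torch.zeros(dim, dim), role_vec, att_vec)
a2 = torch.outer(role_vec, att_vec)
print(torch.allclose(a1, a2))  # True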
Example No. 4
 # old-style autograd Function: state is kept on self, and torch.addr is
 # called with the legacy scalar-first positional overload
 def forward(self, add_matrix, vector1, vector2):
     self.save_for_backward(vector1, vector2)
     output = self._get_output(add_matrix)
     return torch.addr(self.alpha,
                       add_matrix,
                       self.beta,
                       vector1,
                       vector2,
                       out=output)
Example No. 5
    def test_outer_ger_addr_legacy_tests(self, device):
        for size in ((0, 0), (0, 5), (5, 0)):
            a = torch.rand(size[0], device=device)
            b = torch.rand(size[1], device=device)

            self.assertEqual(torch.outer(a, b).shape, size)
            self.assertEqual(torch.ger(a, b).shape, size)

            m = torch.empty(size, device=device)
            self.assertEqual(torch.addr(m, a, b).shape, size)

        m = torch.randn(5, 6, device=device)
        a = torch.randn(5, device=device)
        b = torch.tensor(6, device=device)  # 0-dim tensor; outer/ger/addr require 1-D inputs
        self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
        self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
        self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
        self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
        self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
        self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
Example No. 6
 def blas_lapack_ops(self):
     m = torch.randn(3, 3)
     a = torch.randn(10, 3, 4)
     b = torch.randn(10, 4, 3)
     v = torch.randn(3)
     return (
         torch.addbmm(m, a, b),
         torch.addmm(torch.randn(2, 3), torch.randn(2, 3),
                     torch.randn(3, 3)),
         torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
         torch.addr(torch.zeros(3, 3), v, v),
         torch.baddbmm(m, a, b),
         torch.bmm(a, b),
         torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3),
                            torch.randn(3, 3)),
         # torch.cholesky(a), # deprecated
         torch.cholesky_inverse(torch.randn(3, 3)),
         torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
         torch.dot(v, v),
         torch.eig(m),
         torch.geqrf(a),
         torch.ger(v, v),
         torch.inner(m, m),
         torch.inverse(m),
         torch.det(m),
         torch.logdet(m),
         torch.slogdet(m),
         torch.lstsq(m, m),
         torch.lu(m),
         torch.lu_solve(m, *torch.lu(m)),
         torch.lu_unpack(*torch.lu(m)),
         torch.matmul(m, m),
         torch.matrix_power(m, 2),
         # torch.matrix_rank(m),
         torch.matrix_exp(m),
         torch.mm(m, m),
         torch.mv(m, v),
         # torch.orgqr(a, m),
         # torch.ormqr(a, m, v),
         torch.outer(v, v),
         torch.pinverse(m),
         # torch.qr(a),
         torch.solve(m, m),
         torch.svd(a),
         # torch.svd_lowrank(a),
         # torch.pca_lowrank(a),
         # torch.symeig(a), # deprecated
         # torch.lobpcg(a, b), # not supported
         torch.trapz(m, m),
         torch.trapezoid(m, m),
         torch.cumulative_trapezoid(m, m),
         # torch.triangular_solve(m, m),
         torch.vdot(v, v),
     )
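Several ops in this tuple (eig, lstsq, solve, lu, chain_matmul, ger) were later deprecated or removed. A sketch of the torch.linalg replacements, assuming a recent PyTorch:

import torch

m = torch.randn(3, 3)
torch.linalg.eig(m)                # replaces torch.eig(m, eigenvectors=True)
torch.linalg.lstsq(m, m)           # replaces torch.lstsq (argument order differs)
torch.linalg.solve(m, m)           # replaces torch.solve (argument order differs)
torch.linalg.lu_factor(m)          # replaces torch.lu
torch.linalg.multi_dot([m, m, m])  # replaces torch.chain_matmul
torch.outer(torch.randn(3), torch.randn(3))  # replaces torch.ger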
Example No. 7
        def run_test_case(m, a, b, beta=1, alpha=1):
            if dtype == torch.bfloat16:
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
                m_np = m.to(torch.double).cpu().numpy()
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
                m_np = m.cpu().numpy()

            if beta == 0:
                expected = alpha * np.outer(a_np, b_np)
            else:
                expected = beta * m_np + alpha * np.outer(a_np, b_np)

            self.assertEqual(torch.addr(m, a, b, beta=beta, alpha=alpha), expected)
            self.assertEqual(torch.Tensor.addr(m, a, b, beta=beta, alpha=alpha), expected)

            result_dtype = torch.addr(m, a, b, beta=beta, alpha=alpha).dtype
            out = torch.empty_like(m, dtype=result_dtype)
            torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
            self.assertEqual(out, expected)
Example No. 8
    def get_mats_from_arr(self, arr, command_number):
        arr_of_mats = []
        arr_of_diags = []
        arr_of_symbols_for_command = []
        arr_of_commands = []
        final_mat = torch.zeros(
            self.dim,
            self.dim)  # accumulator: dim x dim matrix of zeros
        nodes_traversed = 0
        for i, el in enumerate(arr):
            # arr_of_roles is assumed to be defined in the enclosing scope
            vec_for_role = self.dict_of_role_vectors[arr_of_roles[i]]

            if isinstance(el,
                          list):  # nested list: recurse into the subtree
                command_number += 1
                pointer_name = "T" + str(command_number)
                diags, mats, commands, nodes_traversed_in_this_tree = self.get_mats_from_arr(
                    el, command_number)
                arr_of_mats.append(mats)
                arr_of_diags.append(diags)
                nodes_traversed += nodes_traversed_in_this_tree
                command_number += nodes_traversed - 1
                arr_of_symbols_for_command.append(pointer_name)
                arr_of_commands.append(commands)
                self.dict_of_pointers[pointer_name] = diags[0]
                vec_for_el = diags[0]
            else:  # it's atom
                vec_for_el = self.dict_of_atom_vectors[el]
                arr_of_symbols_for_command.append(el)
            final_mat += torch.addr(
                torch.zeros(self.dim, self.dim), vec_for_el,
                vec_for_role)  # add outer product of element and role vectors
        diag_of_final_mat = torch.diag(
            final_mat)  # diagonal of the binding matrix
        normed_diag_of_final_mat = torch.div(diag_of_final_mat,
                                             torch.norm(diag_of_final_mat,
                                                        2))  # normalize it
        arr_of_mats.insert(0, final_mat)  # prepend to the array
        arr_of_diags.insert(0, normed_diag_of_final_mat)
        arr_of_commands.insert(0, arr_of_symbols_for_command)
        return arr_of_diags, arr_of_mats, arr_of_commands, nodes_traversed + 1
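For context, final_mat accumulates a tensor-product representation: each (element, role) pair contributes one outer product, and a filler can be recovered with its role vector when the roles are orthonormal. A minimal sketch with hypothetical vectors:

import torch

dim = 4
roles = torch.eye(dim)           # orthonormal role vectors
fillers = torch.randn(3, dim)    # hypothetical atom vectors

M = torch.zeros(dim, dim)
for f, r in zip(fillers, roles):
    M = torch.addr(M, f, r)      # M += outer(f, r)

recovered = M @ roles[1]         # unbind role 1
print(torch.allclose(recovered, fillers[1]))  # True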
Example No. 9
 def _update(self, s, y, rho_inv):
     # BFGS curvature-pair update; rho = 1 / (y . s)
     rho = rho_inv.reciprocal()
     if self.inverse:
         if self.n_updates == 0:
             self.H.mul_(rho_inv / y.dot(y))
         torch.addr(torch.chain_matmul(torch.addr(self.I, s, y, alpha=-rho),
                                       self.H,
                                       torch.addr(self.I, y, s,
                                                  alpha=-rho)),
                    s,
                    s,
                    alpha=rho,
                    out=self.H)
     else:
         if self.n_updates == 0:
             self.B.mul_(rho * y.dot(y))
         Bs = torch.mv(self.B, s)
         torch.addr(torch.addr(self.B, y, y, alpha=rho),
                    Bs,
                    Bs,
                    alpha=s.dot(Bs).reciprocal().neg(),
                    out=self.B)
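A small check, with hypothetical sizes, that the nested torch.addr calls in the inverse branch implement the textbook BFGS inverse-Hessian update H <- (I - rho*s*y^T) H (I - rho*y*s^T) + rho*s*s^T:

import torch

n = 5
s, y = torch.randn(n), torch.randn(n)
H = torch.eye(n)
I = torch.eye(n)
rho = (1.0 / y.dot(s)).item()

left = I - rho * torch.outer(s, y)
right = I - rho * torch.outer(y, s)
expected = left @ H @ right + rho * torch.outer(s, s)

actual = torch.addr(torch.addr(I, s, y, alpha=-rho) @ H @ torch.addr(I, y, s, alpha=-rho),
                    s, s, alpha=rho)
print(torch.allclose(expected, actual))  # True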
Example No. 10
 def forward(self):
     return torch.addr(self.input_one, self.vec1, self.vec2)
Example No. 11
# Dot product of 2 tensors
r = torch.dot(torch.tensor([4., 2.]), torch.tensor([3., 1.]))  # 14

# Outer product of 2 vectors
# Size 3x2
v1 = torch.arange(1., 4.)  # Size 3 (float endpoints keep the dtype floating point)
v2 = torch.arange(1., 3.)  # Size 2
r = torch.ger(v1, v2)      # ger is a deprecated alias of torch.outer

# Add M with outer product of 2 vectors
# Size 3x2
vec1 = torch.arange(1., 4.)  # Size 3
vec2 = torch.arange(1., 3.)  # Size 2
M = torch.zeros(3, 2)
r = torch.addr(M, vec1, vec2)

# Batch Matrix x Matrix
# Size 10x3x5
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
r = torch.bmm(batch1, batch2)

# Batch Matrix + Matrix x Matrix
# Performs a batch matrix-matrix product
# 3x4 + (5x3x4 X 5x4x2 ) -> 5x3x2
M = torch.randn(3, 2)
batch1 = torch.randn(5, 3, 4)
batch2 = torch.randn(5, 4, 2)
r = torch.addbmm(M, batch1, batch2)
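For reference, torch.addbmm sums the batch of matrix products before adding M. A minimal sketch verifying that:

import torch

M = torch.randn(3, 2)
batch1 = torch.randn(5, 3, 4)
batch2 = torch.randn(5, 4, 2)

expected = M + torch.bmm(batch1, batch2).sum(dim=0)
print(torch.allclose(torch.addbmm(M, batch1, batch2), expected, atol=1e-5))  # True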
Example No. 12
 def forward(self, x, y, z):
     out1 = x.addr(y, z, beta=2, alpha=3)
     out2 = torch.addr(x, y, z, beta=2, alpha=3)
     return out1, out2
Example No. 13
 def observe_reward(self, features, action_taken, reward):
     a_t = action_taken
     # rank-1 update of the action's covariance statistics: A_a += x x^T
     self.As[a_t] = torch.addr(self.As[a_t], features.squeeze(), features.squeeze())
     self.bs[a_t] += reward * features
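These are the LinUCB sufficient statistics (A_a += x x^T, b_a += r x). A minimal sketch, with hypothetical names, of how an action score would then be derived from them:

import torch

d = 8
A = torch.eye(d)    # per-action covariance statistics
b = torch.zeros(d)  # per-action reward statistics
x = torch.randn(d)  # feature vector of the current context
alpha = 1.0         # exploration strength

A_inv = torch.inverse(A)
theta = A_inv @ b   # ridge-regression estimate
ucb = theta @ x + alpha * torch.sqrt(x @ A_inv @ x)  # optimistic score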