Example #1
    def test_kwarg_only(self) -> None:
        with capture_logs() as logs:
            x = LoggingTensor(torch.ones(1))
            y = LoggingTensor(torch.ones(1, 1))
            z = LoggingTensor(torch.ones(1))
            log_input("x", x)
            log_input("y", y)
            log_input("z", z)
            torch.addmv(x, y, z)
            torch.addmv(x, y, z, beta=1)
            torch.addmv(x, y, z, beta=2)
            torch.addmv(x, y, z, alpha=2)
            torch.addmv(x, y, z, beta=2, alpha=2)

        # The expectation is that beta/alpha don't show up in the log when they
        # take their default values, even if the user passed the defaults explicitly.
        self.assertExpectedInline('\n'.join(logs), '''\
$0 = input('x')
$1 = input('y')
$2 = input('z')
$3 = torch._ops.aten.addmv($0, $1, $2)
$4 = torch._ops.aten.addmv($0, $1, $2)
$5 = torch._ops.aten.addmv($0, $1, $2, beta=2)
$6 = torch._ops.aten.addmv($0, $1, $2, alpha=2)
$7 = torch._ops.aten.addmv($0, $1, $2, beta=2, alpha=2)''')
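
A minimal standalone sketch of the behaviour the expected log above encodes: with the defaults beta=1, alpha=1, explicitly passing the defaults leaves the result unchanged, while a non-default value does change it. Plain tensors here rather than the LoggingTensor wrapper; the shapes mirror the test.

import torch

x = torch.ones(1)      # vector that is added (scaled by beta)
y = torch.ones(1, 1)   # matrix
z = torch.ones(1)      # vector that is multiplied (y @ z, scaled by alpha)

# beta=1 and alpha=1 are the defaults, so these calls are numerically identical.
assert torch.allclose(torch.addmv(x, y, z), torch.addmv(x, y, z, beta=1, alpha=1))
# A non-default scale does change the result: out = beta * x + alpha * (y @ z).
assert torch.allclose(torch.addmv(x, y, z, beta=2), 2 * x + y @ z)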
Example #2
    def forward(self, input_, hx=None):
        """
        An Elman RNN cell with tanh or ReLU non-linearity.
        h' = tanh/relu(w_{ih} x + b_{ih}  +  w_{hh} h + b_{hh})
        """
        # print(self.d_rec)

        if hx is None:
            hx = input_.new_zeros(self.hidden_size, requires_grad=False)

        c_prev = hx

        w_x = torch.addmv(self.bias_ih, self.weight_ih, input_)
        w_h = torch.addmv(self.bias_hh, self.weight_hh, hx)
        w_w = (w_x + w_h)

        c_tilda = self.tanh(w_w[0:self.hidden_size])
        gate = self.sigmoid(w_w[self.hidden_size:2 * self.hidden_size])

        # inp = torch.mv(self.weight_ir, input_)
        # prevh = torch.mv(self.weight_hr, (r * hx))

        c = ((1 - gate) * c_prev) + (gate * c_tilda)

        h = c

        return h, gate
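
The two addmv calls above are the usual one-call affine map: with the default beta=alpha=1, torch.addmv(bias, weight, x) equals bias + weight @ x. A minimal sketch with illustrative sizes (hidden_size and the weight/bias names below are stand-ins, not the module's real attributes):

import torch

hidden_size, input_size = 4, 3
weight_ih = torch.randn(2 * hidden_size, input_size)   # stacked candidate/gate weights
bias_ih = torch.randn(2 * hidden_size)
x = torch.randn(input_size)

w_x = torch.addmv(bias_ih, weight_ih, x)   # bias_ih + weight_ih @ x in one kernel
assert torch.allclose(w_x, bias_ih + weight_ih @ x)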
Example #3
def native_update(Beta,
                  Delta,
                  Sigma,
                  nonnegative=True,
                  batch=None):
    """ Native Pytorch Implementation Of HALS Update

    Parameter:
        Beta:
        Delta:
        Sigma:
        nonnegative:
        batch: batch size

    """

    # TODO Implement Batching To Mitigate GPU Bottleneck
    for ndx in range(Beta.shape[0]):
        tmp_scale = 1.0 / Sigma[ndx, ndx].item()
        torch.addmv(Delta[ndx],
                    Beta.t(),
                    Sigma[ndx],
                    alpha=-1 * tmp_scale,
                    beta=tmp_scale,
                    out=Delta[ndx])
        Beta[ndx].add_(Delta[ndx])
        if nonnegative:
            torch.nn.functional.relu(Beta[ndx],
                                     inplace=True)
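
Each iteration above forms beta * Delta[ndx] + alpha * (Beta.t() @ Sigma[ndx]) and writes it back into the same row via out=Delta[ndx]. A toy-shaped check of just that arithmetic, without the in-place write (all names and sizes here are illustrative, not taken from the original project):

import torch

k, n = 5, 3
Beta = torch.randn(k, n)
Delta = torch.randn(k, n)
Sigma = torch.randn(k, k) + k * torch.eye(k)   # keep the diagonal away from zero

ndx = 0
scale = 1.0 / Sigma[ndx, ndx].item()
row = torch.addmv(Delta[ndx], Beta.t(), Sigma[ndx], beta=scale, alpha=-scale)
assert torch.allclose(row, scale * Delta[ndx] - scale * (Beta.t() @ Sigma[ndx]))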
Example #4
    def forward(self, input_, hx=None):
        """
            begin{array}{ll}
            i = sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
            f = sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
            g = tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
            o = sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
            c' = f * c + i * g \\
            h' = o * tanh(c') \\
            end{array}
        """
        if hx is None:
            hx = input_.new_zeros(self.hidden_size, requires_grad=False)
            hx = (hx, hx)

        use_gate = rectify(self.igate)
        if (self.igate > 1.0):
            #     use_gate = self.igate - 1.0
            print('ho')

        hprev, cprev = hx
        w_x = torch.addmv(self.bias_ih, self.weight_ih, input_)
        w_h = torch.addmv(self.bias_hh, self.weight_hh, hprev)
        w_w = w_x + w_h

        i = self.sigmoid(w_w[0:self.hidden_size])
        f = self.sigmoid(w_w[self.hidden_size:2 * self.hidden_size])
        o = self.sigmoid(w_w[2 * self.hidden_size:3 * self.hidden_size])
        g = self.tanh(w_w[3 * self.hidden_size:4 * self.hidden_size])

        c = (f * cprev) + (i * g)
        h = o * self.tanh(c)

        return (h, c), o
Example #5
    def forward(self, input_, hx=None):
        """
        An Elman RNN cell with tanh or ReLU non-linearity.
        h' = tanh/relu(w_{ih} x + b_{ih}  +  w_{hh} h + b_{hh})
        """
        # print(self.d_rec)
        # print (self.rgate)

        if hx is None:
            hx = input_.new_zeros(self.hidden_size, requires_grad=False)

        #dale_hh = torch.mm(self.relu(self.weight_hh), self.d_rec)

        if (self.bias):
            w_x = torch.addmv(self.bias_ih, self.weight_ih, input_)
            #w_h = torch.addmv(self.bias_hh, dale_hh, hx)
            w_h = torch.addmv(self.bias_hh, self.weight_hh, hx)

        else:
            w_x = torch.mv(self.weight_ih, input_)
            #w_h = torch.mv(dale_hh, hx)
            w_h = torch.mv(self.weight_hh, hx)

        w_w = ((self.rgate) * hx) + ((1 - (self.rgate)) * (w_x + w_h))

        h = self.relu(w_w)

        return h
Example #6
    def get_elig(self, task_indicator_pm):
        ANDmat = self.ANDmat
        b_AND = self.b_AND
        ORmat = self.ORmat # nb_or x
        b_OR = self.b_OR

        indicator = task_indicator_pm.type(torch.float)

        ANDout = torch.addmv(-b_AND, ANDmat, indicator).sign().ne(-1).type(torch.float) #sign(A x indic + b) (+1 or 0)
        elig_hard = torch.addmv(-b_OR, ORmat, ANDout).sign().ne(-1)
        return elig_hard
Example #7
File: blas.py  Project: Northrend/pytorch
 def forward(ctx, add_vector, matrix, vector, alpha=1, beta=1, inplace=False):
     ctx.alpha = alpha
     ctx.beta = beta
     ctx.add_vector_size = add_vector.size()
     ctx.save_for_backward(matrix, vector)
     output = _get_output(ctx, add_vector, inplace=inplace)
     return torch.addmv(alpha, add_vector, beta,
                        matrix, vector, out=output)
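
This example (and #8 and #9 below) comes from old PyTorch autograd code and uses a positional overload of torch.addmv in which the scalar scale factors were passed before the tensors; current PyTorch no longer accepts that form and takes beta/alpha as keyword-only arguments, computing out = beta * input + alpha * (mat @ vec). A minimal sketch of the keyword form (shapes are illustrative; the exact mapping of the snippet's positional scalars onto beta/alpha is not reproduced here):

import torch

add_vector = torch.randn(2)
matrix = torch.randn(2, 3)
vector = torch.randn(3)

# Keyword-only scale factors in current PyTorch:
# output = beta * add_vector + alpha * (matrix @ vector)
output = torch.addmv(add_vector, matrix, vector, beta=1, alpha=1)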
Example #8
 def forward(ctx, add_vector, matrix, vector, alpha=1, beta=1, inplace=False):
     ctx.alpha = alpha
     ctx.beta = beta
     ctx.add_vector_size = add_vector.size()
     ctx.save_for_backward(matrix, vector)
     output = _get_output(ctx, add_vector, inplace=inplace)
     return torch.addmv(alpha, add_vector, beta,
                        matrix, vector, out=output)
Example #9
 def forward(self, add_vector, matrix, vector):
     self.save_for_backward(matrix, vector)
     output = self._get_output(add_vector)
     return torch.addmv(self.alpha,
                        add_vector,
                        self.beta,
                        matrix,
                        vector,
                        out=output)
Example #10
File: base.py  Project: hfxunlp/transformer
	def forward(self, x):

		xsize = x.size()

		out = torch.addmv(self.bias, x.view(-1, xsize[-1]), self.w) if self.bias else x.view(-1, xsize[-1]).mv(self.w)

		rsize = list(xsize)
		rsize[-1] = 1

		return out.view(rsize)
Example #11
    def forward(self, x):
        xsize = x.size()

        out = torch.addmv(self.bias, x.view(-1, xsize[-1]), self.w)

        xsize = list(xsize)
        xsize[-1] = 1

        return ((torch.abs(self.k) + self.minv) *
                (self.act(out) + 1)).view(xsize)
Example #12
File: math_ops.py  Project: malfet/pytorch
 def blas_lapack_ops(self):
     m = torch.randn(3, 3)
     a = torch.randn(10, 3, 4)
     b = torch.randn(10, 4, 3)
     v = torch.randn(3)
     return (
         torch.addbmm(m, a, b),
         torch.addmm(torch.randn(2, 3), torch.randn(2, 3),
                     torch.randn(3, 3)),
         torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
         torch.addr(torch.zeros(3, 3), v, v),
         torch.baddbmm(m, a, b),
         torch.bmm(a, b),
         torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3),
                            torch.randn(3, 3)),
         # torch.cholesky(a), # deprecated
         torch.cholesky_inverse(torch.randn(3, 3)),
         torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
         torch.dot(v, v),
         torch.eig(m),
         torch.geqrf(a),
         torch.ger(v, v),
         torch.inner(m, m),
         torch.inverse(m),
         torch.det(m),
         torch.logdet(m),
         torch.slogdet(m),
         torch.lstsq(m, m),
         torch.lu(m),
         torch.lu_solve(m, *torch.lu(m)),
         torch.lu_unpack(*torch.lu(m)),
         torch.matmul(m, m),
         torch.matrix_power(m, 2),
         # torch.matrix_rank(m),
         torch.matrix_exp(m),
         torch.mm(m, m),
         torch.mv(m, v),
         # torch.orgqr(a, m),
         # torch.ormqr(a, m, v),
         torch.outer(v, v),
         torch.pinverse(m),
         # torch.qr(a),
         torch.solve(m, m),
         torch.svd(a),
         # torch.svd_lowrank(a),
         # torch.pca_lowrank(a),
         # torch.symeig(a), # deprecated
         # torch.lobpcg(a, b), # not supported
         torch.trapz(m, m),
         torch.trapezoid(m, m),
         torch.cumulative_trapezoid(m, m),
         # torch.triangular_solve(m, m),
         torch.vdot(v, v),
     )
Example #13
 def init_fusion(self):
     print("Fusing BN-FC")
     bn_weight_var = torch.mul(
         self.batch_norm.weight.data,
         torch.rsqrt(self.batch_norm.running_var + self.batch_norm.eps))
     bias_coeff = self.batch_norm.bias.data - torch.mul(
         self.batch_norm.running_mean, bn_weight_var)
     self.linear.bias.data = torch.addmv(self.linear.bias.data,
                                         self.linear.weight.data,
                                         bias_coeff)
     self.linear.weight.data = self.linear.weight.data * bn_weight_var.expand_as(
         self.linear.weight.data)
Example #14
    def forward(self, input_, hx = None):
        """
        An Elman RNN cell with tanh or ReLU non-linearity.
        h' = tanh/relu(w_{ih} x + b_{ih}  +  w_{hh} h + b_{hh})
        """
        # print(self.d_rec)

        if hx is None:
            hx = input_.new_zeros(self.hidden_size, requires_grad=False)

        w_x = torch.addmv(self.bias_ih, self.weight_ih, input_)
        w_h = torch.addmv(self.bias_hh, self.weight_hh, hx)
        w_w = (w_x + w_h)

        z = self.sigmoid(w_w[0 : self.hidden_size])
        r = self.sigmoid(w_w[self.hidden_size : 2*self.hidden_size])

        inp = torch.mv(self.weight_ir, input_)
        prevh = torch.mv(self.weight_hr, (r * hx))

        h = ((1 - z) * hx) + (z * self.relu(inp + prevh + self.bias_r))

        return h, z, r
Example #15
    def forward(self, input_, hx=None):

        if hx is None:
            hx = input_.new_zeros(self.hidden_size, requires_grad=False)

        # dale_hh = torch.mm(self.relu(self.weight_hh), self.d_rec)
        if (self.bias):
            w_x = torch.addmv(self.bias_ih, self.weight_ih, input_)
        else:
            w_x = torch.mv(self.weight_ih, input_)

        w_w = ((self.rgate) * hx) + ((1 - (self.rgate)) * (w_x))

        h = self.relu(w_w)

        return h
Example #16
    def exact_predictive_mean(self, test_mean, test_train_covar):
        """
        Computes the posterior predictive mean of a GP

        Args:
            test_mean (:obj:`torch.tensor`): The test prior mean
            test_train_covar (:obj:`gpytorch.lazy.LazyTensor`): Covariance matrix between test and train inputs

        Returns:
            :obj:`torch.tensor`: The predictive posterior mean of the test points
        """
        # For efficiency - we can use addmv in the 2d case
        if test_train_covar.dim() == 2:
            res = torch.addmv(test_mean, delazify(test_train_covar), self.mean_cache)
        # In other cases - we'll use the standard infrastructure
        else:
            res = (test_train_covar @ self.mean_cache.unsqueeze(-1)).squeeze(-1)
            res = res + test_mean
        return res
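
The 2-D fast path relies on addmv fusing the matrix-vector product and the mean addition into one call; the general branch does the same arithmetic with matmul plus an explicit add. A small equivalence sketch with plain tensors (the names below are stand-ins, not GPyTorch objects):

import torch

test_mean = torch.randn(4)
covar = torch.randn(4, 6)        # stands in for delazify(test_train_covar)
mean_cache = torch.randn(6)

fused = torch.addmv(test_mean, covar, mean_cache)
unfused = (covar @ mean_cache.unsqueeze(-1)).squeeze(-1) + test_mean
assert torch.allclose(fused, unfused)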
Example #17
    def step(self, A_batch, b_batch):
        # helper variables
        x = self._x
        step_size = self._step_size
        m = A_batch.shape[0]  # number of rows = batch size

        # compute linear system coefficients
        P_batch = torch.addmm(torch.eye(m, dtype=A_batch.dtype), A_batch, A_batch.t(), beta=m, alpha=step_size)
        rhs = torch.addmv(b_batch, A_batch, x)

        # solve positive-definite linear system using Cholesky factorization
        P_factor = torch.cholesky(P_batch)
        rhs_chol = rhs.unsqueeze(1)
        s_star = torch.cholesky_solve(rhs_chol, P_factor)

        # perform step
        step_dir = torch.mm(A_batch.t(), s_star)
        x.sub_(step_size * step_dir.reshape(x.shape))

        # return the losses w.r.t the params before making the step
        return 0.5 * (rhs ** 2)
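
The addmm call above builds the system matrix m * I + step_size * (A_batch @ A_batch.t()) in one fused op: beta scales the identity, alpha scales the product. A toy check of that identity (sizes are illustrative):

import torch

m, d = 4, 7
A_batch = torch.randn(m, d)
step_size = 0.5

P_batch = torch.addmm(torch.eye(m), A_batch, A_batch.t(), beta=m, alpha=step_size)
assert torch.allclose(P_batch, m * torch.eye(m) + step_size * (A_batch @ A_batch.t()))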
Example #18
 print(torch.clamp(x, min=0.5))
 print(torch.clamp(x, max=0.5))
 print(torch.trunc(x))  # truncated integer values
 print(torch.frac(x))  # fractional portion of each element
 print(x.add(1))
 print(torch.exp(x))
 print(torch.expm1(x))
 print(torch.logit(x))
 print(torch.mul(x, 100))
 print(torch.addcdiv(t, t1, t2, value=0.1))  # t + value * t1 / t2
 print(torch.addcmul(t, t1, t2, value=0.1))  # t + value * t1 * t2
 print(torch.addmm(M, mat1, mat2))  # beta * M + alpha * mat1 * mat2
 print(torch.matmul(mat1, mat2))  # mat1 * mat2
 print(torch.mm(mat1, mat2))  # mat1 * mat2
 print(torch.matrix_power(mat1, 2))  # mat1 * mat1
 print(torch.addmv(x, mat1, x))  # beta * x + alpha * (mat1 @ x)
 print(torch.mv(mat1, x))  # mat * vec
 print(torch.outer(x, x))  # vec1⊗vec2
 print(torch.renorm(mat1, 1, 0, 5))
 input_ = torch.tensor([10000., 1e-07])
 other_ = torch.tensor([10000.1, 1e-08])
 print(torch.floor_divide(input_, other_))  # trunc(input_ / other_)
 print(torch.allclose(input_, other_))  # ∣input−other∣≤atol+rtol×∣other∣
 print(torch.isclose(input_, other_))  # ∣input−other∣≤atol+rtol×∣other∣
 print(mat1)
 print(torch.where(mat1 > 0, mat1, -mat1))
 print(torch.amax(mat1, 0))  # column-wise
 print(torch.amax(mat1, 1))  # row-wise
 print(torch.max(mat1, 0))  # column-wise
 print(torch.max(mat1, 1))  # row-wise
 print(torch.argmax(mat1))  # over all elements
Example #19
#                        8. Matrix, vector multiplication
# ================================================================== #

# Dot product
dot_product_result = torch.dot(torch.Tensor([4, 2]), torch.Tensor([3, 1]))

# Matrix X vector
mat = torch.randn(2, 4)
vec = torch.randn(4)
result_1 = torch.mv(mat, vec)

# Matrix + Matrix X vector
M = torch.randn(2)
mat = torch.randn(2, 3)
vec = torch.randn(3)
result_2 = torch.addmv(M, mat, vec)

# Matrix, Matrix products
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 4)
result = torch.mm(mat1, mat2)

# Outer product of vectors
v1 = torch.arange(1, 4)    # Size 3
v2 = torch.arange(1, 3)    # Size 2
result = torch.ger(v1, v2)

"""
Other matrix Operations
**************************************** 
torch.cross           - cross product
 def exact_posterior_mean(self, test_mean, alpha):
     if isinstance(self.var, LazyVariable):
         return self.var.matmul(alpha) + test_mean
     return torch.addmv(test_mean, self.var, alpha)
Example #21
# upper-triangular matrix
torch.triu(a)

# Multiply matrices mat1 and mat2, and add matrix M to the result. If mat1 is an n×m tensor and mat2 an m×p tensor,
# then out and M have shape n×p. alpha and beta are scale factors for mat1 @ mat2 and M respectively,
# i.e. out = (beta * M) + (alpha * mat1 @ mat2)
M = torch.ones(2, 2)
mat1 = torch.Tensor([[1, 2], [3, 4]])
mat2 = torch.Tensor([[1, 2], [3, 4]])
torch.addmm(M, mat1, mat2)

# torch.addmv: multiply matrix mat by vector vec and add the vector tensor to the result. If mat is an n×m matrix
# and vec an m-dimensional vector, then out is an n-dimensional vector. The optional arguments beta and alpha are
# scale factors for tensor and mat @ vec respectively, i.e. out = (beta * tensor) + (alpha * (mat @ vec))
M = torch.randn(2)
mat = torch.randn(2, 3)
vec = torch.randn(3)
torch.addmv(M, mat, vec)

# dot product
torch.dot(torch.Tensor([1, 2]), torch.Tensor([1, 2]))

# eigendecomposition
torch.eig(torch.randn(4, 4), eigenvectors=True)

# least-squares solution
A = torch.Tensor([[1, 1, 1], [2, 3, 4], [3, 5, 2], [4, 2, 5], [5, 4, 3]])
B = torch.Tensor([[-10, -3], [12, 14], [14, 12], [16, 16], [18, 16]])
X, _ = torch.gels(B, A)
X

# solve the linear system AX = B
A = torch.Tensor([[6.80, -2.11, 5.66, 5.97, 8.23],
Example #22
 def forward(self, x, y, z):
     out1 = x.addmv(y, z, beta=0.1, alpha=0.2)
     out2 = torch.addmv(x, y, z, beta=0.1, alpha=0.2)
     return out1, out2
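
out1 and out2 are the method and function spellings of the same op and should agree; a quick check under the shapes this forward implicitly assumes (1-D x, 2-D y, 1-D z with matching sizes):

import torch

x, y, z = torch.randn(2), torch.randn(2, 3), torch.randn(3)
assert torch.allclose(x.addmv(y, z, beta=0.1, alpha=0.2),
                      torch.addmv(x, y, z, beta=0.1, alpha=0.2))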