Example #1
0
def log_matrix_diagonal(mat):
    """Return `mat` with each diagonal entry replaced by its logarithm.

    Off-diagonal entries are left untouched.  Requires a square matrix.
    """
    assert mat.shape[0] == mat.shape[1]
    diag_vals = np.diag(mat)
    # make_diagonal() is only defined in the autograd version of numpy
    logged_diag = np.make_diagonal(
        np.log(diag_vals), offset=0, axis1=-1, axis2=-2)
    plain_diag = np.make_diagonal(diag_vals, offset=0, axis1=-1, axis2=-2)
    # Add the logged diagonal and subtract the original one.
    return logged_diag + mat - plain_diag
Example #2
0
def test_diagonal():
    """Check gradients of np.diagonal on a 2-D and a batched 3-D input."""
    def fun(D):
        return to_scalar(np.diagonal(D, axis1=-1, axis2=-2))

    D = np.random.randn(4, 4)
    # Smoke-test that make_diagonal accepts this shape; the result itself
    # is not needed (previously bound to an unused local).
    np.make_diagonal(D, axis1=-1, axis2=-2)
    check_grads(fun, D)

    D = np.random.randn(3, 4, 4)
    np.make_diagonal(D, axis1=-1, axis2=-2)
    check_grads(fun, D)
Example #3
0
def test_diagonal():
    """Check gradients of np.diagonal on a 2-D and a batched 3-D input."""
    def fun(D):
        return to_scalar(np.diagonal(D, axis1=-1, axis2=-2))

    D = np.random.randn(4, 4)
    # Smoke-test that make_diagonal accepts this shape; the result itself
    # is not needed (previously bound to an unused local).
    np.make_diagonal(D, axis1=-1, axis2=-2)
    check_grads(fun, D)

    D = np.random.randn(3, 4, 4)
    np.make_diagonal(D, axis1=-1, axis2=-2)
    check_grads(fun, D)
Example #4
0
def test_make_diagonal():
    """make_diagonal should invert np.diag and be differentiable."""
    def fun(D):
        return to_scalar(np.make_diagonal(D, axis1=-1, axis2=-2))

    # 1-D input -> a single diagonal matrix.
    vec = np.random.randn(4)
    mat = np.make_diagonal(vec, axis1=-1, axis2=-2)
    assert np.allclose(np.diag(mat), vec)
    check_grads(fun, vec)

    # 2-D input -> a batch of diagonal matrices.
    batch = np.random.randn(3, 4)
    mats = np.make_diagonal(batch, axis1=-1, axis2=-2)
    assert all([np.allclose(np.diag(mats[i]), batch[i]) for i in range(3)])
    check_grads(fun, batch)
Example #5
0
def test_make_diagonal():
    """make_diagonal should invert np.diag and be differentiable."""
    def fun(D):
        return to_scalar(np.make_diagonal(D, axis1=-1, axis2=-2))

    # 1-D input -> a single diagonal matrix.
    vec = np.random.randn(4)
    mat = np.make_diagonal(vec, axis1=-1, axis2=-2)
    assert np.allclose(np.diag(mat), vec)
    check_grads(fun, vec)

    # 2-D input -> a batch of diagonal matrices.
    batch = np.random.randn(3, 4)
    mats = np.make_diagonal(batch, axis1=-1, axis2=-2)
    assert all([np.allclose(np.diag(mats[i]), batch[i]) for i in range(3)])
    check_grads(fun, batch)
Example #6
0
def _unvectorize_symmetric_matrix(vec_val):
    """Rebuild a symmetric matrix from its lower-triangular vectorization."""
    lower = _unvectorize_ld_matrix(vec_val)
    symmetric = lower + lower.transpose()
    # Summing with the transpose double-counts the diagonal, so one copy is
    # subtracted.  For some reason the autograd diagonal functions require
    # axis1=-1 and axis2=-2.
    extra_diag = np.make_diagonal(
        np.diagonal(lower, axis1=-1, axis2=-2), axis1=-1, axis2=-2)
    return symmetric - extra_diag
Example #7
0
File: gmm.py  Project: zerkh/autograd
 def unpack_params(params):
     """Split the flat parameter vector into mixture proportions, means,
     and Cholesky factors of the component covariances."""
     raw_log_props = parser.get(params, "log proportions")
     # Normalize in log space so the proportions sum to one.
     normalized_log_proportions = raw_log_props - logsumexp(raw_log_props)
     means = parser.get(params, "means")
     # Strictly-lower triangles plus exponentiated log-diagonals yield
     # Cholesky factors with positive diagonal entries.
     strict_lower = np.tril(parser.get(params, "lower triangles"), k=-1)
     pos_diags = np.exp(parser.get(params, "log diagonals"))
     chols = strict_lower + np.make_diagonal(pos_diags, axis1=-1, axis2=-2)
     return normalized_log_proportions, means, chols
Example #8
0
File: gmm.py  Project: viveksck/autograd
 def unpack_params(params):
     """Split the flat parameter vector into mixture proportions, means,
     and Cholesky factors of the component covariances."""
     raw_log_props = parser.get(params, 'log proportions')
     # Normalize in log space so the proportions sum to one.
     normalized_log_proportions = raw_log_props - logsumexp(raw_log_props)
     means = parser.get(params, 'means')
     # Strictly-lower triangles plus exponentiated log-diagonals yield
     # Cholesky factors with positive diagonal entries.
     strict_lower = np.tril(parser.get(params, 'lower triangles'), k=-1)
     pos_diags = np.exp(parser.get(params, 'log diagonals'))
     chols = strict_lower + np.make_diagonal(pos_diags, axis1=-1, axis2=-2)
     return normalized_log_proportions, means, chols
Example #9
0
def calculate_b(Ys, X, Us):
    """Weighted estimate of the bias term, averaged over all users.

    NOTE(review): relies on a module-level constant D for the slice width
    of Us — confirm against the defining module.
    """
    numerator = 0.
    denominator = 0
    for i, Y in enumerate(Ys):
        pos = Y == 1
        neg = Y == -1
        # Fraction of observed entries that are positive.
        pos_rate = np.sum(pos) / np.sum(pos + neg)
        # Positives weighted by 1, negatives down-weighted by the rate.
        weights = (pos) * 1 + (neg) * pos_rate
        U = np.make_diagonal(Us[D * i:D * (i + 1)], axis1=-1, axis2=-2)
        residual = Y - np.dot(np.dot(X, U), (X.T))
        numerator += np.sum(weights * residual)
        denominator += np.sum(pos + neg)
    return numerator / denominator
Example #10
0
def loss_all(Ys, X, Us, b, lambda_x=1., lambda_u=1., sigma_u=0.5):
    """Total objective: per-user data loss plus regularizers on X and Us.

    NOTE(review): relies on a module-level constant D for the slice width
    of Us — confirm against the defining module.
    """
    data_loss = 0
    reg_u = 0
    n_users = int(Us.size / D)
    for i in range(n_users):
        user_diag = np.make_diagonal(
            Us[D * i:D * (i + 1)], axis1=-1, axis2=-2)
        data_loss += loss_M(Ys[i], X, user_diag, b)
        # Penalize deviation of each user's diagonal from sigma_u * I.
        shifted = user_diag - sigma_u * np.eye(user_diag.shape[0])
        reg_u += np.linalg.norm(shifted, ord='fro')**2

    reg_x = lambda_x * np.sum((np.linalg.norm(X, axis=1)**2))
    return reg_x + lambda_u * reg_u + data_loss
Example #11
0
 def J(self):
     """Diagonal matrix of inverse variances (1 / std**2)."""
     inv_variances = 1 / self.std**2
     return np.make_diagonal(inv_variances, axis1=-1, axis2=-2)
Example #12
0
 def fun(D):
     """Scalar reduction of make_diagonal, used for gradient checking."""
     diag_mat = np.make_diagonal(D, axis1=-1, axis2=-2)
     return to_scalar(diag_mat)
Example #13
0
def _pack_posdef_matrix(mat, diag_lb=0.0):
    """Map a positive-definite matrix to an unconstrained free vector.

    Subtracts the diagonal lower bound, Cholesky-factorizes, log-transforms
    the factor's diagonal, and vectorizes the lower triangle.
    """
    dim = mat.shape[0]
    lb_diag = np.make_diagonal(
        np.full(dim, diag_lb), offset=0, axis1=-1, axis2=-2)
    shifted = mat - lb_diag
    chol_logged = _log_matrix_diagonal(np.linalg.cholesky(shifted))
    return _vectorize_ld_matrix(chol_logged)
Example #14
0
def _unpack_posdef_matrix(free_vec, diag_lb=0.0):
    """Inverse of the pack step: free vector -> positive-definite matrix."""
    chol = _exp_matrix_diagonal(_unvectorize_ld_matrix(free_vec))
    mat = np.matmul(chol, chol.T)
    dim = mat.shape[0]
    # Restore the diagonal lower bound removed when packing.
    lb_diag = np.make_diagonal(
        np.full(dim, diag_lb), offset=0, axis1=-1, axis2=-2)
    return mat + lb_diag
Example #15
0
 def fun(D):
     """Scalar reduction of make_diagonal, used for gradient checking."""
     diag_mat = np.make_diagonal(D, axis1=-1, axis2=-2)
     return to_scalar(diag_mat)
Example #16
0
 def fun(D):
     """Build a (batched) diagonal matrix from D along the last two axes."""
     result = np.make_diagonal(D, axis1=-1, axis2=-2)
     return result
Example #17
0
 def sigma(self):
     """Diagonal covariance matrix built from the per-dimension variances."""
     variances = self.std**2
     return np.make_diagonal(variances, axis1=-1, axis2=-2)