def uniform_normal_tsm(T, n, d, loc, scale, random_state=None):
    random_state = np.random.RandomState(random_state)
    tsm = TimeSerialMatrix(dim=d, random_state=random_state)
    # first type: beta-distributed variances, timestamps uniform on [0, T]
    variances = random_state.beta(1, 10, size=d)
    if d < 2000:
        dirs = ortho_group.rvs(dim=d, random_state=random_state)
    else:
        dirs = None  # sampling a d x d orthogonal matrix is too costly here
    ts_list = uniform.rvs(0, T, size=n // 2, random_state=random_state)
    tsm.add_type(vars=variances, dirs=dirs, ts_list=ts_list)
    # second type: d // 10 larger variances, timestamps normal around loc
    variances = random_state.beta(1, 10, size=d // 10) * 10
    if d < 2000:
        dirs = ortho_group.rvs(dim=d, random_state=random_state)[:d // 10]
    else:
        dirs = None
        variances = np.pad(variances, (0, d - len(variances)),
                           'constant',
                           constant_values=(0, 0))
    ts_list = norm.rvs(loc, scale, size=n // 2, random_state=random_state)
    ts_list = np.clip(ts_list, 1, T)  # keep timestamps inside (0, T]
    tsm.add_type(vars=variances, dirs=dirs, ts_list=ts_list)
    return tsm
def gaussian_orthogonal_random_matrix(nb_rows,
                                      nb_columns,
                                      scaling=0,
                                      device=None):
    nb_full_blocks = nb_rows // nb_columns

    block_list = []

    for _ in range(nb_full_blocks):
        q = torch.FloatTensor(ortho_group.rvs(nb_columns),
                              device='cpu').to(device)
        block_list.append(q)

    remaining_rows = nb_rows - nb_full_blocks * nb_columns
    if remaining_rows > 0:
        q = torch.FloatTensor(ortho_group.rvs(nb_columns),
                              device='cpu').to(device)
        block_list.append(q[:remaining_rows])

    final_matrix = torch.cat(block_list)

    if scaling == 0:
        multiplier = torch.randn((nb_rows, nb_columns),
                                 device=device).norm(dim=1)
    elif scaling == 1:
        multiplier = math.sqrt((float(nb_columns))) * torch.ones(
            (nb_rows, ), device=device)
    else:
        raise ValueError(f'Invalid scaling {scaling}')

    return torch.diag(multiplier) @ final_matrix
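A minimal usage sketch for the block-orthogonal Gaussian matrix above (it assumes the same torch, math, and scipy.stats.ortho_group imports the snippet relies on); with scaling=1 every 64-row block is a scaled orthogonal matrix:

import math
import torch
from scipy.stats import ortho_group

proj = gaussian_orthogonal_random_matrix(256, 64, scaling=1)
print(proj.shape)  # torch.Size([256, 64])
# Rows within one block are mutually orthogonal with norm sqrt(64):
block = proj[:64] / math.sqrt(64.0)
print(torch.allclose(block @ block.T, torch.eye(64), atol=1e-4))  # True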
Example #3
def gen_quadratic_data(n,
                       d=2,
                       lambda_min=1,
                       lambda_max=1,
                       logscale=False,
                       theta_star=None,
                       Q=None,
                       offset=0):
    '''
    see distill.ipynb
    '''
    assert n > d, "need n > d, otherwise the problem is ill-conditioned for this illustrative plot"

    if Q is None:
        Q = ortho_group.rvs(d)

    U = ortho_group.rvs(n)
    if not logscale:
        Lambda = np.linspace(lambda_min, lambda_max, d)
    else:
        Lambda = np.logspace(np.log10(lambda_min), np.log10(lambda_max), d)

    Sigma = np.sqrt(Lambda)
    X = U[:, :d].dot(np.diag(Sigma)).dot(Q)
    if theta_star is None:
        theta_star = np.random.randint(5, size=(d, 1))
    #y = np.random.randn(n).reshape(-1,1)
    y = X.dot(theta_star) + offset
    return Q, Lambda, X, y
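By construction X = U[:, :d] diag(sqrt(Lambda)) Q with orthonormal factors, so X^T X = Q^T diag(Lambda) Q and its eigenvalues recover Lambda exactly. A quick hedged sanity check (assuming numpy is imported as np):

import numpy as np

Q, Lambda, X, y = gen_quadratic_data(n=50, d=5, lambda_min=0.1, lambda_max=10.0)
eigs = np.sort(np.linalg.eigvalsh(X.T @ X))
print(np.allclose(eigs, np.sort(Lambda)))  # True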
Example #4
def gen_OLS_data(n,
                 d=2,
                 lambda_min=1,
                 lambda_max=1,
                 d_pad_zeros=0,
                 logscale=False,
                 theta_star=None,
                 Q=None,
                 U=None,
                 noise=0):
    # assert n > d, "n > d otherwise ill condition for this illustrative plot"
    if Q is None:
        Q = ortho_group.rvs(d + d_pad_zeros)
    if U is None:
        U = ortho_group.rvs(n)

    if not logscale:
        Lambda = np.linspace(lambda_min, lambda_max, d)
    else:
        Lambda = np.logspace(np.log10(lambda_min), np.log10(lambda_max), d)

    if d_pad_zeros > 0:
        Lambda = np.concatenate([Lambda, np.zeros(d_pad_zeros)])
    Sigma = np.sqrt(Lambda)
    X = U[:, :(d + d_pad_zeros)].dot(np.diag(Sigma)).dot(
        Q[:(d + d_pad_zeros), :(d + d_pad_zeros)])

    if theta_star is not None:
        y = (X.dot(theta_star) + noise * np.random.randn(len(X))).reshape(
            (n, 1))
    else:
        y = 30 * np.random.randn(n).reshape((n, 1))

    return Q, U, Lambda, X, y
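With d_pad_zeros > 0 the padded singular values are zero, so the returned design matrix is rank deficient by construction; a small hedged check (assuming numpy as np):

import numpy as np

Q, U, Lambda, X, y = gen_OLS_data(n=20, d=3, d_pad_zeros=2)
print(X.shape)                   # (20, 5)
print(np.linalg.matrix_rank(X))  # 3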
def generate_dataset(cond_num, noise_scale, n=500, nt=200, d=50):
    rng = np.random.RandomState(1)
    w_star = rng.randn(d)
    w_star /= np.linalg.norm(w_star)

    # hyper param
    R = np.diag(np.linspace(1, cond_num, num=d))

    # Train

    # sample random orthogonal matrix
    m = ortho_group.rvs(dim=n, random_state=rng)
    U = m[:, :d]
    # print(np.trace(U.T.dot(U)))
    # print(LA.eig(U.T.dot(U))[0])

    X = np.matmul(U, R)  # n x d data matrix
    noise = rng.laplace(scale=noise_scale, size=(n, ))
    y = np.dot(X, w_star)**2 + noise

    # Test
    m = ortho_group.rvs(dim=nt, random_state=rng)
    U = m[:, :d]
    # print(np.trace(U.T.dot(U)))
    # print(LA.eig(U.T.dot(U))[0])

    Xt = np.matmul(U, R)  # n x d data matrix
    noise = rng.laplace(scale=noise_scale, size=(nt, ))
    yt = np.dot(Xt, w_star)**2 + noise
    return X, y, Xt, yt
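Since X = U R with orthonormal columns in U and R = diag(linspace(1, cond_num, d)), the Gram matrix is exactly R^2. A hedged check of the intended conditioning (assuming numpy as np):

import numpy as np

X, y, Xt, yt = generate_dataset(cond_num=10.0, noise_scale=0.1)
R2 = np.diag(np.linspace(1, 10.0, num=50) ** 2)
print(np.allclose(X.T @ X, R2))  # True: singular values run from 1 to cond_num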
 def test_KRON10(self):
     A = ortho_group.rvs(dim=self.p)
     B = ortho_group.rvs(dim=self.p)
     results1 = invert(reduce(np.kron, [A, B]))
     results2 = reduce(np.kron, invert([A, B]))
     self.assertTrue(np.allclose(results1, results2), "Results not equal")
Example #7
def attr_sector(x):
    temp1 = np.matmul(ortho_group.rvs(len(x)), diagonal_A(10, len(x)))
    temp2 = np.matmul(temp1, ortho_group.rvs(len(x)))
    z = np.matmul(temp2, x)
    sum1 = np.sum(z ** 2) ** 0.9
    return T_osz_scalar(sum1)
Example #8
def create_data(m, m_1, d):
    A = ortho_group.rvs(
        dim=d)[:m]  # m orthogonal vectors with dim m, a_1, ..., a_m
    #A = np.identity(d)[:,:m]
    A = np.transpose(A)
    B = ortho_group.rvs(
        dim=m)[:m_1]  #m_1 orthogonal vectors with dimension m b_1, ..., b_m_1
    #B = np.identity(m)[:,:m_1]
    B = np.transpose(B)
    return A, B
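A short hedged check (assuming numpy as np) that both returned factors have orthonormal columns:

import numpy as np

A, B = create_data(m=5, m_1=2, d=10)
print(A.shape, B.shape)                 # (10, 5) (5, 2)
print(np.allclose(A.T @ A, np.eye(5)))  # True
print(np.allclose(B.T @ B, np.eye(2)))  # True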
Example #9
def genfixedRankMatrix(M=5, N=3, r=3):
    if r > min(M, N):
        raise ValueError('rank r must not exceed min(M, N)')
    A = np.zeros((M, N))
    A[:r, :N] = np.random.rand(r, N)  # rank r with probability 1
    Q3 = np.float32(ortho_group.rvs(dim=M))
    Q4 = np.float32(ortho_group.rvs(dim=N))
    A = Q3.dot(A).dot(Q4)  # orthogonal factors preserve the rank
    return A
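The orthogonal factors preserve rank, so (with probability 1 over the random r x N block) the output has rank exactly r; a quick hedged check (assuming numpy as np):

import numpy as np

A = genfixedRankMatrix(M=6, N=4, r=2)
print(A.shape)                   # (6, 4)
print(np.linalg.matrix_rank(A))  # 2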
Example #10
 def test_reproducibility(self):
     np.random.seed(514)
     x = ortho_group.rvs(3)
     x2 = ortho_group.rvs(3, random_state=514)
     # Note this matrix has det -1, distinguishing O(N) from SO(N)
     expected = np.array([[0.993945, -0.045279, 0.100114],
                          [-0.048216, -0.998469, 0.02711],
                          [-0.098734, 0.031773, 0.994607]])
     assert_array_almost_equal(x, expected)
     assert_array_almost_equal(x2, expected)
     assert_almost_equal(np.linalg.det(x), -1)
 def test_KRON10_pinv_function(self):
     A = ortho_group.rvs(dim=self.p)
     B = ortho_group.rvs(dim=self.p)
     A[1, :] = A[0, :]  # duplicate a row to make A (and B) rank deficient
     B[1, :] = B[0, :]
     results1 = pinvert(reduce(np.kron, [A, B]))
     results2 = reduce(np.kron, pinvert([A, B]))
     self.assertTrue(np.allclose(results1, results2), "Results not equal")
Example #13
def hooi(X, r):
    X = X.reshape(27, 27, 27)
    # start from arbitrary orthogonal factor matrices
    L = ortho_group.rvs(27)[:r, :]
    M = ortho_group.rvs(27)[:r, :]
    R = ortho_group.rvs(27)[:r, :]
    for i in range(20):
        # each transpose reorders the axes after a tensordot contraction
        LX = np.tensordot(X, L.transpose(), (0, 0)).transpose(2, 0, 1)
        MX = np.tensordot(LX, M.transpose(), (1, 0)).transpose(0, 2, 1)
        MX = MX.reshape(r**2, 27)
        U, s, V = linalg.svd(MX)
        R = V[:r, :]

        RX = np.tensordot(X, R.transpose(), (2, 0))
        LX = np.tensordot(RX, L.transpose(), (0, 0)).transpose(1, 2, 0)
        LX = LX.reshape(r**2, 27)
        U, s, V = linalg.svd(LX)
        M = V[:r, :]

        MX = np.tensordot(X, M.transpose(), (1, 0)).transpose(0, 2, 1)
        RX = np.tensordot(MX, R.transpose(), (2, 0)).transpose(1, 2, 0)
        RX = RX.reshape(r**2, 27)
        U, s, V = linalg.svd(RX)
        L = V[:r, :]
    # core tensor
    C2 = np.tensordot(X, L.transpose(), (0, 0)).transpose(2, 0, 1)
    C1 = np.tensordot(C2, M.transpose(), (1, 0)).transpose(0, 2, 1)
    C = np.tensordot(C1, R.transpose(), (2, 0))
    # reconstruction
    Y2 = np.tensordot(C, L, (0, 0)).transpose(2, 0, 1)
    Y1 = np.tensordot(Y2, M, (1, 0)).transpose(0, 2, 1)
    Y = np.tensordot(Y1, R, (2, 0))
    # compression ratio
    rate = (L.size + M.size + R.size + C.size) / X.size
    # relative error in the Frobenius norm
    norm = np.sqrt(np.sum(X * X))
    norm1 = np.sqrt(np.sum((X - Y) * (X - Y)))
    frob = norm1 / norm

    return [Y.reshape((3, 3, 3, 3, 3, 3, 3, 3, 3)), rate, frob]
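A hedged usage sketch for the HOOI routine above (it assumes numpy as np, a numpy/scipy linalg import, and ortho_group are in scope): the input is a 3^9 tensor that the function views as 27 x 27 x 27 and compresses to an r x r x r core:

import numpy as np

X = np.random.randn(*([3] * 9))
Y, rate, frob = hooi(X, r=10)
print(Y.shape)     # (3, 3, 3, 3, 3, 3, 3, 3, 3)
print(rate, frob)  # compression ratio and relative Frobenius error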
Example #14
 def __init__(self,
              shape,
              steps,
              Wf=None,
              bf=None,
              Wi=None,
              bi=None,
              Wc=None,
              bc=None,
              Wo=None,
              bo=None,
              initMethod='xavier',
              watchState=False,
              returnSeq=False):
     neuralNetLayer.__init__(self, shape)
     if Wf is None:
         Wf = ortho_group.rvs(shape[0])
         Wf = np.concatenate((Wf, initializer(shape, shape[1], initMethod)),
                             axis=1)
     if bf is None:
         bf = np.zeros((shape[0], 1))
     if Wi is None:
         Wi = ortho_group.rvs(shape[0])
         Wi = np.concatenate((Wi, initializer(shape, shape[1], initMethod)),
                             axis=1)
     if bi is None:
         bi = np.zeros((shape[0], 1))
     if Wc is None:
         Wc = ortho_group.rvs(shape[0])
         Wc = np.concatenate((Wc, initializer(shape, shape[1], initMethod)),
                             axis=1)
     if bc is None:
         bc = np.zeros((shape[0], 1))
     if Wo is None:
         Wo = ortho_group.rvs(shape[0])
         Wo = np.concatenate((Wo, initializer(shape, shape[1], initMethod)),
                             axis=1)
     if bo is None:
         bo = np.zeros((shape[0], 1))
     self.steps = steps
     self.watchState = watchState
     self.returnSeq = returnSeq
     self.params = {
         'Wf': Wf,
         'bf': bf,
         'Wi': Wi,
         'bi': bi,
         'Wc': Wc,
         'bc': bc,
         'Wo': Wo,
         'bo': bo
     }
     self.cache = []
def encryption_train(X, y):
    # U1 is an orthogonal matrix
    U1 = ortho_group.rvs(dim=X.shape[0])

    # U2 is an invertible matrix (orthogonal when X has more than one column)
    if X.shape[1] > 1:
        U2 = ortho_group.rvs(dim=X.shape[1])
    else:
        U2 = np.random.rand(1, 1)

    X_enc = U1.dot(X).dot(U2)
    y_enc = U1.dot(y)
    return [X_enc, y_enc, U1, U2]
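Because U1 and U2 are orthogonal, least squares on the masked data recovers the plaintext solution up to the known rotation U2. A hedged round-trip check (assuming numpy as np and the snippet's ortho_group import):

import numpy as np

X = np.random.randn(50, 4)
theta = np.random.randn(4)
y = X @ theta
X_enc, y_enc, U1, U2 = encryption_train(X, y)
theta_enc = np.linalg.lstsq(X_enc, y_enc, rcond=None)[0]
print(np.allclose(U2 @ theta_enc, theta))  # True: U2 undoes the column mask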
Example #16
def test_having_zero_singular_case():
    r"""Test symmetric that has zero singular values."""
    # Define a matrix with not full singular values, e.g. 5 and 0 are singular values.
    sing_mat = np.array([[5., 0.], [0., 0.], [0., 0.], [0., 0.]])
    array_a = ortho_group.rvs(4).dot(sing_mat).dot(ortho_group.rvs(2))

    sym_array = np.array([[0.38895636, 0.30523869], [0.30523869, 0.30856369]])
    array_b = np.dot(array_a, sym_array)
    # compute procrustes transformation
    res = symmetric(array_a, array_b)
    # check transformation is symmetric & error is zero
    assert_almost_equal(res["array_u"], res["array_u"].T, decimal=6)
    assert_almost_equal(res["error"], 0.0, decimal=6)
Example #17
 def create_population(M):
     xmax = abs(max(M.min(), M.max(), key=abs))
     rows = np.size(M, 0)
     columns = np.size(M, 1)
     population = []
     for _ in range(20):
         S = np.zeros((rows, columns))
         for j in range(min(rows, columns)):
             S[j, j] = np.random.rand() * xmax  # random diagonal entry, bounded by |M|'s extreme
         U = ortho_group.rvs(rows)
         Vt = ortho_group.rvs(columns)
         item = Specimen(U, S, Vt)
         population.append(item)
     return population
Example #18
 def init_params(self, x):
   for p in self.parameters():
     p.data.uniform_(-x / math.sqrt(p.data.size(-1)), x / math.sqrt(p.data.size(-1)))
   self.word_embed.init_params(x)
   # orthonormal initialization for GRUs
   if self.encode_type in ['GRU', 'LSTM']:
     for p in self.rnn.parameters():
       if p.data.size(-1) == self.rep_dim:
         if self.encode_type == 'GRU':
           m = np.concatenate([ortho_group.rvs(dim=self.rep_dim) for _ in range(3)])
         else:
           m = np.concatenate([ortho_group.rvs(dim=self.rep_dim) for _ in range(4)])
         p.data.copy_(torch.Tensor(m))
         m = None
 def init_params(self, x):
   for p in self.parameters():
     p.data.uniform_(-x / math.sqrt(p.data.size(-1)), x / math.sqrt(p.data.size(-1)))
   # orthonormal initialization for GRUs
   if self.task_type == 'skip':
     for p in self.rnn_l.parameters():
       if p.data.size(-1) == self.hidden_size:
         m = np.concatenate([ortho_group.rvs(dim=self.hidden_size) for _ in range(3)])
         p.data.copy_(torch.Tensor(m))
         m = None
     for p in self.rnn_r.parameters():
       if p.data.size(-1) == self.hidden_size:
         m = np.concatenate([ortho_group.rvs(dim=self.hidden_size) for _ in range(3)])
         p.data.copy_(torch.Tensor(m))
         m = None
Example #20
def _generate_stable_symmetric_matrix(hidden_state_dim, eigvalues=None):
    """Generates a symmetric matrix with spectral radius <= 1.

  Args:
    hidden_state_dim: Desired dimension.
    eigvalues: Specified eigenvalues, optional. If None, random eigenvalues will
      be generated from uniform[-1, 1].

  Returns:
    A numpy array of shape [hidden_state_dim, hidden_state_dim] representing a
    symmetric matrix with spectral radius <= 1.
  """
    # Generate eigenvalues.
    if eigvalues is None:
        eigvalues = np.random.uniform(-1.0, 1.0, hidden_state_dim)
    diag_matrix = np.diag(eigvalues)
    if hidden_state_dim == 1:
        change_of_basis = np.ones([1, 1])
    else:
        change_of_basis = ortho_group.rvs(hidden_state_dim)
    # transition_matrix = change_of_basis diag_matrix change_of_basis^T
    transition_matrix = np.matmul(np.matmul(change_of_basis, diag_matrix),
                                  change_of_basis.transpose())
    # Check that the transition_matrix recovers the specified eigenvalues.
    if np.linalg.norm(
            np.sort(np.linalg.eigvals(transition_matrix)) -
            np.sort(eigvalues)) > 1e-6:
        raise ValueError('Eigenvalues do not match.')
    return transition_matrix
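A brief hedged check of the contract stated in the docstring (assuming numpy as np):

import numpy as np

A = _generate_stable_symmetric_matrix(hidden_state_dim=4)
print(np.allclose(A, A.T))                           # symmetric
print(np.max(np.abs(np.linalg.eigvalsh(A))) <= 1.0)  # spectral radius <= 1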
Example #21
def gen(n):
    # note: relies on a module-level dimension `d`
    z = np.random.normal(size=n)
    X = np.zeros([d, n])
    X[0] = z                  # samples lie on a one-dimensional subspace
    rot = ortho_group.rvs(d)
    X = rot.dot(X).T          # randomly rotated into R^d
    return X
Example #22
def make_multiview_blobs_old(n_classes=3,
                             n_views=3,
                             n_features=3,
                             n_samples='auto',
                             rotate=True,
                             shuffle=True,
                             seed=None):
    np.random.seed(seed)
    n_samples = n_classes * 20 if n_samples == 'auto' else n_samples
    X_0, y = make_blobs(n_features=n_features,
                        centers=n_classes,
                        n_samples=n_samples,
                        random_state=seed)
    Xs = [X_0]
    for i in range(n_views - 1):
        X_i = X_0 + np.random.randn(n_features) * np.random.randint(1, 3)
        X_i = np.array([x + np.random.normal(0, 1, len(x.shape)) for x in X_i])
        if rotate:
            X_i = X_i @ ortho_group.rvs(n_features, random_state=seed)
        Xs.append(X_i)
    if shuffle:
        indexes = np.random.permutation(np.arange(len(y)))
        y = y[indexes]
        for _ in range(n_views):
            Xs[_] = Xs[_][indexes, :]
    if n_views > 1:
        return [torch.tensor(X).float() for X in Xs], torch.from_numpy(y)
    return torch.tensor(Xs).squeeze(0).float(), torch.from_numpy(y)
Example #23
def make_network(minibatch_size = 128):
	patch_size = 32
	inp = DataProvider("data", shape = (minibatch_size, 3, patch_size, patch_size))
	label = DataProvider("label", shape = (minibatch_size, ))

	#lay = bn_relu_conv(inp, 3, 1, 1, 16, False, False)
	lay, conv = conv_bn(inp, 3, 1, 1, 16, True)
	out = [conv]
	for chl in [32 * 3, 64 * 3, 128 * 3]:
		for i in range(10):
			lay, conv1, conv2 = xcep_layer(lay, chl)
			out.append(conv1)
			out.append(conv2)
		if chl != 128 * 3:
			lay = Pooling2D("pooling{}".format(chl), lay, window = 2, mode = "MAX")

	
	#global average pooling
	print(lay.partial_shape)
	feature = lay.mean(axis = 2).mean(axis = 2)
	#feature = Pooling2D("glbpoling", lay, window = 8, stride = 8, mode = "AVERAGE")
	W = ortho_group.rvs(feature.partial_shape[1])
	W = W[:, :10]
	W = ConstProvider(W)
	b = ConstProvider(np.zeros((10, )))
	pred = Softmax("pred", FullyConnected(
		"fc0", feature, output_dim = 10,
		W = W,
		b = b,
		nonlinearity = Identity()
		))
	
	network = Network(outputs = [pred] + out)
	network.loss_var = CrossEntropyLoss(pred, label)
	return network
Example #24
    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        ks_prob = 0.39  # ...so don't expect much precision
        np.random.seed(518)  # Note that the test is sensitive to seed too
        xs = ortho_group.rvs(dim, size=samples)

        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        #   effectively picking off entries in the matrices of xs.
        #   These projections should all have the same distribution,
        #     establishing rotational invariance. We use the two-sided
        #     KS test to confirm this.
        #   We could instead test that angles between random vectors
        #     are uniformly distributed, but the below is sufficient.
        #   It is not feasible to consider all pairs, so pick a few.
        els = ((0, 0), (0, 2), (1, 4), (2, 3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = dict(
            ((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob] * len(pairs), ks_tests)
Example #25
def conv_bn(inp, ker_shape, stride, padding, out_chl, isrelu, mode = None):
	global idx
	idx += 1
	print(inp.partial_shape, ker_shape, out_chl)
	if ker_shape == 1:
		W = ortho_group.rvs(out_chl)
		W = W[:, :inp.partial_shape[1]]
		W = W.reshape(W.shape[0], W.shape[1], 1, 1)
		W = ConstProvider(W)
		b = ConstProvider(np.zeros(out_chl))
	else:
		W = G(mean = 0, std = ((1 + int(isrelu)) / (ker_shape**2 * inp.partial_shape[1]))**0.5)
		b = C(0)
	l1 = Conv2D(
		"conv{}".format(idx), inp, kernel_shape = ker_shape, stride = stride, padding = padding,
		output_nr_channel = out_chl,
		group = mode,
		W = W,
		b = b,
		nonlinearity = Identity()
		)
	l2 = BN("bn{}".format(idx), l1, eps = 1e-9)
	l2 = ElementwiseAffine("bnaff{}".format(idx), l2, shared_in_channels = False, k = C(1), b = C(0))
	if isrelu:
		l2 = arith.ReLU(l2)
	return l2, l1
Example #26
    def test_homography_decomposition(self):
        """
        Test homography decomposition by following steps:
        1. prepare affine with random rotation R and translation t
        2. make homography of affine from step 1, with random projection P
          **NOTE: t first, then R !! ***
        3. decompose homography using P to get R' and t'
        4. compare (R, t) and (R', t')
        """
        # step 1
        R = ortho_group.rvs(dim=3)
        R /= np.linalg.det(R)
        t = np.random.random((3, )) * 2 - 1
        P = np.random.random((3, 3))
        P[0, 0] = P[1, 1] = P[2, 2] = 1
        P[0, 1:] = 0
        P[1, 2] = 0

        # step 2
        A = np.array([R[:, 0], R[:, 1], R @ t]).T
        H = P @ A

        # step 3
        hom = Homography(H, P)
        R_, t_ = hom.R, hom.t

        # step 4
        np.testing.assert_almost_equal(R, R_)
        np.testing.assert_almost_equal(t, t_)
Example #27
def make_multiview_blobs(n_classes=3,
                         n_views=3,
                         n_features=3,
                         n_samples='auto',
                         cluster_std=1.0,
                         center_box=(-10.0, 10.0),
                         rotate=True,
                         shuffle=True,
                         seed=None):
    np.random.seed(seed)
    n_samples = n_classes * 20 if n_samples == 'auto' else n_samples
    X_0, y = make_blobs(n_samples=n_samples,
                        n_features=n_features,
                        centers=n_classes,
                        cluster_std=cluster_std,
                        center_box=center_box,
                        random_state=seed)
    std = np.std(X_0)
    Xs = [X_0]
    for i in range(n_views - 1):
        X_i = X_0 + np.tile(
            np.random.normal(loc=0.0, scale=10 * std, size=n_features),
            (len(X_0), 1))
        X_i += np.random.normal(loc=0.0, scale=std / 100, size=X_i.shape)
        if rotate:
            X_i = X_i @ ortho_group.rvs(n_features, random_state=seed)
        Xs.append(X_i)
    if shuffle:
        indexes = np.random.permutation(np.arange(len(y)))
        y = y[indexes]
        for _ in range(n_views):
            Xs[_] = Xs[_][indexes, :]
    if n_views > 1:
        return [torch.tensor(X).float() for X in Xs], torch.from_numpy(y)
    return torch.tensor(Xs).squeeze(0).float(), torch.from_numpy(y)
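A hedged usage sketch (assuming sklearn.datasets.make_blobs, torch, and numpy are imported as the snippet requires):

Xs, y = make_multiview_blobs(n_classes=3, n_views=2, n_features=4, seed=0)
print(len(Xs), Xs[0].shape, y.shape)  # 2 torch.Size([60, 4]) torch.Size([60])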
Example #28
def random_matrix_expon(size, scale=1):
    '''
    Random matrix U D U^T, where U ~ O(N) and p(d_ii) ~ exp(-alpha * d_ii).

    Parameters
    ----------
    size : int
        Number of variables.
    scale : float64
        The scale of the exponential distribution.

    Returns
    -------
    A : array (N, N)
        Matrix that defines the linear equation.
    b : array (N, 1)
        Right-hand side.
    x_exact : array (N, 1)
        Exact solution.
    '''
    val, vec, x_exact = expon(scale=scale).rvs(
        size=size), ortho_group.rvs(size), np.random.randn(size, 1)
    cond = np.max(val) / np.min(val)
    A = vec @ np.diag(val) @ vec.T
    b = A @ x_exact
    return A, b, x_exact
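A hedged usage check (assuming numpy as np and from scipy.stats import expon, ortho_group): A is symmetric positive definite, so a direct solve reproduces the exact solution:

import numpy as np

A, b, x_exact = random_matrix_expon(size=50, scale=1.0)
print(np.allclose(np.linalg.solve(A, b), x_exact))  # True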
Example #29
 def __init__(self, dim, log_min, log_max):
     self.R = ortho_group.rvs(dim)
     rand_unif = np.random.uniform(log_min, log_max, size=(dim,))
     self.diag = np.diag(np.exp(np.log(10.) * rand_unif))
     S = self.R.T.dot(self.diag).dot(self.R)
     self.dim = dim
     Gaussian.__init__(self, np.zeros((dim,)), S)
Example #31
    def optimal_transport(self, layer_name, source_layer, target_layer):
        """Sliced optimal transportation of the activation values source_layer towards target_layer,
            seen as pointclouds of an Eucliean space of dimension n_channels.

        :param layer_name: layer name, as stored in 'observed_layers' dictionary
        :type layer_name: string
        :param source_layer: source activation tensor of shape (n_channels, width, height)
        :type source_layer: tensor
        :param target_layer: target activation tensor of shape (n_channels, width, height)
        :type target_layer: tensor
        :return: transported tensor
        :rtype: tensor
        """

        n_channels = source_layer.shape[0]
        assert n_channels == target_layer.shape[0]

        default_n_slices = n_channels // self.n_passes
        n_slices = self.observed_layers[layer_name].get(
            'n_slices', default_n_slices)

        for _ in range(n_slices):
            # random orthonormal basis
            basis = torch.from_numpy(ortho_group.rvs(n_channels)).float()

            # project on the basis
            source_rotated_layer = basis @ source_layer.view(n_channels, -1)
            target_rotated_layer = basis @ target_layer.view(n_channels, -1)

            # sliced transport
            target_rotated_layer = sliced_transport(source_rotated_layer,
                                                    target_rotated_layer)
            target_layer = basis.t() @ target_rotated_layer

        return target_layer
Example #32
 def generateSensingMatrix(self, m, n, type): #Generate sensing matrix of dim m by n of given type
     if type == 'sdnormal':
         self.sensing_matrix = np.random.randn(m,n)
         #column normalize the matrix:
         for i in range(n):
             self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
     if type == 'uniform01':
         self.sensing_matrix = np.random.rand(m,n)
         for i in range(n):
             self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
     if type == 'bernoulli':
         # For small m and n, the s chosen columns have a high probability of
         # being linearly dependent. This makes components of the x_S feature
         # blow up, so the network outputs a degenerate distribution (all
         # zeros and a single 1) and v becomes nan because it is so large.
         self.sensing_matrix = np.random.binomial(1, 1 / 2, (m, n))
         self.sensing_matrix = self.sensing_matrix.astype(float)
         for i in range(n):
             self.sensing_matrix[:, i] = self.sensing_matrix[:, i] / np.linalg.norm(self.sensing_matrix[:, i])
     if type == 'hadamard':
         # n must be a power of 2 here! The same linear-dependence caveat as
         # in the 'bernoulli' case applies for small m and n.
         A = hadamard(n)
         S = sample(range(1, n), m)  # sample m row indices (row 0 is never chosen)
         self.sensing_matrix = A[S, :]
         self.sensing_matrix = self.sensing_matrix.astype(float)
         for i in range(n):
             self.sensing_matrix[:, i] = self.sensing_matrix[:, i] / np.linalg.norm(self.sensing_matrix[:, i])
     if type == 'subsampled_haar':
         A = ortho_group.rvs(n)
         S = sample(range(1, n), m)
         self.sensing_matrix = A[S,:]
         for i in range(n):
             self.sensing_matrix[:,i] = self.sensing_matrix[:,i]/np.linalg.norm(self.sensing_matrix[:,i])
 def test_almost_psd_dont_raise(self):
   """Checks that if the metric is almost PSD (i.e. it has some negative
   eigenvalues very close to zero), then transformer_from_metric will still
   work"""
   rng = np.random.RandomState(42)
   D = np.diag([1, 5, 3, 4.2, -1e-20, -2e-20, -1e-20])
   P = ortho_group.rvs(7, random_state=rng)
   M = P.dot(D).dot(P.T)
   L = transformer_from_metric(M)
   assert_allclose(L.T.dot(L), M)
def create_bases(k, s):
    assert k > 1
    B = np.zeros((k, k))
    B[:2, :2] = np.eye(2)
    for col in range(2, k):
        B[:, col] = np.random.randn(k)
        B[:, col] /= np.linalg.norm(B[:, col])

    Q = ortho_group.rvs(k)
    return np.dot(Q, s * B)
Example #35
    def test_det_and_ortho(self):
        xs = [ortho_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]

        # Test that determinants are always +/-1 (O(N) includes reflections)
        dets = [np.fabs(np.linalg.det(x)) for x in xs]
        assert_allclose(dets, [1.]*30, rtol=1e-13)

        # Test that these are orthogonal matrices
        for x in xs:
            assert_array_almost_equal(np.dot(x, x.T),
                                      np.eye(x.shape[0]))
 def test_non_psd_raises(self):
   """Checks that a non PSD matrix (i.e. with negative eigenvalues) will
   raise an error when passed to transformer_from_metric"""
   rng = np.random.RandomState(42)
   D = np.diag([1, 5, 3, 4.2, -4, -2, 1])
   P = ortho_group.rvs(7, random_state=rng)
   M = P.dot(D).dot(P.T)
   msg = ("Matrix is not positive semidefinite (PSD).")
   with pytest.raises(ValueError) as raised_error:
     transformer_from_metric(M)
   assert str(raised_error.value) == msg
   with pytest.raises(ValueError) as raised_error:
     transformer_from_metric(D)
   assert str(raised_error.value) == msg
  def test_transformer_from_metric_edge_cases(self):
    """Test that transformer_from_metric returns the right result in various
    edge cases"""
    rng = np.random.RandomState(42)

    # an orthonormal matrix useful for creating matrices with given
    # eigenvalues:
    P = ortho_group.rvs(7, random_state=rng)

    # matrix with all its coefficients very low (to check that the algorithm
    # does not consider it as a diagonal matrix)(non regression test for
    # https://github.com/metric-learn/metric-learn/issues/175)
    M = np.diag([1e-15, 2e-16, 3e-15, 4e-16, 5e-15, 6e-16, 7e-15])
    M = P.dot(M).dot(P.T)
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)

    # diagonal matrix
    M = np.diag(np.abs(rng.randn(5)))
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)

    # low-rank matrix (with zeros)
    M = np.zeros((7, 7))
    small_random = rng.randn(3, 3)
    M[:3, :3] = small_random.T.dot(small_random)
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)

    # low-rank matrix (without necessarily zeros)
    R = np.abs(rng.randn(7, 7))
    M = R.dot(np.diag([1, 5, 3, 2, 0, 0, 0])).dot(R.T)
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)

    # matrix with a determinant still high but which should be considered as a
    # non-definite matrix (to check we don't test the definiteness with the
    # determinant which is a bad strategy)
    M = np.diag([1e5, 1e5, 1e5, 1e5, 1e5, 1e5, 1e-20])
    M = P.dot(M).dot(P.T)
    assert np.abs(np.linalg.det(M)) > 10
    assert np.linalg.slogdet(M)[1] > 1  # (just to show that the computed
    # determinant is far from null)
    with pytest.raises(LinAlgError) as err_msg:
      np.linalg.cholesky(M)
    assert str(err_msg.value) == 'Matrix is not positive definite'
    # (just to show that this case is indeed considered by numpy as an
    # indefinite case)
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)

    # matrix with many small nonzero entries whose product (the determinant) is effectively zero
    M = np.diag([1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3])
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)

    # full rank matrix
    M = rng.randn(10, 10)
    M = M.T.dot(M)
    assert np.linalg.matrix_rank(M) == 10
    L = transformer_from_metric(M)
    assert_allclose(L.T.dot(L), M)
def create_orthogonal_projection_base(k, s):
    assert k > 1
    B = np.eye(k)
    Q = ortho_group.rvs(k)

    return np.dot(Q, s * B)  # equals s * Q, since B is the identity
Example #39
    res[1, 1] = c
    res[1, 2] = -s
    res[2, 1] = s
    res[2, 2] = c
  return res

def Dis(X, Y):
  if X.shape[1] == 3:
    X = X.transpose()
    Y = Y.transpose()
  #shape of X, Y is 3 * J
  R, t = horn87(X, Y)
  return ((np.dot(R, X) + np.dot(t.reshape(3, 1), np.ones((1, Y.shape[1]))) - Y) ** 2).sum()


if __name__ == '__main__':
  S = np.random.randn(3, 10)
  tt = np.random.randn(3, 1)
  RR = ortho_group.rvs(dim = 3)
  while np.linalg.det(RR) < 0:  # resample until RR is a proper rotation
    RR = ortho_group.rvs(dim = 3)
  T = np.dot(tt, np.ones((1, 10))) + np.dot(RR, S)

  print('tt', tt)
  print('RR', RR, np.dot(RR, RR.transpose(1, 0)), np.linalg.det(RR))

  R, t = horn87(S, T)

  print('t', t)
  print('R', R)