Example 1
# Assumes module-level imports: numpy as np, and eigendecomp from pylocus.basics.
def superMDS(X0, N, d, **kwargs):
    """ Find the set of points from an edge kernel.
    """
    Om = kwargs.get('Om', None)
    dm = kwargs.get('dm', None)
    if Om is not None and dm is not None:
        KE = kwargs.get('KE', None)
        if KE is not None:
            print('superMDS: KE and Om, dm given. Continuing with Om, dm')
        factor, u = eigendecomp(Om, d)
        uhat = u[:, :d]
        lambdahat = np.diag(factor[:d])
        diag_dm = np.diag(dm)
        Vhat = np.dot(diag_dm, np.dot(uhat, lambdahat))
    elif Om is None or dm is None:
        KE = kwargs.get('KE', None)
        if KE is None:
            raise NameError('Either KE or Om and dm have to be given.')
        factor, u = eigendecomp(KE, d)
        lambda_ = np.diag(factor)
        Vhat = np.dot(u, lambda_)[:, :d]

    C_inv = -np.eye(N)
    C_inv[0, 0] = 1.0
    C_inv[:, 0] = 1.0
    b = np.zeros((C_inv.shape[1], d))
    b[0, :] = X0
    b[1:, :] = Vhat[:N - 1, :]
    Xhat = np.dot(C_inv, b)
    return Xhat, Vhat
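The closing block of superMDS is a plain linear reconstruction: with this C_inv, the first recovered point is the anchor X0 and every other point equals X0 minus one row of Vhat. A minimal numpy check of just that step (Vhat is a random stand-in here, not the output of an actual edge-kernel factorization):

import numpy as np

N, d = 4, 2
X0 = np.array([1.0, 2.0])
Vhat = np.random.rand(N - 1, d)   # stand-in for the estimated edge vectors

C_inv = -np.eye(N)
C_inv[0, 0] = 1.0
C_inv[:, 0] = 1.0
b = np.zeros((N, d))
b[0, :] = X0
b[1:, :] = Vhat

Xhat = C_inv @ b
assert np.allclose(Xhat[0], X0)          # first point is the anchor
assert np.allclose(Xhat[1:], X0 - Vhat)  # others: anchor minus one edge vector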
Example 2
# Assumes module-level imports: numpy as np, eigendecomp from pylocus.basics,
# and the x_from_eigendecomp / theta_from_eigendecomp helpers (pylocus.mds).
def MDS(D, dim, method='simple', theta=False):
    """ recover points from euclidean distance matrix using classic MDS algorithm. 
    """
    N = D.shape[0]
    if method == 'simple':
        d1 = D[0, :]
        # buf_ = d1 * np.ones([1, N]).T + (np.ones([N, 1]) * d1).T
        buf_ = np.broadcast_to(d1, D.shape) + np.broadcast_to(
            d1[:, np.newaxis], D.shape)
        np.subtract(D, buf_, out=buf_)
        G = buf_  # G = (D - d1 * np.ones([1, N]).T - (np.ones([N, 1]) * d1).T)
    elif method == 'advanced':
        # s1T = np.vstack([np.ones([1, N]), np.zeros([N - 1, N])])
        s1T = np.zeros_like(D)
        s1T[0, :] = 1
        np.subtract(np.identity(N), s1T, out=s1T)
        G = np.dot(np.dot(s1T.T, D), s1T)
    elif method == 'geometric':
        J = np.identity(N) + np.full((N, N), -1.0 / float(N))
        G = np.dot(np.dot(J, D), J)
    else:
        raise ValueError('Unknown method {} in MDS'.format(method))
    G *= -0.5
    factor, u = eigendecomp(G, dim)
    if theta:
        return theta_from_eigendecomp(factor, u)
    else:
        return x_from_eigendecomp(factor, u, dim)
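For a quick, self-contained illustration of what the 'geometric' branch computes, here is a numpy-only sketch of classic MDS (double centering of the squared distance matrix followed by an eigendecomposition). It does not call the function above or pylocus; all names are local to the snippet:

import numpy as np

# Random points and their squared Euclidean distance matrix.
rng = np.random.default_rng(0)
N, dim = 6, 2
X = rng.random((N, dim))
D = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=2)

# Double centering gives the Gram matrix of the centered points.
J = np.identity(N) - np.ones((N, N)) / N
G = -0.5 * J @ D @ J

# The top eigenpairs of G recover the configuration up to rotation,
# reflection and translation, so pairwise distances are preserved.
vals, vecs = np.linalg.eigh(G)              # ascending eigenvalues
idx = np.argsort(vals)[::-1][:dim]          # keep the dim largest
X_rec = vecs[:, idx] * np.sqrt(np.clip(vals[idx], 0.0, None))

D_rec = np.sum((X_rec[:, None, :] - X_rec[None, :, :]) ** 2, axis=2)
assert np.allclose(D, D_rec)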
Example 3
def RLS_SDR(anchors, W, r, print_out=False):
    """ Range least squares (RLS) using SDR.

    Algorithm cited by A.Beck, P.Stoica in "Approximate and Exact solutions of Source Localization Problems".

    :param anchors: anchor points
    :param r2: squared distances from anchors to point x.

    :return: estimated position of point x.
    """
    import numpy as np
    import cvxpy as cp

    from pylocus.basics import low_rank_approximation, eigendecomp
    from pylocus.mds import x_from_eigendecomp

    m = anchors.shape[0]
    d = anchors.shape[1]

    # cvxpy >= 1.0 API: shapes are passed as tuples, and PSD=True already
    # enforces symmetry, so explicit G == G.T / X == X.T constraints are
    # not needed.
    G = cp.Variable((m + 1, m + 1), PSD=True)
    X = cp.Variable((d + 1, d + 1), PSD=True)
    constraints = [G[m, m] == 1.0, X[d, d] == 1.0]
    for i in range(m):
        Ci = np.eye(d + 1)
        Ci[:-1, -1] = -anchors[i]
        Ci[-1, :-1] = -anchors[i].T
        Ci[-1, -1] = np.linalg.norm(anchors[i])**2
        constraints.append(G[i, i] == cp.trace(Ci @ X))

    obj = cp.Minimize(
        cp.trace(G) - 2 * cp.sum(cp.multiply(r, G[m, :-1])))
    prob = cp.Problem(obj, constraints)

    ## Solution
    total = prob.solve(verbose=True)
    rank_G = np.linalg.matrix_rank(G.value)
    rank_X = np.linalg.matrix_rank(X.value)
    if rank_G > 1:
        u, s, v = np.linalg.svd(G.value, full_matrices=False)
        print('optimal G is not of rank 1!')
        print(s)
    if rank_X > 1:
        u, s, v = np.linalg.svd(X.value, full_matrices=False)
        print('optimal X is not of rank 1!')
        print(s)

    factor, u = eigendecomp(X.value, 1)
    xhat = x_from_eigendecomp(factor, u, 1)
    return xhat
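A hedged usage sketch: it assumes pylocus is installed (for the eigendecomp and x_from_eigendecomp helpers) and that cvxpy has an SDP-capable solver such as SCS available; the anchors, the true position and the noise level are made up for illustration:

import numpy as np

# Four anchors at the corners of the unit square and one unknown point.
anchors = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
x_true = np.array([0.3, 0.7])

# Range (distance) measurements with a little noise.
rng = np.random.default_rng(1)
r = np.linalg.norm(anchors - x_true, axis=1) + 0.01 * rng.standard_normal(len(anchors))

W = np.ones(len(anchors))   # weights are not used by this implementation
xhat = RLS_SDR(anchors, W, r)
print('true:', x_true, 'estimated:', xhat)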