Code example #1
def solve_gen_eig_prob(A, B, eps=1e-6):
    """
    Solves the generalised eigenvalue problem of the form:
    Aw = \lambda*Bw

    Note: can be validated against `scipy.linalg.eig(A, b=B)`

    Ref:
    'Eigenvalue and Generalized Eigenvalue Problems: Tutorial (2019)'
    Benyamin Ghojogh and Fakhri Karray and Mark Crowley
    arXiv 1903.11240

    """
    Lam_b, Phi_b = np.linalg.eig(B)  # eig decomp of B alone
    Lam_b = np.eye(
        len(Lam_b)) * Lam_b  # convert to diagonal matrix of eig vals

    # replace_nan is a helper assumed to be defined in the same module; it
    # cleans up NaNs that arise when taking the square root of any negative
    # eigenvalues of B
    Lam_b_sq = replace_nan(Lam_b**0.5) + np.eye(len(Lam_b)) * eps
    Phi_b_hat = np.dot(Phi_b, np.linalg.inv(Lam_b_sq))  # whitening transform for B
    A_hat = np.dot(np.dot(Phi_b_hat.transpose(), A), Phi_b_hat)
    Lam_a, Phi_a = np.linalg.eig(A_hat)
    Lam_a = np.eye(len(Lam_a)) * Lam_a

    Lam = Lam_a
    Phi = np.dot(Phi_b_hat, Phi_a)

    return np.diag(Lam), Phi
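
# As the docstring notes, solve_gen_eig_prob can be checked against SciPy. A
# minimal validation sketch (assumes CPython with numpy/scipy installed and the
# module's `replace_nan` helper available; A is symmetric and B symmetric
# positive definite, so all generalised eigenvalues are real):
import numpy as np
from scipy.linalg import eig

A = np.array([[2.0, 1.0], [1.0, 3.0]])
B = np.array([[4.0, 1.0], [1.0, 2.0]])

lam, Phi = solve_gen_eig_prob(A, B)
lam_ref, _ = eig(A, b=B)

# the two solvers need not order the eigenvalues identically, so sort before comparing
print(np.allclose(np.sort(lam), np.sort(lam_ref.real), atol=1e-4))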
Code example #2
def predict(new_data):
    """
    Predict label for a single new data instance.
    """

    gc.collect()
    # one linear score per candidate label; `weights`, `bias` and `labels` are
    # module-level globals defined alongside (and populated by) fit()
    score = np.array([(np.dot(new_data, w) + b)
                      for w, b in zip(weights, bias)])
    return labels[np.argmin(score)]
Code example #3
def solve_eig_qr(A, iterations=30):
    """
    Use the QR iteration algorithm to iteratively solve for the eigenvectors and eigenvalues
    of a matrix A. Note: only guaranteed to recover exactly for symmetric matrices
    with real eigenvalues. May work partially for asymmetric matrices (no complex support yet).

    Returns:
        `lam`: vector of eigenvalues
        `Q_bar`: matrix of eigenvectors (columns)
    """

    Ak = A
    Q_bar = np.eye(len(Ak))

    for _ in range(iterations):
        Qk, Rk = np.linalg.qr(Ak)
        Ak = np.dot(Rk, Qk)
        Q_bar = np.dot(Q_bar, Qk)

    # the diagonal of Ak converges to the eigenvalues; Q_bar accumulates the
    # orthogonal factors and its columns approximate the eigenvectors
    lam = np.diag(Ak)
    return lam, Q_bar
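
# A quick sanity check for solve_eig_qr (a sketch assuming CPython numpy as the
# reference; the test matrix is symmetric, so QR iteration is expected to converge):
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])
lam, Q_bar = solve_eig_qr(A, iterations=50)

print(np.allclose(np.sort(lam), np.sort(np.linalg.eigvalsh(A)), atol=1e-6))
# each column of Q_bar should satisfy A q = lam q (approximately)
for i in range(len(lam)):
    print(np.allclose(np.dot(A, Q_bar[:, i]), lam[i] * Q_bar[:, i], atol=1e-6))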
Code example #4
    def cca_eig(X, Y, Cxx=None, eps=1e-6):
        if Cxx is None:
            Cxx = np.dot(X, X.transpose())  # auto correlation matrix
        Cyy = np.dot(Y, Y.transpose())
        Cxy = np.dot(X, Y.transpose())  # cross correlation matrix
        Cyx = np.dot(Y, X.transpose())  # same as Cxy.T

        # regularised intermediate products; the eigenvalues of M1.M2 are the
        # squared canonical correlations between X and Y
        M1 = np.dot(np.linalg.inv(Cxx + eps), Cxy)
        M2 = np.dot(np.linalg.inv(Cyy + eps), Cyx)

        lam, _ = solve_eig_qr(np.dot(M1, M2), 20)
        return np.sqrt(lam)  # canonical correlations
Code example #5
    def compute_corr(self, X_test):
        result = {}
        # precompute the data auto-correlation matrix once and reuse it for
        # every candidate stimulus frequency
        Cxx = np.dot(X_test, X_test.transpose())
        for f in self.stim_freqs:
            Y = harmonic_reference(f,
                                   self.fs,
                                   np.max(X_test.shape),
                                   Nh=self.Nh,
                                   standardise_out=False)
            # canonical correlations between the data and the harmonic
            # reference template for frequency f
            rho = self.cca_eig(X_test, Y, Cxx=Cxx)
            result[f] = rho
        return result
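
# Hypothetical usage of compute_corr (the decoder class itself is not shown in
# this excerpt, so the class name and constructor arguments below are
# assumptions): one vector of canonical correlations is returned per candidate
# stimulus frequency, and the frequency with the largest correlation is the
# natural SSVEP prediction.
import numpy as np

decoder = CCADecoder(stim_freqs=[7.0, 10.0, 12.0], fs=250, Nh=2)  # hypothetical class
X_test = np.random.randn(4, 250)  # e.g. 4 EEG channels x 1 s of samples at fs=250

result = decoder.compute_corr(X_test)  # {frequency: vector of correlations}
predicted_freq = max(result, key=lambda f: float(np.max(result[f])))
print(predicted_freq)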
Code example #6
def power_iteration(A, iterations):
    """
    Iterative algorithm to find the eigenvector of a matrix A corresponding to
    its largest (in magnitude) eigenvalue.

    TODO: Establish some measure or heuristic of min number of iterations required
    """
    # choose a random initial vector to reduce the risk of picking one that is
    # orthogonal to the target eigenvector (`urandom` is MicroPython's random module)
    b_k = np.array([urandom.random() for i in range(len(A))])

    for _ in range(iterations):
        b_k1 = np.dot(A, b_k)
        b_k1_norm = np.linalg.norm(b_k1)
        # re-normalise the vector
        b_k = b_k1 / b_k1_norm

    # b_k1_norm converges to the magnitude of the dominant eigenvalue and b_k
    # to the corresponding (unit-norm) eigenvector
    return b_k1_norm, b_k
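
# A minimal check of power_iteration (a sketch: under CPython, the standard
# `random` module stands in for MicroPython's `urandom`, which the function
# references at module level):
import numpy as np
import random as urandom  # stand-in for MicroPython's `urandom`

A = np.array([[2.0, 1.0], [1.0, 3.0]])
lam_max, v = power_iteration(A, iterations=100)

print(lam_max)  # ~3.618, the dominant eigenvalue of A
print(np.allclose(np.dot(A, v), lam_max * v, atol=1e-6))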
Code example #7
def fit():
    """
    Train the model with dataset.
    """

    gc.collect()

    global weights, bias, training_time

    weights = []
    bias = []
    training_time = 0

    start_time = time.monotonic_ns()

    for l in labels:

        # one-vs-rest split: note that `pos_labels` holds the samples that do
        # NOT belong to label l, while `neg_labels` holds the samples of l itself
        pos_labels = np.array([x for x, y in zip(data, target) if y != l])
        neg_labels = np.array([x for x, y in zip(data, target) if y == l])

        avg_pos = np.mean(pos_labels, axis=0) / data_factor
        avg_neg = np.mean(neg_labels, axis=0) / data_factor

        # per-label linear discriminant: normalised contrast of the two centroids
        weight = ((avg_pos - avg_neg) / (avg_pos + avg_neg))
        weights.append(weight)

        weighted_scores = np.array([np.dot(d, weight) for d in data])

        weighted_pos_labels = np.array(
            [x for x, y in zip(weighted_scores, target) if y != l])
        weighted_neg_labels = np.array(
            [x for x, y in zip(weighted_scores, target) if y == l])

        pos_score_avg = np.mean(weighted_pos_labels) / data_factor
        neg_score_avg = np.mean(weighted_neg_labels) / data_factor

        bias_label = -(neg_labels.size * pos_score_avg + pos_labels.size *
                       neg_score_avg) / (pos_labels.size + neg_labels.size)

        bias.append(bias_label)

    training_time = (time.monotonic_ns() - start_time) / 1000000  # ns -> ms
    weights = np.array(weights)
    bias = np.array(bias)
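
# Toy end-to-end sketch of fit() and predict(). The module-level names `data`,
# `target`, `labels` and `data_factor`, as well as the `gc`, `time` and `np`
# imports, are assumed to live in the same module as the two functions.
import gc
import time
import numpy as np

data = np.array([[1.0, 2.0], [1.2, 1.9], [4.0, 5.0], [4.2, 5.1]])
target = np.array([0, 0, 1, 1])
labels = [0, 1]
data_factor = 1

fit()  # populates the global `weights`, `bias` and `training_time`
print(predict(np.array([1.1, 2.0])))  # prints 0 for this toy data
print("training time:", training_time, "ms")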
Code example #8
import math

try:
    from ulab import numpy as np
except ImportError:
    import numpy as np


def matrix_is_close(A, B, n):
    # primitive (i.e., independent of other functions) check of closeness of two square matrices
    for i in range(n):
        for j in range(n):
            print(math.isclose(A[i][j], B[i][j], rel_tol=1E-9, abs_tol=1E-9))


a = np.array([1, 2, 3], dtype=np.int16)
b = np.array([4, 5, 6], dtype=np.int16)
ab = np.dot(a.transpose(), b)
print(math.isclose(ab, 32.0, rel_tol=1E-9, abs_tol=1E-9))

a = np.array([1, 2, 3], dtype=np.int16)
b = np.array([4, 5, 6], dtype=np.float)
ab = np.dot(a.transpose(), b)
print(math.isclose(ab, 32.0, rel_tol=1E-9, abs_tol=1E-9))

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])

c = np.array([[19, 22], [43, 50]])
matrix_is_close(np.dot(a, b), c, 2)

c = np.array([[26, 30], [38, 44]])
matrix_is_close(np.dot(a.transpose(), b), c, 2)
Code example #9
    def pre_emphasis(self, frame):
        # FIXME: Do this with matrix multiplication
        outfr = numpy.empty(len(frame), 'd')
        outfr[0] = frame[0] - self.alpha * self.prior
        for i in range(1,len(frame)):
            outfr[i] = frame[i] - self.alpha * frame[i-1]
        self.prior = frame[-1]
        return outfr

    def frame2logspec(self, frame):
        frame = self.pre_emphasis(frame) * self.win
        fft = numpy.fft.rfft(frame, self.nfft)
        # Square of absolute value
        power = fft.real * fft.real + fft.imag * fft.imag
        return numpy.log(numpy.dot(power, self.filters).clip(1e-5,numpy.inf))

    def frame2s2mfc(self, frame):
        logspec = self.frame2logspec(frame)
        return numpy.dot(logspec, self.s2dct.T) / self.nfilt

def s2dctmat(nfilt,ncep,freqstep):
    """Return the 'legacy' not-quite-DCT matrix used by Sphinx"""
    melcos = numpy.empty((ncep, nfilt), 'double')
    for i in range(0,ncep):
        freq = numpy.pi * float(i) / nfilt
        melcos[i] = numpy.cos(freq * numpy.arange(0.5, float(nfilt)+0.5, 1.0, 'double'))
    melcos[:,0] = melcos[:,0] * 0.5
    return melcos

def logspec2s2mfc(logspec, ncep=13):
    """Convert a (nframes x nfilt) array of log-power-spectra to MFCC using the
    'legacy' Sphinx transform (mirrors frame2s2mfc above)"""
    nframes, nfilt = logspec.shape
    melcos = s2dctmat(nfilt, ncep, 1.0 / nfilt)
    return numpy.dot(logspec, melcos.T) / nfilt
Code example #10
                print(math.isclose(ab[i][j], 0.0, rel_tol=1E-9, abs_tol=1E-9))

    a = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]])
    b = np.linalg.inv(a)
    ab = np.linalg.dot(a, b)
    m, n = ab.shape()
    for i in range(m):
        for j in range(n):
            if i == j:
                print(math.isclose(ab[i][j], 1.0, rel_tol=1E-9, abs_tol=1E-9))
            else:
                print(math.isclose(ab[i][j], 0.0, rel_tol=1E-9, abs_tol=1E-9))
else:
    a = np.array([1, 2, 3], dtype=np.int16)
    b = np.array([4, 5, 6], dtype=np.int16)
    ab = np.dot(a.transpose(), b)
    print(math.isclose(ab, 32.0, rel_tol=1E-9, abs_tol=1E-9))

    a = np.array([1, 2, 3], dtype=np.int16)
    b = np.array([4, 5, 6], dtype=np.float)
    ab = np.dot(a.transpose(), b)
    print(math.isclose(ab, 32.0, rel_tol=1E-9, abs_tol=1E-9))

    a = np.array([[1., 2.], [3., 4.]])
    b = np.linalg.inv(a)
    ab = np.dot(a, b)
    m, n = ab.shape
    for i in range(m):
        for j in range(n):
            if i == j:
                print(math.isclose(ab[i][j], 1.0, rel_tol=1E-9, abs_tol=1E-9))