Example #1
    def __new__(cls, M=10, beta=1, phi=0, normalised=True, inverted=False):

        wc = np.kaiser(M, beta)  # Window coefficients
        if inverted:
            wc = 1 / wc  # Inverted variant uses the reciprocal coefficients

        if phi == 0:
            win = wc / sum(wc)  # Normalised window
        else:
            m = np.arange(M)  # Element indices 0 .. M-1
            a = np.exp(-1j * 2 * np.pi * m * phi)  # Steering vector
            ws = np.dot(wc, a)  # Normalisation factor (coefficients sum to 1)
            win = a * wc / ws  # Steered and normalised window

        w = np.asarray(win).view(cls)
        # Metadata from the original Ndarray subclass:
        #   axes=('M',), desc='Kaiser (beta=%d, phi=%d)' % (beta, phi), shape_desc=('M','1')
        return w
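For reference, the steering normalisation can be checked outside the class: with ws = dot(wc, a), the steered coefficients always sum to exactly one. A minimal standalone sketch (the free function steered_kaiser is a hypothetical stand-in for the class above):

import numpy as np

def steered_kaiser(M=10, beta=1, phi=0):
    # Hypothetical free-function version of the window class above
    wc = np.kaiser(M, beta)
    a = np.exp(-1j * 2 * np.pi * np.arange(M) * phi)
    return a * wc / np.dot(wc, a)

win = steered_kaiser(M=16, beta=3, phi=0.1)
assert np.isclose(win.sum(), 1.0)  # The dot(wc, a) factor normalises the sum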
Example #2
import numpy as np


def grad_zipped(x, P_0, pi, beta, J, I):
    # Unpack the flat vector of upper-triangle entries into a symmetric matrix,
    # evaluate the gradient, and re-pack the result as a flat vector
    iu = np.triu_indices(len(P_0), k=1)  # Strict upper-triangle indices
    A = np.zeros((len(P_0), len(P_0)))
    eta = np.exp(-beta)
    A[iu] = x
    A = np.maximum(A, A.T)  # Symmetrise
    output = gradient(P_0, pi, A, eta, J, I)  # `gradient` is defined elsewhere
    return output[iu]
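The "zipped" convention exists because optimisers work on flat vectors: the symmetric matrix A is carried around as its strict upper triangle only. A small round-trip sketch with illustrative data:

import numpy as np

n = 4
rng = np.random.default_rng(0)
A = rng.random((n, n))
A = np.maximum(A, A.T)  # Symmetric test matrix
np.fill_diagonal(A, 0)  # k=1 below skips the diagonal

iu = np.triu_indices(n, k=1)
flat = A[iu]            # Pack: strict upper triangle as a flat vector

B = np.zeros((n, n))
B[iu] = flat            # Unpack the upper triangle...
B = np.maximum(B, B.T)  # ...and mirror it, as grad_zipped does

assert np.allclose(A, B)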
Example #3
import numpy as np
import scipy as sp
import scipy.linalg


def cost_func(P_0, pi, A, beta, J, I):
    AJ = A @ J  # Row sums of A (J is presumably the all-ones matrix)
    with np.errstate(divide='ignore', invalid='ignore'):
        P_f = A / AJ  # Row-normalise A into a transition matrix
    P_f[np.isnan(P_f)] = 0  # All-zero rows give 0/0; replace NaNs with 0
    eta = np.exp(-beta)
    Q = sp.linalg.inv(I - eta * P_f)  # Resolvent (I - eta * P_f)^-1
    prod = P_f @ Q
    with np.errstate(divide='ignore', invalid='ignore'):
        M2 = np.log(prod / P_0)
    M2[P_0 == 0] = 0  # Entries with P_0 == 0 do not contribute
    combined = np.einsum('i, ij -> ij', pi, P_0)  # diag(pi) @ P_0
    return -np.log(1 - eta) - np.trace(combined.T @ M2)
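One step worth noting: np.einsum('i, ij -> ij', pi, P_0) scales row i of P_0 by pi[i], i.e. it equals np.diag(pi) @ P_0 without materialising the diagonal matrix. A quick check on illustrative data:

import numpy as np

rng = np.random.default_rng(1)
pi = rng.random(5)
P_0 = rng.random((5, 5))

assert np.allclose(np.einsum('i, ij -> ij', pi, P_0), np.diag(pi) @ P_0)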
Example #4
    def __new__(cls, M=10, phi=0, normalised=True):

        # Create the window
        if phi == 0:
            win = np.ones(M) / M  # Uniform (rectangular) window
        else:
            wc = np.ones(M, dtype=complex)  # Window coefficients
            m = np.arange(M)  # Element indices 0 .. M-1
            a = np.exp(-1j * 2 * np.pi * m * phi)  # Steering vector
            ws = np.dot(wc, a)  # Normalisation factor
            win = a * wc / ws  # Steered and normalised window

        w = np.asarray(win).view(cls)
        # Metadata from the original Ndarray subclass:
        #   axes=('M',), desc='Rectangular (phi=%d)' % phi, shape_desc=('M','1')
        return w
Example #5
    def __new__(cls, M=10, a=0.54, phi=0, normalised=True):

        # Create the window (a=0.54 gives the Hamming window)
        n = np.linspace(-0.5, 0.5, M)
        wc = a + (1 - a) * np.cos(2 * np.pi * n)  # Window coefficients
        if phi == 0:
            win = wc / sum(wc)  # Normalised window
        else:
            m = np.arange(M)  # Element indices 0 .. M-1
            aa = np.exp(-1j * 2 * np.pi * m * phi)  # Steering vector
            ws = np.dot(wc, aa)  # Normalisation factor
            win = aa * wc / ws  # Steered and normalised window

        w = np.asarray(win).view(cls)
        # Metadata from the original Ndarray subclass:
        #   axes=('M',), desc='Hamming (phi=%d)' % phi, shape_desc=('M','1')
        return w
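With the default a=0.54, the coefficients a + (1 - a) * cos(2*pi*n) on n in [-0.5, 0.5] reproduce NumPy's built-in Hamming window exactly (np.hamming evaluates the same cosine shifted by half a period):

import numpy as np

M, a = 32, 0.54
n = np.linspace(-0.5, 0.5, M)
wc = a + (1 - a) * np.cos(2 * np.pi * n)
assert np.allclose(wc, np.hamming(M))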
Example #6
            return  # TODO return the appropriate value

        # Backwards algorithm
        def beta(t):
            if t == (len(X) - 1):
                return np.ones(self.bins)
            return  # TODO return the appropriate value

        res = alpha(t) * beta(t)
        return res / sum(res)


if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Define size of network
    T = 100  # number of time bins
    N = 3    # number of candidates
    M = 5    # number of polls

    # Randomly draw model parameters based on priors
    I = 2 * np.ones(N)
    B = 10 * np.random.randn(M, N)
    B = np.exp(B)
    W = np.concatenate((.5 * np.random.randn(1) + 7, np.random.randn(M)))
    W = np.exp(W)

    model = Model(i=I, b=B, w=W)
    Z, X = model.generate(T)

    plt.plot(range(T), Z)
    plt.show()
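The two TODOs above are the recursive cases of the forward-backward algorithm. For reference, here is a minimal self-contained sketch of the standard recursions for a generic discrete HMM; the names trans, emit and init are illustrative stand-ins, not the Model class's own parameters:

import numpy as np

def forward_backward(X, init, trans, emit):
    # Smoothed state posteriors p(z_t | x_1..x_T) for a discrete HMM
    T, K = len(X), len(init)
    alpha = np.zeros((T, K))
    beta = np.ones((T, K))

    # Forward pass: alpha[t] is proportional to p(x_1..x_t, z_t)
    alpha[0] = init * emit[:, X[0]]
    for t in range(1, T):
        alpha[t] = (alpha[t - 1] @ trans) * emit[:, X[t]]

    # Backward pass: beta[t] is proportional to p(x_{t+1}..x_T | z_t)
    for t in range(T - 2, -1, -1):
        beta[t] = trans @ (emit[:, X[t + 1]] * beta[t + 1])

    gamma = alpha * beta  # Combine, then normalise per time step
    return gamma / gamma.sum(axis=1, keepdims=True)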
Example #7
import numpy as np
import scipy as sp
import scipy.linalg


def learn(A, beta):
    # Computes normalize((1 - eta) * A @ (I - eta * A)^-1) with eta = exp(-beta);
    # `normalize` (row normalisation) is defined elsewhere
    A = normalize(A)
    inverse_argument = np.identity(len(A)) - np.exp(-beta) * A
    inverse = sp.linalg.inv(inverse_argument)
    return normalize((1 - np.exp(-beta)) * (A @ inverse))
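With eta = exp(-beta), the quantity (1 - eta) * A @ inv(I - eta * A) is the geometric series (1 - eta) * sum_k eta^k A^(k+1), which is what the truncated Q_inv in Example #9 approximates. A numerical check of that identity on a row-stochastic matrix (the normalize stand-in below is an assumption; the original helper is defined elsewhere):

import numpy as np
import scipy as sp
import scipy.linalg

def normalize(A):
    # Assumed row normalisation; stand-in for the original helper
    return A / A.sum(axis=1, keepdims=True)

rng = np.random.default_rng(2)
A = normalize(rng.random((5, 5)))
eta = np.exp(-1.0)  # beta = 1

closed_form = (1 - eta) * (A @ sp.linalg.inv(np.identity(5) - eta * A))
series = (1 - eta) * sum(eta ** k * np.linalg.matrix_power(A, k + 1)
                         for k in range(200))
assert np.allclose(closed_form, series)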
Example #8
import numpy as np
import scipy as sp
import scipy.linalg


def get_optimal_directly(A_target, beta):
    I = np.identity(len(A_target))
    inv_argument = I * (1 - np.exp(-beta)) + np.exp(-beta) * A_target
    # print(np.linalg.cond(inv_argument, p='fro'))
    inv = sp.linalg.inv(inv_argument)
    return inv @ A_target
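Since the inverse is only ever applied to A_target, solving the linear system directly avoids forming an explicit inverse and is generally more accurate; a sketch of the equivalent computation (the function name is illustrative):

import numpy as np
import scipy as sp
import scipy.linalg

def get_optimal_via_solve(A_target, beta):
    # Same result as get_optimal_directly, but via a linear solve:
    # (I * (1 - eta) + eta * A_target) X = A_target
    I = np.identity(len(A_target))
    lhs = I * (1 - np.exp(-beta)) + np.exp(-beta) * A_target
    return sp.linalg.solve(lhs, A_target)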
Example #9
import numpy as np


def Q_inv(A, beta, depth):
    # Truncated Neumann series for (I - nu * A)^-1 with nu = exp(-beta)
    nu = np.exp(-beta)
    result = np.zeros_like(A, dtype=float)
    for i in range(depth):
        result += np.linalg.matrix_power(nu * A, i)
    return result
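Because Q_inv is a truncated Neumann series, it converges to the exact resolvent whenever nu times the spectral radius of A is below one. A quick check against SciPy's inverse on illustrative data:

import numpy as np
import scipy as sp
import scipy.linalg

rng = np.random.default_rng(3)
A = rng.random((4, 4))
A /= A.sum(axis=1, keepdims=True)  # Row-stochastic: spectral radius 1
beta = 1.0
nu = np.exp(-beta)                 # nu < 1, so the series converges

approx = Q_inv(A, beta, depth=200)
exact = sp.linalg.inv(np.identity(4) - nu * A)
assert np.allclose(approx, exact)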