Example no. 1
def sparse_algorithm(mp: np.ndarray,
                     Pp: np.ndarray,
                     model: np.ndarray,
                     vis: np.ndarray,
                     weight: np.ndarray,
                     Q: np.ndarray,
                     R: np.ndarray,
                     ant1: np.ndarray,
                     ant2: np.ndarray,
                     tbin_indices: np.ndarray,
                     tbin_counts: np.ndarray,
                     Nsamples: np.int32 = 10):
    """Sparse-matrix implementation of EnKF algorithm. Not
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0] // n_time

    # Number of Antennas (from n_bl = n_ant * (n_ant - 1) / 2)
    n_ant = int((np.sqrt(8 * n_bl + 1) + 1)) // 2

    # Dimensions
    n_chan, n_dir = model.shape[1:]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2 * axis_length, 2 * axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices (allocated but not used in this EnKF
    # variant, which only tracks the diagonal covariance C)
    P = np.zeros(covs_shape, dtype=np.complex128)

    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)

    C = Pp.real

    # Select CSR jacobian function
    aug_jac = compute_aug_csr

    # Calculate R^{-1} for a diagonal (not used in the
    # ensemble update below)
    Rinv = np.diag(1.0 / np.diag(R))

    # Run Extended Kalman Filter with
    # Sparse matrices
    print("==> Ensemble Kalman Filter (SPARSE):")
    for k in range(1, n_time):

        # Progress Bar
        bar_len = 100
        total = n_time - 1
        filled_len = int(round(bar_len * k / float(total)))
        bar = u"\u2588" * filled_len + " " * (bar_len - filled_len)
        print("\r%d%%|%s| %d/%d" % (k / total * 100, bar, k, total), end="")

        q = process_noise(Q)

        mp = gains_vector(m[k - 1])

        # Ensemble Step
        M = state_ensemble(mp, C) + q

        mi = np.mean(M, axis=1)

        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]

        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]
        jones_slice = gains_reshape(mi, shape)

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                    ant2_slice)

        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        D = J @ C @ J_herm + R
        K = C @ J_herm @ np.linalg.inv(D)

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, n_ant, n_chan)

        e = measure_noise(R)

        Y = np.zeros((y.shape[0], Nsamples), dtype=np.complex128)
        Mf = np.zeros_like(M)

        # Perturb the observation for each ensemble member
        # (jones_slice is recomputed per member here, but the update
        # below reuses the Jacobian J linearised about the ensemble mean)
        for i in range(Nsamples):
            Y[:, i] = y + e[:, i]
            jones_slice = gains_reshape(M[:, i], shape)

        # Ensemble update
        Mf = M + K @ (Y - J @ M)

        mf = np.mean(Mf, axis=1)
        C = np.diag(np.diag(np.cov(Mf))).real

        m[k] = gains_reshape(mf, shape)

    # Newline
    print()

    # Return Posterior states
    return m
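
The ensemble update in the loop above is the standard perturbed-observation EnKF form: K = C J^H (J C J^H + R)^{-1} and Mf = M + K (Y - J M), with the ensemble covariance C kept diagonal. Below is a minimal, self-contained sketch of that update on a toy linear problem; every name and dimension here is illustrative and not taken from the codebase.

# Toy perturbed-observation EnKF update (illustrative sketch only)
import numpy as np

rng = np.random.default_rng(0)
n_state, n_obs, n_samples = 4, 6, 200

# Toy prior: mean mp, diagonal covariance C, and an ensemble M drawn from it
mp = rng.standard_normal(n_state)
C = np.diag(rng.uniform(0.5, 1.5, n_state))
M = mp[:, None] + np.linalg.cholesky(C) @ rng.standard_normal((n_state, n_samples))

# Toy linear observation operator J, noise covariance R, and one observation y
J = rng.standard_normal((n_obs, n_state))
R = 0.1 * np.eye(n_obs)
y = J @ rng.standard_normal(n_state) + rng.multivariate_normal(np.zeros(n_obs), R)

# Kalman gain and perturbed-observation update, mirroring the loop above
D = J @ C @ J.conj().T + R
K = C @ J.conj().T @ np.linalg.inv(D)
Y = y[:, None] + rng.multivariate_normal(np.zeros(n_obs), R, size=n_samples).T
Mf = M + K @ (Y - J @ M)

mf = np.mean(Mf, axis=1)                    # posterior mean estimate
C_post = np.diag(np.diag(np.cov(Mf))).real  # diagonalised posterior covariance
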
Example no. 2
def sparse_algorithm(mp: np.ndarray, Pp: np.ndarray, model: np.ndarray,
                     vis: np.ndarray, weight: np.ndarray, Q: np.ndarray,
                     R: np.ndarray, ant1: np.ndarray, ant2: np.ndarray,
                     tbin_indices: np.ndarray, tbin_counts: np.ndarray):
    """Sparse-matrix implementation of EKF algorithm. Not
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0] // n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8 * n_bl + 1) + 1)) // 2

    # Dimensions
    n_chan, n_dir = model.shape[1:]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2 * axis_length, 2 * axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)

    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)
    P[0] = Pp

    # Select CSR jacobian function
    aug_jac = compute_aug_csr

    # Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0 / np.diag(R))

    # Run Extended Kalman Filter with
    # Sparse matrices
    head = "==> Extended Kalman Filter (SPARSE): "
    for k in range(1, n_time):

        # Progress Bar
        bar_len = 100
        total = n_time - 1
        filled_len = int(round(bar_len * k / float(total)))
        bar = u"\u2588" * filled_len + " " * (bar_len - filled_len)
        print("\r%s%d%%|%s| %d/%d" % (head, k / total * 100, bar, k, total),
              end="")

        # Predict Step
        mp = gains_vector(m[k - 1])
        Pp = P[k - 1] + Q

        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]

        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]
        jones_slice = m[k - 1]

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                    ant2_slice)

        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, n_ant, n_chan)

        # Update Step
        v = y - csr_dot_vec(J, mp)
        Pinv = np.diag(1.0 / np.diag(Pp))
        Tinv = np.linalg.inv(Pinv + J_herm @ Rinv @ J)
        Sinv = Rinv - Rinv @ J @ Tinv @ J_herm @ Rinv
        K = Pp @ J_herm @ Sinv

        # Record Posterior values
        m[k] = gains_reshape(mp + K @ v / 2.0, shape)
        P[k] = np.diag(np.diag(Pp - K @ J @ Pp / 2.0).real)

    # Newline
    print()

    # Return Posterior states and covariances
    return m, P
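
The update step above avoids inverting the full innovation covariance S = J Pp J^H + R directly; instead it applies the matrix inversion lemma, building Sinv = Rinv - Rinv J (Pinv + J^H Rinv J)^{-1} J^H Rinv from the diagonal Pp and R, so that only a state-sized matrix is inverted. The sketch below (real-valued, toy dimensions, no names from the codebase) checks that this form agrees with the direct inverse.

# Matrix-inversion-lemma form of the innovation covariance inverse (sketch)
import numpy as np

rng = np.random.default_rng(1)
n_state, n_obs = 5, 8

J = rng.standard_normal((n_obs, n_state))
Pp = np.diag(rng.uniform(0.5, 2.0, n_state))   # diagonal prior covariance
R = np.diag(rng.uniform(0.1, 0.5, n_obs))      # diagonal measurement covariance

Rinv = np.diag(1.0 / np.diag(R))
Pinv = np.diag(1.0 / np.diag(Pp))

# Lemma form used in the loop above: invert only a state-sized matrix
Tinv = np.linalg.inv(Pinv + J.T @ Rinv @ J)
Sinv = Rinv - Rinv @ J @ Tinv @ J.T @ Rinv

# Direct form: invert the observation-sized innovation covariance
S = J @ Pp @ J.T + R
assert np.allclose(Sinv, np.linalg.inv(S))

# Either way, the Kalman gain is K = Pp J^H Sinv
K = Pp @ J.T @ Sinv
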
Example no. 3
def numba_algorithm(
    mp           : np.ndarray, 
    Pp           : np.ndarray, 
    model        : np.ndarray, 
    vis          : np.ndarray, 
    weight       : np.ndarray, 
    Q            : np.ndarray, 
    R            : np.ndarray, 
    ant1         : np.ndarray, 
    ant2         : np.ndarray, 
    tbin_indices : np.ndarray, 
    tbin_counts  : np.ndarray,
    alpha        : np.float64):  

    """Numpy-matrix implementation of EKF algorithm. It is
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0]//n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8*n_bl + 1) + 1)/2)

    # Dimensions
    n_chan, n_dir = model.shape[1], model.shape[2]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size    
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2*axis_length, 2*axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)
    
    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)
    P[0] = Pp
    
    # Select CSR jacobian function 
    aug_jac = compute_aug_np
    
    # Calculate R^{-1} for a diagonal (only needed by the
    # commented-out full update below)
    Rinv = np.diag(1.0/np.diag(R))
    I = np.eye(Rinv.shape[0])

    # Run Extended Kalman Filter with
    # NUMPY matrices
    head = "==> Extended Kalman Filter (NUMPY|JIT): "
    for k in range(1, n_time): 
        
        # Progress Bar in object-mode
        with objmode():
            progress_bar(head, n_time, k)
        
        # Predict Step
        mp = gains_vector(m[k - 1])
        Pp = (P[k - 1] + Q)
        
        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]
        
        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]
        jones_slice = m[k - 1]        

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, 
                        jones_slice, ant1_slice, ant2_slice)
        
        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice,
                            n_ant, n_chan)

        # Model measure vector (not used in the update below)
        ym = measure_vector(model_slice[:, :, 0], weight_slice,
                            n_ant, n_chan)

        # Update Step

        #! NORMAL IMPLEMENTATION (FULL)
        # v = y - J @ mp
        # Pinv = np.diag(1.0/np.diag(Pp))
        # Tinv = np.linalg.inv(Pinv + J_herm @ Rinv @ J)
        # Sinv = Rinv - Rinv @ J @ Tinv @ J_herm @ Rinv
        # K = Pp @ J_herm @ Sinv

        # u = np.diag(J_herm @ J)
        # z = J_herm @ v
        # p = np.diag(Pp)
        # pinv = 1.0/p
        # mt = mp + alpha * p * (1.0 - u/(pinv + u)) * z
        # pt = p - alpha * p * (1.0 - u/(pinv + u)) * u * p

        # Record Posterior values
        # m[k] = gains_reshape(mt, shape)
        # P[k] = np.diag(pt.real)

        # mt = mp + alpha * K @ v
        # m[k] = gains_reshape(mp + alpha * K @ v, shape)
        # P[k] = np.diag(np.diag(Pp - alpha * K @ J @ Pp).real)


        # DIAGONALISATION IMPLEMENTATION
        v = y - J @ mp 
        p = np.diag(Pp)
        pinv = 1.0/p
        u = diag_mat_dot_mat(J_herm, J)  # diagonal of J^H J

        z = J_herm @ v  # J^H v, the residual projected into the state space
        updt = alpha / (pinv + u)
        est_m = mp + z * updt
        est_P = (1 - alpha) * p + updt
        
        # Record Posterior values
        m[k] = gains_reshape(est_m, shape)
        P[k] = np.diag(est_P.real)

    # Newline
    print()

    # Return Posterior states and covariances
    return m, P
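
In the diagonalisation branch above, the full matrices are replaced by their diagonals: with p = diag(Pp) and u = diag(J^H J), each state element receives a scalar information-form correction z * alpha / (1/p + u), where z = J^H v and alpha acts as a damping factor; est_P applies the corresponding expression to the variance. A minimal sketch of that arithmetic on random toy data is given below; diag_mat_dot_mat is replaced by an einsum, and all names and sizes are illustrative only.

# Per-element (diagonalised) update, mirroring the expressions above (sketch)
import numpy as np

rng = np.random.default_rng(2)
n_state, n_obs = 4, 10
alpha = 0.5

J = rng.standard_normal((n_obs, n_state))
p = rng.uniform(0.5, 2.0, n_state)            # diag of the prior covariance Pp
mp = rng.standard_normal(n_state)
y = rng.standard_normal(n_obs)

v = y - J @ mp                                # innovation
u = np.einsum('ij,ij->j', J.conj(), J).real   # diag of J^H J
z = J.conj().T @ v                            # J^H v

updt = alpha / (1.0 / p + u)
est_m = mp + z * updt                         # damped information-form mean update
est_P = (1 - alpha) * p + updt                # posterior variance, as in the loop above
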
Example no. 4
def sparse_algorithm(mp: np.ndarray,
                     Pp: np.ndarray,
                     model: np.ndarray,
                     vis: np.ndarray,
                     weight: np.ndarray,
                     Q: np.ndarray,
                     R: np.ndarray,
                     ant1: np.ndarray,
                     ant2: np.ndarray,
                     tbin_indices: np.ndarray,
                     tbin_counts: np.ndarray,
                     tol: np.float64 = 1e-5,
                     maxiter: np.int32 = 20,
                     alpha: np.float64 = 0.5):
    """Sparse-matrix implementation of Iterated-EKF algorithm. Not
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0] // n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8 * n_bl + 1) + 1)) // 2

    # Dimensions
    n_chan, n_dir = model.shape[1:]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2 * axis_length, 2 * axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)

    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)
    P[0] = Pp

    # Select CSR jacobian function
    aug_jac = compute_aug_csr

    # Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0 / np.diag(R))

    # Run Iterated Extended Kalman Filter with
    # Sparse matrices
    head = "==> Iterated Extended Kalman Filter (SPARSE): "
    for k in range(1, n_time):

        # Progress Bar
        progress_bar(head, n_time, k)

        # Predict Step
        mp = gains_vector(m[k - 1])
        Pp = P[k - 1] + Q

        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]

        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]

        # Current Iteration jones
        jones_slice = gains_reshape(mp, shape)

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                    ant2_slice)

        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, n_ant, n_chan)

        # Initial state
        mi = mp.copy()

        # Current state
        mn = mp

        # Inverse of Prior Covariance
        Pinv = np.diag(1.0 / np.diag(Pp))

        # Iteration counter
        i = 0

        # Iterate the state estimate to reduce
        # linearisation bias
        while i < maxiter:
            # Calculate Augmented Jacobian
            J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                        ant2_slice)

            # Hermitian of Jacobian
            J_herm = J.conjugate().T

            # Update Step
            v = y - csr_dot_vec(J, mi)
            Tinv = np.linalg.inv(Pinv + J_herm @ Rinv @ J)
            Sinv = Rinv - Rinv @ J @ Tinv @ J_herm @ Rinv
            K = Pp @ J_herm @ Sinv

            # Next state update
            mt = mn + alpha * (mi - mn + K @ v)

            # Stop if tolerance reached
            if np.mean(np.abs(mt - mn)) <= tol:
                break

            # Else iterate next step
            i += 1
            mn = mt

            # Next Iteration jones
            jones_slice = gains_reshape(mn, shape)

        # Record Posterior values from last iterated state
        m[k] = gains_reshape(mt, shape)
        P[k] = np.diag(np.diag(Pp - K @ J @ Pp).real)

    # Newline
    print()

    # Return Posterior states and covariances
    return m, P
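
For a measurement model that is linear in the state, the Jacobian does not change between iterations, and the damped inner loop above reduces to a simple relaxation: mt converges geometrically, at rate (1 - alpha), to the standard update mp + K @ v. The sketch below demonstrates that limiting behaviour on toy data; all names and sizes are illustrative and not from the codebase.

# Damped iteration of the update step for a constant Jacobian (sketch)
import numpy as np

rng = np.random.default_rng(3)
n_state, n_obs = 3, 5
alpha, tol, maxiter = 0.5, 1e-8, 50

J = rng.standard_normal((n_obs, n_state))
Pp = np.diag(rng.uniform(0.5, 2.0, n_state))
R = 0.1 * np.eye(n_obs)
mp = rng.standard_normal(n_state)
y = rng.standard_normal(n_obs)

K = Pp @ J.T @ np.linalg.inv(J @ Pp @ J.T + R)
v = y - J @ mp       # constant here, since mi stays at the linearisation point

mi = mp.copy()       # fixed linearisation point
mn = mp              # current iterate
for i in range(maxiter):
    mt = mn + alpha * (mi - mn + K @ v)
    if np.mean(np.abs(mt - mn)) <= tol:
        break
    mn = mt

# The iteration relaxes onto the standard (undamped) Kalman update
assert np.allclose(mt, mp + K @ v, atol=1e-6)
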
Example no. 5
def clean_meas_vec(data_slice, n_ant, n_chan):
    """Build the measurement vector from the clean (noise-free)
    visibilities and weights in a data slice."""
    clean_vis, _, _, weight, _, _, _ = data_slice
    return measure_vector(clean_vis, weight, n_ant, n_chan)