Example #1
def new_Q(T, ms, Ps, G):
    """TODO DOC STRING"""

    print("==> Tuning Q")
    SIGMA = np.zeros_like(Ps[0], dtype=np.complex128)
    PHI = np.zeros_like(Ps[0], dtype=np.complex128)
    C = np.zeros_like(Ps[0], dtype=np.complex128)
    shape = (Ps[0].shape[0], 1)

    for k in range(1, T + 1):
        m1 = gains_vector(ms[k]).reshape(shape)
        m2 = gains_vector(ms[k - 1]).reshape(shape)

        SIGMA += Ps[k] + m1 @ m1.conj().T
        PHI += Ps[k - 1] + m2 @ m2.conj().T
        C += Ps[k] @ G[k - 1].conj().T + m1 @ m2.conj().T

    SIGMA /= T
    PHI /= T
    C /= T

    # Diagonal real-values
    Q_diag = np.diag(SIGMA - C - C.conj().T + PHI).real

    # Return new Q
    return np.diag(Q_diag)
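
These time-averages match a standard EM-style M-step update of the process noise for a random-walk state model. Below is a minimal self-contained sketch of the same update for a scalar state (illustrative only; it does not use the module's `gains_vector` helper):

import numpy as np

def toy_new_Q(ms, Ps, G):
    # ms: smoothed means, Ps: smoothed variances, G: smoother gains (scalars)
    T = len(ms) - 1
    SIGMA = PHI = C = 0.0
    for k in range(1, T + 1):
        SIGMA += Ps[k] + ms[k] * np.conj(ms[k])
        PHI += Ps[k - 1] + ms[k - 1] * np.conj(ms[k - 1])
        C += Ps[k] * np.conj(G[k - 1]) + ms[k] * np.conj(ms[k - 1])
    return ((SIGMA - C - np.conj(C) + PHI) / T).real

q_new = toy_new_Q(np.array([1.0, 1.1, 0.9]), np.array([0.5, 0.4, 0.3]),
                  np.array([0.8, 0.7]))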
Example #2
def numpy_algorithm(m: np.ndarray, P: np.ndarray, Q: np.ndarray):
    """Numpy-matrix implementation of the Extended Kalman Smoother
    algorithm."""

    # State dimensions
    n_time = m.shape[0]

    # Original State Shape
    gains_shape = m.shape[1:]

    # Smooth State Vectors
    ms = np.zeros_like(m)

    # Smooth Covariance matrices
    Ps = np.zeros_like(P)
    G_values = np.zeros_like(P)

    # Set Initial Smooth Values
    ms[-1] = m[-1]
    Ps[-1] = P[-1]

    # Run Extended Kalman Smoother with
    # Numpy matrices
    head = "==> Extended Kalman Smoother (NUMPY|JIT): "
    for k in range(-2, -(n_time + 1), -1):

        # Progress Bar in object-mode
        with objmode():
            bar_len = 100
            total = n_time - 1
            filled_len = int(round(bar_len * (-k - 1) / float(n_time - 1)))
            bar = u"\u2588" * filled_len + ' '\
                    * (bar_len - filled_len)
            print("\r%s%d%%|%s| %d/%d" %
                  (head, (-k - 1) / total * 100, bar, -k - 1, total),
                  end="")

        # Predict Step
        mp = gains_vector(m[k])
        Pt = P[k].astype(np.complex128)
        Pp = Pt + Q

        # Smooth Step
        Pinv = np.linalg.inv(Pp).astype(np.complex128)
        G = Pt @ Pinv

        # Record Posterior Smooth Values
        e = gains_vector(ms[k + 1]) - mp
        E = (Ps[k + 1] - Pp).astype(np.complex128)
        ms[k] = gains_reshape(mp + G @ e, gains_shape)
        Ps[k] = np.diag(np.diag(Pt + G @ E @ G.T).real)

        G_values[k] = G.real

    # Newline
    print()

    # Return Posterior smooth states and covariances
    return ms, Ps, G_values
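
For reference, a minimal self-contained sketch of the backward recursion this loop implements (identity state transition), reduced to a scalar state; illustrative only:

import numpy as np

def toy_smoother(m, P, Q):
    # m, P: filtered means and variances per time step; Q: process noise
    ms, Ps = m.copy(), P.copy()
    for k in range(len(m) - 2, -1, -1):
        Pp = P[k] + Q                            # predict
        G = P[k] / Pp                            # smoother gain
        ms[k] = m[k] + G * (ms[k + 1] - m[k])    # smoothed mean
        Ps[k] = P[k] + G * (Ps[k + 1] - Pp) * G  # smoothed variance
    return ms, Ps

ms, Ps = toy_smoother(np.array([1.0, 1.2, 0.9]), np.array([0.5, 0.4, 0.3]), 0.1)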
Example #3
def new_P0(m0, ms0, Ps0, alpha=1.0):
    """TODO DOC STRING"""

    print("==> Tuning P0")
    # Calculate diagonal of P0 - considering only the diagonal
    # and real values
    dm = gains_vector(ms0 - m0).reshape(-1, 1)
    P0_diag = np.diag(Ps0 + (dm @ dm.conj().T).real)

    # Return full matrix of P0 with alpha component
    return alpha * np.diag(P0_diag)
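
A hypothetical sketch of how `new_Q` and `new_P0` might be combined with the filter and smoother in an EM-style tuning loop. The `ekf.numpy_algorithm` / `eks.numpy_algorithm` names and their argument order are taken from the `main()` drivers further down; the helper name `em_tune`, the `data` tuple and the iteration count are assumptions, not part of the module:

def em_tune(mp, Pp, Q, R, data, n_iters=5):
    # data bundles the measurement-set arrays used by the filter (assumed order)
    model, vis, weight, ant1, ant2, tbin_indices, tbin_counts = data
    for _ in range(n_iters):
        m, P = ekf.numpy_algorithm(mp, Pp, model, vis, weight, Q, R,
                                   ant1, ant2, tbin_indices, tbin_counts)
        ms, Ps, G = eks.numpy_algorithm(m, P, Q)
        Q = new_Q(len(ms) - 1, ms, Ps, G)   # re-estimate process noise
        Pp = new_P0(m[0], ms[0], Ps[0])     # re-estimate prior covariance
    return ms, Ps, Q, Pp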
Example #4
def numba_algorithm(m: np.ndarray, P: np.ndarray, Q: np.ndarray):
    """Numpy-matrix implementation of the Extended Kalman Smoother
    algorithm, using a diagonal covariance inverse. It is numba-compiled."""

    # State dimensions
    n_time = m.shape[0]

    # Original State Shape
    gains_shape = m.shape[1:]

    # Smooth State Vectors
    ms = np.zeros_like(m)

    # Smooth Covariance matrices
    Ps = np.zeros_like(P)
    G_values = np.zeros_like(P)

    # Set Initial Smooth Values
    ms[-1] = m[-1]
    Ps[-1] = P[-1]

    # Run Extended Kalman Smoother with
    # Numpy matrices
    head = "==> Extended Kalman Smoother (NUMPY|JIT): "
    for k in range(-2, -(n_time + 1), -1):

        # Progress Bar in object-mode
        with objmode():
            progress_bar(head, n_time, -k - 1)

        # Predict Step
        mp = gains_vector(m[k])
        Pt = P[k].astype(np.complex128)
        Pp = Pt + Q

        # Smooth Step
        Pinv = np.diag(1.0 / np.diag(Pp))
        G = Pt @ Pinv

        # Record Posterior Smooth Values
        e = gains_vector(ms[k + 1]) - mp
        E = (Ps[k + 1] - Pp).astype(np.complex128)
        ms[k] = gains_reshape(mp + G @ e, gains_shape)
        Ps[k] = np.diag(np.diag(Pt + G @ E @ G.T).real)

        G_values[k] = G.real

    # Newline
    print()

    # Return Posterior smooth states and covariances
    return ms, Ps, G_values
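
Apart from using the `progress_bar` helper, the main change from the NumPy smoother above is the covariance inverse: since `Pp` is kept diagonal, `np.linalg.inv` is replaced by an element-wise reciprocal of the diagonal. A tiny self-contained check of that shortcut (illustrative only):

import numpy as np

Pp = np.diag([2.0, 4.0, 0.5])
assert np.allclose(np.diag(1.0 / np.diag(Pp)), np.linalg.inv(Pp))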
Example #5
def sparse_algorithm(mp: np.ndarray,
                     Pp: np.ndarray,
                     model: np.ndarray,
                     vis: np.ndarray,
                     weight: np.ndarray,
                     Q: np.ndarray,
                     R: np.ndarray,
                     ant1: np.ndarray,
                     ant2: np.ndarray,
                     tbin_indices: np.ndarray,
                     tbin_counts: np.ndarray,
                     Nsamples: np.int32 = 10):
    """Sparse-matrix implementation of EnKF algorithm. Not
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0] // n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8 * n_bl + 1) + 1)) // 2

    # Dimensions
    n_chan, n_dir = model.shape[1:]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2 * axis_length, 2 * axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)

    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)

    C = Pp.real

    # Select CSR jacobian function
    aug_jac = compute_aug_csr

    # Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0 / np.diag(R))

    # Run Extended Kalman Filter with
    # Sparse matrices
    print("==> Ensemble Kalman Filter (SPARSE):")
    for k in range(1, n_time):

        # Progress Bar
        bar_len = 100
        total = n_time - 1
        filled_len = int(round(bar_len * k / float(n_time - 1)))
        bar = u"\u2588" * filled_len + ' '\
                * (bar_len - filled_len)
        print("\r%d%%|%s| %d/%d" % (k / total * 100, bar, k, total), end="")

        q = process_noise(Q)

        mp = gains_vector(m[k - 1])

        # Ensemble Step
        M = state_ensemble(mp, C) + q

        mi = np.mean(M, axis=1)

        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]

        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]
        jones_slice = gains_reshape(mi, shape)

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                    ant2_slice)

        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        D = J @ C @ J_herm + R
        K = C @ J_herm @ np.linalg.inv(D)

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, n_ant, n_chan)

        e = measure_noise(R)

        Y = np.zeros((y.shape[0], Nsamples), dtype=np.complex128)
        Mf = np.zeros_like(M)

        for i in range(Nsamples):
            Y[:, i] = y + e[:, i]
            jones_slice = gains_reshape(M[:, i], shape)

        Mf = M + K @ (Y - J @ M)

        mf = np.mean(Mf, axis=1)
        C = np.diag(np.diag(np.cov(Mf))).real

        m[k] = gains_reshape(mf, shape)

    # Newline
    print()

    # Return Posterior states and covariances
    return m
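
A minimal self-contained sketch of the ensemble update pattern used above (perturbed observations `Y` and the correction `M + K @ (Y - J @ M)`), reduced to a scalar state observed directly; illustrative only:

import numpy as np

rng = np.random.default_rng(42)
Nsamples, truth, C, R = 64, 1.5, 1.0, 0.1

M = 0.0 + np.sqrt(C) * rng.standard_normal(Nsamples)  # prior ensemble
y = truth + np.sqrt(R) * rng.standard_normal()         # measurement
Y = y + np.sqrt(R) * rng.standard_normal(Nsamples)     # perturbed observations

K = C / (C + R)                                         # scalar Kalman gain
Mf = M + K * (Y - M)                                    # updated ensemble
print(Mf.mean(), Mf.var())                              # posterior mean / spread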
Example #6
def sparse_algorithm(mp: np.ndarray, Pp: np.ndarray, model: np.ndarray,
                     vis: np.ndarray, weight: np.ndarray, Q: np.ndarray,
                     R: np.ndarray, ant1: np.ndarray, ant2: np.ndarray,
                     tbin_indices: np.ndarray, tbin_counts: np.ndarray):
    """Sparse-matrix implementation of EKF algorithm. Not
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0] // n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8 * n_bl + 1) + 1)) // 2

    # Dimensions
    n_chan, n_dir = model.shape[1:]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2 * axis_length, 2 * axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)

    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)
    P[0] = Pp

    # Select CSR jacobian function
    aug_jac = compute_aug_csr

    # Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0 / np.diag(R))

    # Run Extended Kalman Filter with
    # Sparse matrices
    head = "==> Extended Kalman Filter (SPARSE): "
    for k in range(1, n_time):

        # Progress Bar
        bar_len = 100
        total = n_time - 1
        filled_len = int(round(bar_len * k / float(n_time - 1)))
        bar = u"\u2588" * filled_len + ' '\
                * (bar_len - filled_len)
        print("\r%s%d%%|%s| %d/%d" % (head, k / total * 100, bar, k, total),
              end="")

        # Predict Step
        mp = gains_vector(m[k - 1])
        Pp = P[k - 1] + Q

        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]

        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]
        jones_slice = m[k - 1]

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                    ant2_slice)

        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, n_ant, n_chan)

        # Update Step
        v = y - csr_dot_vec(J, mp)
        Pinv = np.diag(1.0 / np.diag(Pp))
        Tinv = np.linalg.inv(Pinv + J_herm @ Rinv @ J)
        Sinv = Rinv - Rinv @ J @ Tinv @ J_herm @ Rinv
        K = Pp @ J_herm @ Sinv

        # Record Posterior values
        m[k] = gains_reshape(mp + K @ v / 2.0, shape)
        P[k] = np.diag(np.diag(Pp - K @ J @ Pp / 2.0).real)

    # Newline
    print()

    # Return Posterior states and covariances
    return m, P
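
The update step forms `Sinv` through the matrix-inversion (Woodbury) identity rather than inverting `J @ Pp @ J_herm + R` directly, which is exact here because `Pp` is kept diagonal. A small self-contained numerical check of the identity (illustrative only):

import numpy as np

rng = np.random.default_rng(1)
J = rng.standard_normal((6, 3)) + 1j * rng.standard_normal((6, 3))
P = np.diag(rng.uniform(0.5, 2.0, 3)).astype(np.complex128)
R = np.diag(rng.uniform(0.5, 2.0, 6)).astype(np.complex128)

Rinv = np.diag(1.0 / np.diag(R))
Pinv = np.diag(1.0 / np.diag(P))
Tinv = np.linalg.inv(Pinv + J.conj().T @ Rinv @ J)
Sinv = Rinv - Rinv @ J @ Tinv @ J.conj().T @ Rinv

assert np.allclose(Sinv, np.linalg.inv(J @ P @ J.conj().T + R))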
def main():
    # Get configurations from yaml
    yaml_args = parser.yaml_parser('config.yml')
    cms_args = yaml_args['create_ms']
    fms_args = yaml_args['from_ms']

    # Create measurement set
    if path.isdir(cms_args.msname):
        s = input(f"==> `{cms_args.msname}` exists, "\
                + "continue with `create_ms`? (y/n) ")

        if s == 'y':
            create_ms.new(cms_args)
    else:
        create_ms.new(cms_args)

    # Generate jones and data
    if path.isfile(fms_args.out):
        s = input(f"==> `{fms_args.out}` exists, "\
                + "continue with `generate`? (y/n) ")

        if s == 'y':
            from_ms.both(fms_args)
    else:
        from_ms.both(fms_args)

    # Load ms and gains data
    tbin_indices, tbin_counts, ant1, ant2,\
            vis, model, weight, jones = loader.get(fms_args)

    # Get dimension values
    n_time, n_ant, n_chan, n_dir = jones.shape

    sigma_f = 1.0
    sigma_n = 0.1

    ext_kalman_filter = ekf.numpy_algorithm
    ext_kalman_smoother = eks.numpy_algorithm

    np.random.seed(666)
    mp = np.ones((n_ant, n_chan, n_dir, 2), dtype=np.complex128)
    mp = gains_vector(mp)

    Pp = np.eye(mp.size, dtype=np.complex128)
    Q = sigma_f**2 * np.eye(mp.size, dtype=np.complex128)
    R = 2 * sigma_n**2 * np.eye(n_ant * (n_ant - 1) * n_chan,
                                dtype=np.complex128)

    m, P = ext_kalman_filter(mp, Pp, model, vis, weight, Q, R, ant1, ant2,
                             tbin_indices, tbin_counts)

    ms, Ps, G = ext_kalman_smoother(m, P, Q)

    n_states = 2 * n_time * n_ant * n_chan * n_dir
    SF1 = np.mean(state_sigma_test(m, jones, P, 1)) * 100
    SS1 = np.mean(state_sigma_test(ms, jones, Ps, 1)) * 100
    SF2 = np.mean(state_sigma_test(m, jones, P, 2)) * 100
    SS2 = np.mean(state_sigma_test(ms, jones, Ps, 2)) * 100
    SF3 = np.mean(state_sigma_test(m, jones, P, 3)) * 100
    SS3 = np.mean(state_sigma_test(ms, jones, Ps, 3)) * 100

    S1 = ['1', f"{SF1:.2f}%", f"{SS1:.2f}%", n_states]
    S2 = ['2', f"{SF2:.2f}%", f"{SS2:.2f}%", n_states]
    S3 = ['3', f"{SF3:.2f}%", f"{SS3:.2f}%", n_states]
    S = [S1, S2, S3]

    print("==> State Sigma-tests")
    print(
        tabulate(S,
                 headers=['Sigmas', 'Filter', 'Smoother', 'States'],
                 tablefmt='fancy_grid'))

    MF1 = np.mean(magnitude_sigma_test(m, jones, P, 1)) * 100
    MS1 = np.mean(magnitude_sigma_test(ms, jones, Ps, 1)) * 100
    MF2 = np.mean(magnitude_sigma_test(m, jones, P, 2)) * 100
    MS2 = np.mean(magnitude_sigma_test(ms, jones, Ps, 2)) * 100
    MF3 = np.mean(magnitude_sigma_test(m, jones, P, 3)) * 100
    MS3 = np.mean(magnitude_sigma_test(ms, jones, Ps, 3)) * 100

    M1 = ['1', f"{MF1:.2f}%", f"{MS1:.2f}%", n_states]
    M2 = ['2', f"{MF2:.2f}%", f"{MS2:.2f}%", n_states]
    M3 = ['3', f"{MF3:.2f}%", f"{MS3:.2f}%", n_states]
    M = [M1, M2, M3]

    print("==> Magnitude Sigma-tests")
    print(
        tabulate(M,
                 headers=['Sigmas', 'Filter', 'Smoother', 'States'],
                 tablefmt='fancy_grid'))

    show = [1, 2, 3]
    plot_time(jones,
              'True Jones',
              '-',
              m,
              'EKF - NUMPY',
              '+',
              ms,
              'EKS - NUMPY',
              '--',
              title='NUMPY Algorithms',
              show=show)

    plt.show()
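
`state_sigma_test` and `magnitude_sigma_test` are not shown in this file; below is a hypothetical sketch of the kind of k-sigma coverage test being tabulated (the fraction of true values lying within k standard deviations of the estimate), an assumption about their behaviour rather than the module's implementation:

import numpy as np

def toy_sigma_test(est, truth, var, k):
    # fraction of entries whose error is within k standard deviations
    return np.mean(np.abs(est - truth) <= k * np.sqrt(var))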
Example #8
def sparse_algorithm(mp: np.ndarray,
                     Pp: np.ndarray,
                     model: np.ndarray,
                     vis: np.ndarray,
                     weight: np.ndarray,
                     Q: np.ndarray,
                     R: np.ndarray,
                     ant1: np.ndarray,
                     ant2: np.ndarray,
                     tbin_indices: np.ndarray,
                     tbin_counts: np.ndarray,
                     tol: np.float64 = 1e-5,
                     maxiter: np.int32 = 20,
                     alpha: np.float64 = 0.5):
    """Sparse-matrix implementation of Iterated-EKF algorithm. Not
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0] // n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8 * n_bl + 1) + 1)) // 2

    # Dimensions
    n_chan, n_dir = model.shape[1:]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2 * axis_length, 2 * axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)

    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)
    P[0] = Pp

    # Select CSR jacobian function
    aug_jac = compute_aug_csr

    # Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0 / np.diag(R))

    # Run Iterated Extended Kalman Filter with
    # Sparse matrices
    head = "==> Iterated Extended Kalman Filter (SPARSE): "
    for k in range(1, n_time):

        # Progress Bar
        progress_bar(head, n_time, k)

        # Predict Step
        mp = gains_vector(m[k - 1])
        Pp = P[k - 1] + Q

        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]

        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]

        # Current Iteration jones
        jones_slice = gains_reshape(mp, shape)

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                    ant2_slice)

        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, n_ant, n_chan)

        # Initial state
        mi = mp.copy()

        # Current state
        mn = mp

        # Inverse of Prior Covariance
        Pinv = np.diag(1.0 / np.diag(Pp))

        # Iteration counter
        i = 0

        # State Estimation to reduce bias on
        # estimation
        while i < maxiter:
            # Calculate Augmented Jacobian
            J = aug_jac(model_slice, weight_slice, jones_slice, ant1_slice,
                        ant2_slice)

            # Hermitian of Jacobian
            J_herm = J.conjugate().T

            # Update Step
            v = y - csr_dot_vec(J, mi)
            Tinv = np.linalg.inv(Pinv + J_herm @ Rinv @ J)
            Sinv = Rinv - Rinv @ J @ Tinv @ J_herm @ Rinv
            K = Pp @ J_herm @ Sinv

            # Next state update
            mt = mn + alpha * (mi - mn + K @ v)

            # Stop if tolerance reached
            if np.mean(np.abs(mt - mn)) <= tol:
                break

            # Else iterate next step
            i += 1
            mn = mt

            # Next Iteration jones
            jones_slice = gains_reshape(mn, shape)

        # Record Posterior values from last iterated state
        m[k] = gains_reshape(mt, shape)
        P[k] = np.diag(np.diag(Pp - K @ J @ Pp).real)

    # Newline
    print()

    # Return Posterior states and covariances
    return m, P
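
For comparison, a self-contained sketch of a textbook damped iterated-EKF update for a scalar state with a nonlinear measurement `y = h(m) + r`; this is a simplified analogue of the iteration above, not the module's augmented complex formulation:

import numpy as np

def damped_iekf_update(mp, Pp, y, R, h, dh, alpha=0.5, tol=1e-6, maxiter=50):
    mn = mp
    for _ in range(maxiter):
        J = dh(mn)                        # relinearise at the current iterate
        K = Pp * J / (J * Pp * J + R)     # scalar Kalman gain
        mt = mn + alpha * (mp - mn + K * (y - h(mn) - J * (mp - mn)))
        if abs(mt - mn) <= tol:
            break
        mn = mt
    return mt, (1.0 - K * J) * Pp

m_post, P_post = damped_iekf_update(0.1, 1.0, 0.8, 0.01, h=np.sin, dh=np.cos)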
def main():   
    # Get configurations from yaml
    yaml_args = parser.yaml_parser('config.yml')
    cms_args = yaml_args['create_ms']
    fms_args = yaml_args['from_ms'] 
    
    # If new ms is made, run `from_ms`
    new_ms = False

    # Create measurement set
    if path.isdir(cms_args.msname):
        s = input(f"==> `{cms_args.msname}` exists, "\
                + "continue with `create_ms`? (y/n) ")
        
        if s == 'y':
            create_ms.new(cms_args) 
            new_ms = True
    else:
        create_ms.new(cms_args)  
        new_ms = True
    
    # Generate jones and data
    if path.isfile(fms_args.out) and not new_ms:
        s = input(f"==> `{fms_args.out}` exists, "\
                + "continue with `generate`? (y/n) ")
        
        if s == 'y':
            from_ms.both(fms_args) 
    else:
        from_ms.both(fms_args) 

    # Load ms and gains data
    tbin_indices, tbin_counts, ant1, ant2,\
            vis, model, weight, jones = loader.get(fms_args)    

    # Get dimension values
    n_time, n_ant, n_chan, n_dir = jones.shape    
    
    # Set parameters for process and measurement noise
    sigma_f = 1.0
    sigma_n = 0.5

    # Select extended kalman filter and smoother algorithms
    ext_kalman_filter = ekf.numpy_algorithm
    ext_kalman_smoother = eks.numpy_algorithm

    # Set random seed
    np.random.seed(666)

    # Create prior state vector
    mp = np.ones((n_ant, n_chan, n_dir, 2), dtype=np.complex128)
    mp = gains_vector(mp)  

    # Create prior covariance matrix
    Pp = np.eye(mp.size, dtype=np.complex128)

    # Process noise matrix
    Q = sigma_f**2 * np.eye(mp.size, dtype=np.complex128)

    # Measurement noise matrix
    R = 2 * sigma_n**2 * np.eye(n_ant * (n_ant - 1) * n_chan, 
                                    dtype=np.complex128) 
    
    # Run EKF
    m, P = ext_kalman_filter(mp, Pp, model, vis, weight, Q, R, 
                                ant1, ant2, tbin_indices, tbin_counts)

    # Run EKS - 3 times
    ms, Ps, _ = ext_kalman_smoother(m, P, Q)
    ms, Ps, _ = ext_kalman_smoother(ms[::-1], Ps[::-1], Q)
    ms, Ps, _ = ext_kalman_smoother(ms[::-1], Ps[::-1], Q)

    # Plot results for antennas 1 to 3, with 0 as reference
    print("==> Finished - creating plots")
    plot_time(
        jones, 'True Jones', '-',
        m, 'EKF', '+',
        ms, 'EKS', '--',
        title='KALCAL: NUMPY|JIT Algorithms',
        show=[1, 2, 3]
    )
    
    plt.show()

    # Done
    print("==> Done.")
Example #10
def numba_algorithm(
    mp           : np.ndarray, 
    Pp           : np.ndarray, 
    model        : np.ndarray, 
    vis          : np.ndarray, 
    weight       : np.ndarray, 
    Q            : np.ndarray, 
    R            : np.ndarray, 
    ant1         : np.ndarray, 
    ant2         : np.ndarray, 
    tbin_indices : np.ndarray, 
    tbin_counts  : np.ndarray,
    alpha        : np.float64):  

    """Numpy-matrix implementation of EKF algorithm. It is
    numba-compiled."""

    # Time counts
    n_time = len(tbin_indices)

    # Number of Baselines
    n_bl = model.shape[0]//n_time

    # Number of Antennas
    n_ant = int((np.sqrt(8*n_bl + 1) + 1)/2)

    # Dimensions
    n_chan, n_dir = model.shape[1], model.shape[2]

    # Matrix-size
    axis_length = n_ant * n_chan * n_dir

    # Original matrix size    
    gains_shape = (n_time, n_ant, n_chan, n_dir, 2)

    # Jacobian shape
    shape = gains_shape[1:]

    # Covariance shape
    covs_shape = (n_time, 2*axis_length, 2*axis_length)

    # State vectors
    m = np.zeros(gains_shape, dtype=np.complex128)

    # Covariance matrices
    P = np.zeros(covs_shape, dtype=np.complex128)
    
    # Initial state and covariance
    m[0] = gains_reshape(mp, shape)
    P[0] = Pp
    
    # Select CSR jacobian function 
    aug_jac = compute_aug_np
    
    # Calculate R^{-1} for a diagonal
    Rinv = np.diag(1.0/np.diag(R))
    I = np.eye(Rinv.shape[0])
    # Run Extended Kalman Filter with 
    # NUMPY matrices
    head = "==> Extended Kalman Filter (NUMPY|JIT): "
    for k in range(1, n_time): 
        
        # Progress Bar in object-mode
        with objmode():
            progress_bar(head, n_time, k)
        
        # Predict Step
        mp = gains_vector(m[k - 1])
        Pp = (P[k - 1] + Q)
        
        # Slice indices
        start = tbin_indices[k - 1]
        end = start + tbin_counts[k - 1]
        
        # Calculate Slices
        row_slice = slice(start, end)
        vis_slice = vis[row_slice]
        model_slice = model[row_slice]
        weight_slice = weight[row_slice]
        ant1_slice = ant1[row_slice]
        ant2_slice = ant2[row_slice]
        jones_slice = m[k - 1]        

        # Calculate Augmented Jacobian
        J = aug_jac(model_slice, weight_slice, 
                        jones_slice, ant1_slice, ant2_slice)
        
        # Hermitian of Jacobian
        J_herm = J.conjugate().T

        # Calculate Measure Vector
        y = measure_vector(vis_slice, weight_slice, 
                            n_ant, n_chan)        

        ym = measure_vector(model_slice[:, :, 0], weight_slice, 
                            n_ant, n_chan)
        # Update Step

        #! NORMAL IMPLEMENTATION (FULL)
        # v = y - J @ mp
        # Pinv = np.diag(1.0/np.diag(Pp))       
        # Tinv = np.linalg.inv(Pinv + J_herm @ Rinv @ J)
        # Sinv = Rinv - Rinv @ J @ Tinv @ J_herm @ Rinv
        # K = Pp @ J_herm @ Sinv

        # u = np.diag(J_herm @ J)
        # z = J_herm @ v
        # p = np.diag(Pp)
        # pinv = 1.0/p        
        # mt = mp + alpha * p * (1.0 - np.diag(J_herm @ J)/(pinv + np.diag(J_herm @ J))) * (J_herm @ v)
        # pt = p - alpha * p * (1.0 - u/(pinv + np.diag(J_herm @ J))) * np.diag(J_herm @ J) * p        

        # Record Posterior values
        # m[k] = gains_reshape(mt, shape)
        # P[k] = np.diag(pt.real) 

        # mt = mp + alpha * K @ v
        # m[k] = gains_reshape(mp + alpha * K @ v, shape)
        # P[k] = np.diag(np.diag(Pp - alpha * K @ J @ Pp).real)


        # DIAGONALISATION IMPLEMENTATION
        v = y - J @ mp 
        p = np.diag(Pp)
        pinv = 1.0/p
        u = diag_mat_dot_mat(J_herm, J) # Diagonal of JHJ
        
        z = J_herm @ v # JHr
        updt = alpha / (pinv + u)
        est_m = mp + z * updt
        est_P = (1 - alpha) * p + updt
        
        # Record Posterior values
        m[k] = gains_reshape(est_m, shape)
        P[k] = np.diag(est_P.real)

    # Newline
    print()

    # Return Posterior states and covariances
    return m, P
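
The diagonalisation branch replaces the full information-form solve with an element-wise division, which is exact only when `J_herm @ J` is diagonal. A small self-contained comparison (illustrative only; damping and the R term are omitted):

import numpy as np

rng = np.random.default_rng(0)
J = rng.standard_normal((8, 4)) + 1j * rng.standard_normal((8, 4))
v = rng.standard_normal(8) + 1j * rng.standard_normal(8)
p = np.full(4, 2.0)                                 # diagonal prior covariance

JHJ = J.conj().T @ J
z = J.conj().T @ v
full = np.linalg.solve(np.diag(1.0 / p) + JHJ, z)   # full information-form step
diag_approx = z / (1.0 / p + np.diag(JHJ))          # element-wise shortcut
print(np.abs(full - diag_approx))                   # nonzero: it is approximate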
Example #11
def true_state_vec(data_slice):
    """Return the jones term (last element of `data_slice`) as a flat
    gains vector."""
    jones = data_slice[-1]
    return gains_vector(jones)