Code example #1
    def _calculate_gamma(self, posterior_mean_estimation_risk_level, posterior_covariance_estimation_risk_level, max_volatility, num_assets):
        # pylint: disable=invalid-name
        """
        Calculate the gamma values appearing in the robust Bayesian allocation objective and risk constraint.

        :param posterior_mean_estimation_risk_level: (float) The investor's confidence in the estimation risk of the posterior mean. A lower value
                                                             corresponds to less confidence and a more conservative investor, while a higher value
                                                             results in a riskier portfolio.
        :param posterior_covariance_estimation_risk_level: (float) The investor's confidence in the estimation risk of the posterior covariance. A lower
                                                                   value corresponds to less confidence and a more conservative investor, while a higher
                                                                   value results in a riskier portfolio.
        :param max_volatility: (float) The maximum preferred volatility of the final robust portfolio.
        :param num_assets: (int) Number of assets in the portfolio.
        :return: (float, float) gamma mean, gamma covariance
        """

        mean_risk_aversion = chi2.ppf(posterior_mean_estimation_risk_level, num_assets)
        gamma_mean = np.sqrt((mean_risk_aversion / self.posterior_mean_confidence) *
                             (self.posterior_covariance_confidence / (self.posterior_covariance_confidence - 2)))

        covariance_risk_aversion = chi2.ppf(posterior_covariance_estimation_risk_level,
                                            num_assets * (num_assets + 1) / 2)
        gamma_covariance = max_volatility / \
                           (self.posterior_covariance_confidence / (self.posterior_covariance_confidence + num_assets + 1) +
                            np.sqrt(2 * self.posterior_covariance_confidence * self.posterior_covariance_confidence * covariance_risk_aversion /
                                    ((self.posterior_covariance_confidence + num_assets + 1) ** 3)))
        return gamma_mean, gamma_covariance
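A minimal numeric sketch of the gamma_mean formula above. The confidence parameters T_mu and T_sigma are made-up illustrative inputs, not values taken from the library.

import numpy as np
from scipy.stats import chi2

num_assets = 10
risk_level = 0.1             # low estimation-risk level: a conservative investor
T_mu, T_sigma = 50.0, 50.0   # hypothetical posterior confidence parameters

q_mu = chi2.ppf(risk_level, num_assets)
gamma_mean = np.sqrt((q_mu / T_mu) * (T_sigma / (T_sigma - 2)))
print(gamma_mean)  # ~0.32 for these inputs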
Code example #2
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from 
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''
    if ekf_state["num_landmarks"] == 0:
        # set association to init new landmarks for all measurements
        return [-1 for m in measurements]

    #0-----associate/match-----[alpha]-----ambiguous-----[beta]-----new_landmark-----
    alpha = chi2.ppf(0.95, df=2)
    beta = chi2.ppf(0.999, df=2)

    measurements = np.array(measurements)[:, 0:2]
    zhat = np.zeros([ekf_state["num_landmarks"], 2])
    S = np.zeros([ekf_state["num_landmarks"], 2, 2])
    Q = np.diag(
        np.array([
            sigmas['range'] * sigmas['range'],
            sigmas['bearing'] * sigmas['bearing']
        ]))
    for j in range(ekf_state["num_landmarks"]):
        zhat[j], H = laser_measurement_model(ekf_state, j)
        S[j] = np.matmul(np.matmul(H, ekf_state['P']), H.T) + Q

    M = alpha * np.ones((measurements.shape[0],
                         ekf_state["num_landmarks"] + measurements.shape[0]))
    for i in range(measurements.shape[0]):
        residuals = measurements[i] - zhat
        for j in range(ekf_state["num_landmarks"]):
            mahalanobis_dist = np.matmul(
                residuals[j], np.matmul(np.linalg.inv(S[j]), residuals[j].T))
            M[i, j] = mahalanobis_dist

    matches = slam_utils.solve_cost_matrix_heuristic(np.copy(M))
    matches.sort()
    assoc = [-2] * measurements.shape[0]  # default: ambiguous until assigned
    for k in range(measurements.shape[0]):
        i, j = matches[k]
        if j >= ekf_state['num_landmarks']:
            if np.amin(M[i, 0:ekf_state['num_landmarks']]) > beta:  # new landmark
                assoc[i] = -1
            else:  # ambiguous
                assoc[i] = -2
        else:  # matched
            assoc[i] = j
    return assoc
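Several of the snippets on this page use the same chi-square gating idea: compare a squared Mahalanobis distance of the innovation against two chi2.ppf thresholds. A self-contained sketch with made-up innovation values:

import numpy as np
from scipy.stats import chi2

alpha = chi2.ppf(0.95, df=2)    # below this: confident match
beta = chi2.ppf(0.999, df=2)    # above this: treat as a new landmark

r = np.array([0.3, 0.05])                  # innovation (range, bearing)
S = np.array([[0.2, 0.0], [0.0, 0.01]])    # innovation covariance
d2 = r @ np.linalg.inv(S) @ r              # squared Mahalanobis distance

if d2 < alpha:
    print(d2, 'match')
elif d2 > beta:
    print(d2, 'new landmark')
else:
    print(d2, 'ambiguous, discard')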
Code example #3
File: pycwt.py Project: ElOceanografo/PyCWT
# NOTE: `period` is an assumed flag (when True, scale_range is interpreted in
# period units); the original snippet referenced it without defining it.
def wave_signif(t, siglevel=0.95, lag1=0.0, test='local', dof=None, scale_range=None, period=True):
    fft_theor = red_spectrum(lag1, t.dt / t.period)
    fft_theor *= t.series.var()  # Include time-series variance
    # No smoothing, DOF = dofmin (Sec. 4)
    if test == 'local':
        return fft_theor * chi2.ppf(siglevel, t.wavelet.dofmin) / t.wavelet.dofmin
    # Time-averaged significance
    elif test == 'global':
        # Eqn. 23
        dof = t.wavelet.dofmin * sp.sqrt(1 + ((len(t.series) * t.dt) / 
                                        (t.wavelet.gamma * t.scales))**2)
        dof[dof < t.wavelet.dofmin] = t.wavelet.dofmin # minimum DOF is dofmin
        return fft_theor * chi2.ppf(siglevel, dof) / dof
    # Scale-averaged significance
    elif test == 'scale':
        if not scale_range:
            raise ValueError("Must supply a scale_range for scale-averaged "
                             "significance testing.")
        if period:
            scale_indices = (t.period >= min(scale_range)) \
                            & (t.period <= max(scale_range))
        else:
            scale_indices = (t.scales >= min(scale_range)) \
                            & (t.scales <= max(scale_range))
        scale_indices = sp.arange(len(scale_indices))[scale_indices]
        na = len(t.series)
        savg = scale_avg(t, min(scale_range), max(scale_range))
        smid = t.minscale * 2 ** (0.5 * (min(scale_range) + max(scale_range)) * t.dscale)
        dof = 2 * na * savg[1] / smid \
                * sp.sqrt(1 + (na * t.dscale / t.wavelet.dj0)**2)
        P = savg[1] * (fft_theor[scale_indices] / t.scales[scale_indices]).sum()
Code example #4
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from 
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''

    if ekf_state["num_landmarks"] == 0:
        # set association to init new landmarks for all measurements
        return [-1 for m in measurements]

    ###
    # Implement this function.
    ###
    Q = np.diag([sigmas['range']**2, sigmas['bearing']**2])
    M = np.zeros((len(measurements), ekf_state['num_landmarks']))
    r = np.zeros((2, ))
    for j in range(ekf_state['num_landmarks']):
        # the measurement model depends only on the landmark, so hoist it
        # out of the per-measurement loop
        zhat, H = laser_measurement_model(ekf_state, j)
        Sinv = np.linalg.inv(
            np.matmul(np.matmul(H, ekf_state['P']), H.T) + Q)
        for i in range(len(measurements)):
            r[0] = measurements[i][0] - zhat[0]
            r[1] = measurements[i][1] - zhat[1]
            M[i, j] = np.matmul(np.matmul(r.T, Sinv), r)
    assoc = [-2] * len(measurements)
    padding = np.zeros(
        (len(measurements), len(measurements))) + chi2.ppf(0.95, df=2)
    cost = np.hstack((M, padding))
    result = slam_utils.solve_cost_matrix_heuristic(cost.copy())

    for (i, j) in result:
        if j < ekf_state['num_landmarks']:
            assoc[i] = j
        elif np.min(M[i, :ekf_state['num_landmarks']]) > chi2.ppf(0.99, df=2):
            assoc[i] = -1
    return assoc
Code example #5
File: main.py Project: TheAntoshkaBy/SA
def interval_border(variable, alpha):
    length = len(variable)
    upper_quantile = chi2.ppf(1 - alpha / 2, df=(length - 1))
    lower_quantile = chi2.ppf(alpha / 2, df=(length - 1))
    variable_displaced_variance = displaced_variance(variable)
    low_border = variable_displaced_variance * (length - 1) / upper_quantile
    high_border = variable_displaced_variance * (length - 1) / lower_quantile
    return low_border, high_border
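The function implements the standard chi-square confidence interval for a variance, [(n-1)s²/χ²_{1-α/2}, (n-1)s²/χ²_{α/2}]. A self-contained check with scipy, where the snippet's displaced_variance helper is replaced by numpy's sample variance:

import numpy as np
from scipy.stats import chi2

x = np.random.default_rng(0).normal(0, 2, size=200)   # true variance is 4
n, alpha = len(x), 0.05
s2 = np.var(x, ddof=1)

low = s2 * (n - 1) / chi2.ppf(1 - alpha / 2, df=n - 1)
high = s2 * (n - 1) / chi2.ppf(alpha / 2, df=n - 1)
print(low, high)   # the interval should usually cover 4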
Code example #6
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from 
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''

    if ekf_state["num_landmarks"] == 0:
        # set association to init new landmarks for all measurements
        return [-1 for m in measurements]

    ###
    # Implement this function.
    ###
    trees = measurements

    append_M = np.ones(
        (len(measurements), len(measurements))) * chi2.ppf(0.95, df=2)

    measurements = [(tree[0], tree[1]) for tree in trees]
    Q = np.diag([sigmas['range']**2, sigmas['bearing']**2])
    M = np.zeros((len(measurements), ekf_state['num_landmarks']))

    for j in range(ekf_state['num_landmarks']):
        # per-landmark quantities hoisted out of the measurement loop
        z_hat, H = laser_measurement_model(ekf_state, j)
        S = np.matmul(np.matmul(H, ekf_state['P']), H.T) + Q
        Sinv = np.linalg.inv(S)
        for i in range(len(measurements)):
            each_r = np.asarray(measurements[i]) - z_hat
            mahal = np.matmul(np.matmul(each_r.T, Sinv), each_r)
            M[i][j] = mahal

    M = np.hstack((M, append_M))
    result = slam_utils.solve_cost_matrix_heuristic(M.copy())

    assoc = [-2] * len(measurements)
    for (i, j) in result:
        if j < ekf_state['num_landmarks']:
            assoc[i] = j
        else:
            if np.min(M[i, :ekf_state['num_landmarks']]) > chi2.ppf(0.99,
                                                                    df=2):
                assoc[i] = -1

    return assoc
Code example #7
File: slam.py Project: adarshmodh/EKF_SLAM
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from 
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''

    if ekf_state["num_landmarks"] == 0:
        # set association to init new landmarks for all measurements
        return [-1 for m in measurements]

    ###
    # Implement this function.
    ###

    P = ekf_state['P']
    R = np.diag([sigmas['range']**2, sigmas['bearing']**2])

    A = np.full((len(measurements), len(measurements)), chi2.ppf(0.96, df=2))
    cost_mat = np.full((len(measurements), ekf_state['num_landmarks']),
                       chi2.ppf(0.96, df=2))

    for k in range(0, len(measurements)):
        for j in range(0, ekf_state['num_landmarks']):
            z_hat, H = laser_measurement_model(ekf_state, j)
            r = np.array(np.array(measurements[k][0:2]) - np.array(z_hat))
            S_inv = np.linalg.inv(
                np.matmul(np.matmul(H, P), np.transpose(H)) + R)
            MD = np.matmul(np.matmul(np.transpose(r), S_inv), r)
            cost_mat[k, j] = MD

    cost_mat_conc = np.concatenate((cost_mat, A), axis=1)
    temp1 = np.copy(cost_mat)
    results = slam_utils.solve_cost_matrix_heuristic(temp1)

    assoc = np.zeros(len(measurements), dtype=np.int32)
    for k in range(0, len(results)):
        if cost_mat_conc[results[k][0], results[k][1]] > chi2.ppf(0.99, df=2):
            assoc[results[k][0]] = -1
        elif cost_mat_conc[results[k][0], results[k][1]] >= chi2.ppf(0.95,
                                                                     df=2):
            assoc[results[k][0]] = -2
        else:
            assoc[results[k][0]] = results[k][1]

    return assoc
Code example #8
File: slam.py Project: YashTrikannad/EKF_SLAM
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from 
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''

    if ekf_state["num_landmarks"] == 0:
        # set association to init new landmarks for all measurements
        # assoc = np.zeros((len(measurements),1))
        # assoc = np.asarray([-1])
        return [-1 for m in measurements]

    ###
    # Implement this function.
    ###

    Qt = np.diag([sigmas['range']**2, sigmas['bearing']**2])
    zk = np.asarray(measurements)

    n_landmarks = ekf_state['num_landmarks']
    n_measurements = zk.shape[0]
    M = np.zeros((n_measurements, n_landmarks))

    # Thresholds for classifying as New or Ambiguous Landmarks
    alpha = chi2.ppf(0.95, 2)
    beta = chi2.ppf(0.99, 2)

    for k in range(n_landmarks):
        zhat, H = laser_measurement_model(ekf_state, k)
        S = np.matmul(H, np.matmul(ekf_state['P'], H.T)) + Qt
        Sinv = slam_utils.invert_2x2_matrix(S)
        innovation = zk[:, :2] - zhat.T
        M[:, k] = np.sum(innovation.T * np.matmul(Sinv, innovation.T), axis=0)

    # Augmented Matrix with Cost Matrix
    pairs = slam_utils.solve_cost_matrix_heuristic(
        np.hstack((M, alpha * np.ones((n_measurements, n_measurements)))))
    pairs.sort()
    pairs = np.asarray(pairs)
    assoc = pairs[:, 1]
    assoc = np.where(assoc >= n_landmarks, -1, assoc)

    for i in range(assoc.shape[0]):
        if assoc[i] == -1 and np.any(M[i, :] < beta):
            assoc[i] = -2

    return assoc
Code example #9
def main():
    gen_data = gen2D(1, 1, 1e-2,
                     1e-2)  # init real values, measured values etc.
    index = 5  # For saving file purposes
    extKF_T = ExtendedKF(0.5, gen_data.z, gen_data.Q1, gen_data.Q2,
                         gen_data.R1, gen_data.R2)  # T value for initialising
    extKF_T.get_x_hat_0(gen_data.z)
    extKF_T.KalmanFiltering()

    x_hat_c = np.delete(np.asarray(extKF_T.x_hat), (0), axis=0)
    x_hat_minus_c = np.delete(np.asarray(extKF_T.x_hat_minus), (0), axis=0)
    C_c = np.delete(np.asarray(extKF_T.matC), (0), axis=0)

    nees = NEES(gen_data.x.reshape(1507, 4, 1), x_hat_c, extKF_T.P_corr_list)
    nis = NIS(gen_data.z.reshape(1507, 2, 1), x_hat_minus_c,
              np.asarray(extKF_T.s_k), C_c)
    max_nees = chi2.ppf(0.95, df=4)
    max_nis = chi2.ppf(0.95, df=2)

    test = np.asarray(extKF_T.x_hat[:-1])

    plt.figure()
    plt.plot(test[:, 0], test[:, 2], label="Filter est.")
    plt.plot(gen_data.x[:, 0], gen_data.x[:, 2], label="True pos.")
    plt.title("Position Map for Q:[%0.3f %0.3f] R:[%0.3f %0.3f]" %
              (gen_data.Q1, gen_data.Q2, gen_data.R1, gen_data.R2))
    plt.legend(loc='upper right')
    plt.xlabel('x-axis [m]')
    plt.ylabel('y-axis [m]')
    plt.savefig('%i Position.png' % (index), dpi=300)
    plt.show()

    plt.subplot(211)
    plt.plot(nees, linestyle='-', marker='x')
    plt.axhline(max_nees, linestyle='--', color='r', label='5% tail point')
    plt.xlabel('Iteration')
    plt.ylabel('NEES')
    plt.title("NEES Values for Q:[%0.3f %0.3f] R:[%0.3f %0.3f]" %
              (gen_data.Q1, gen_data.Q2, gen_data.R1, gen_data.R2))
    plt.legend(loc='upper right')

    plt.subplot(212)
    plt.plot(nis, linestyle='-', marker='x')
    plt.axhline(max_nis, linestyle='--', color='r', label='5% tail point')
    plt.xlabel('Iteration')
    plt.ylabel('NIS')
    plt.title("NIS Values for Q:[%0.3f %0.3f] R:[%0.3f %0.3f]" %
              (gen_data.Q1, gen_data.Q2, gen_data.R1, gen_data.R2))
    plt.legend(loc='upper right')
    plt.tight_layout()
    plt.savefig('%i NEES-NIS.png' % (index), dpi=300)
    plt.show()
Code example #10
File: searchSpace.py Project: woxin5295/GCTS
    def _extremePoints(self):
        WRMS = []
        WNA = []
        FNA = []
        pyGCTSpath = "{}".format(os.environ['pyGCTS'])
        for file in glob.iglob(pyGCTSpath + '/metaData/results*',
                               recursive=True):
            with open(file, 'r') as ff:
                for line in ff.readlines():
                    WRMS.append(float(line.split()[1]))
                    WNA.append(float(line.split()[2]))
                    FNA.append(float(line.split()[3]))

        WRMSnew = (np.asarray(WRMS)).reshape(len(WRMS), 1)
        WNAnew = (np.asarray(WNA)).reshape(len(WNA), 1)
        FNAnew = (np.asarray(FNA)).reshape(len(FNA), 1)

        alphaNew = 1 - np.sqrt(1 - self.alpha)
        pLower_wna = qr(WRMSnew, WNAnew, (1 - alphaNew / 2))
        pUpper_wna = qr(WRMSnew, WNAnew, (alphaNew / 2))

        pLower_fna = qr(WRMSnew, FNAnew, (1 - alphaNew / 2))
        pUpper_fna = qr(WRMSnew, FNAnew, (alphaNew / 2))

        funWRMS = lambda wrms, alp, dof: np.array([
            np.sqrt((dof * wrms**2) / chi2.ppf(1 - alp / 2, dof)),
            np.sqrt((dof * wrms**2) / chi2.ppf(alp / 2, dof))
        ])
        WRMSlims = funWRMS(self.WRMS, alphaNew, self.dof)

        funWNA = lambda horlims, pUpwna, pLowna: np.array([
            pUpwna[0] * horlims[0] + pUpwna[1],
            pUpwna[0] * horlims[1] + pUpwna[1],
            pLowna[0] * horlims[1] + pLowna[1],
            pLowna[0] * horlims[0] + pLowna[1]
        ])
        extWNAs = funWNA(WRMSlims, pUpper_wna, pLower_wna)

        funFNA = lambda horlims, pUpfna, pLofna: np.array([
            pUpfna[0] * horlims[0] + pUpfna[1],
            pUpfna[0] * horlims[1] + pUpfna[1],
            pLofna[0] * horlims[1] + pLofna[1],
            pLofna[0] * horlims[0] + pLofna[1]
        ])
        extFNAs = funFNA(WRMSlims, pUpper_fna, pLower_fna)
        return extWNAs, extFNAs, WRMSlims
Code example #11
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from 
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''

    if ekf_state["num_landmarks"] == 0:
        # set association to init new landmarks for all measurements
        return [-1 for m in measurements]

    ###
    P = ekf_state["P"].copy()
    n = ekf_state['num_landmarks']

    m = len(measurements)
    cost_matrix = np.zeros([m, n + 1])
    cost_matrix[:, n] = chi2.ppf(0.99, 2)

    R = np.diag([sigmas['range']**2, sigmas['bearing']**2])

    for i in range(n):
        zhat, H = laser_measurement_model(ekf_state, i)

        S = np.dot(np.dot(H, P), H.transpose()) + R
        inv_S = slam_utils.invert_2x2_matrix(S)
        for j in range(m):
            update = np.asarray(measurements[j][0:2]) - zhat.flatten()
            cost_matrix[j, i] = np.dot(np.dot(update.transpose(), inv_S),
                                       update)

    result = slam_utils.solve_cost_matrix_heuristic(cost_matrix)

    assoc = [0] * m
    for i, j in result:
        if j < ekf_state["num_landmarks"]:
            assoc[i] = j
        else:
            if min(cost_matrix[i, 0:n]) < chi2.ppf(0.99, 2):
                assoc[i] = -2
            else:
                assoc[i] = -1

    return assoc
Code example #12
def compute_data_association(ekf_state, measurements, sigmas, params):
    '''
    Computes measurement data association.

    Given a robot and map state and a set of (range,bearing) measurements,
    this function should compute a good data association, or a mapping from
    measurements to landmarks.

    Returns an array 'assoc' such that:
        assoc[i] == j if measurement i is determined to be an observation of landmark j,
        assoc[i] == -1 if measurement i is determined to be a new, previously unseen landmark, or,
        assoc[i] == -2 if measurement i is too ambiguous to use and should be discarded.
    '''

    if ekf_state["num_landmarks"] == 0:
        return [-1 for m in measurements]

    n_lmark = ekf_state['num_landmarks']
    n_scans = len(measurements)
    M = np.zeros((n_scans, n_lmark))
    Q_t = np.array([[sigmas['range']**2, 0], [0, sigmas['bearing']**2]])

    alpha = chi2.ppf(0.95, 2)
    beta = chi2.ppf(0.99, 2)
    A = alpha*np.ones((n_scans, n_scans))

    for i in range(n_lmark):
        zhat, H = laser_measurement_model(ekf_state, i)
        S = np.matmul(H, np.matmul(ekf_state['P'],H.T)) + Q_t
        Sinv = slam_utils.invert_2x2_matrix(S)
        for j in range(n_scans):
            temp_z = measurements[j][:2]
            res = temp_z - np.squeeze(zhat)
            M[j, i] = np.matmul(res.T, np.matmul(Sinv, res))

    M_new = np.hstack((M, A))
    pairs = slam_utils.solve_cost_matrix_heuristic(M_new)
    pairs.sort()

    assoc = [j if j < n_lmark else -1 for (_, j) in pairs]

    for i in range(len(assoc)):
        if assoc[i] == -1:
            for j in range(M.shape[1]):
                if M[i, j] < beta:
                    assoc[i] = -2
                    break

    return assoc
Code example #13
File: slam.py Project: adarshmodh/EKF_SLAM
def gps_update(gps, ekf_state, sigmas):
    '''
    Perform a measurement update of the EKF state given a GPS measurement (x,y).

    Returns the updated ekf_state.
    '''

    ###
    # Implement the GPS update.
    ###

    r = np.array(gps - ekf_state['x'][0:2])

    P = ekf_state['P']
    R = np.diag([sigmas['gps']**2, sigmas['gps']**2])
    H = np.zeros((2, ekf_state['x'].size))
    H[0, 0] = 1
    H[1, 1] = 1

    S_inv = np.linalg.inv(P[0:2, 0:2] + R)

    MD = np.matmul(np.matmul(np.transpose(r), S_inv), r)

    if MD < chi2.ppf(0.999, df=2):
        K = np.matmul(np.matmul(P, np.transpose(H)), S_inv)
        ekf_state['x'] = ekf_state['x'] + np.matmul(K, r)
        ekf_state['x'][2] = slam_utils.clamp_angle(ekf_state['x'][2])
        temp1 = np.identity(ekf_state['x'].size) - np.matmul(K, H)
        ekf_state['P'] = slam_utils.make_symmetric(np.matmul(temp1, P))

    return ekf_state
Code example #14
def gps_update(gps, ekf_state, sigmas):
    '''
    Perform a measurement update of the EKF state given a GPS measurement (x,y).

    Returns the updated ekf_state.
    '''

    ##
    #Implement the GPS update.
    ##
    H = np.zeros((2, 3 + 2 * ekf_state['num_landmarks']))
    H[0, 0] = 1
    H[1, 1] = 1
    Q = np.eye(2, 2) * (sigmas['gps']**2)
    Sigma = ekf_state['P']
    r = gps - ekf_state['x'][:2]
    Sinv = np.linalg.inv(np.matmul(np.matmul(H, Sigma), H.T) + Q)
    d = np.matmul(np.matmul(r.T, Sinv), r)
    if d > chi2.ppf(0.999, df=2):
        return ekf_state
    K = np.matmul(np.matmul(Sigma, H.T), Sinv)
    ekf_state['x'] = ekf_state['x'] + np.matmul(K, r)
    ekf_state['x'][2] = slam_utils.clamp_angle(ekf_state['x'][2])
    Sigma = np.matmul(
        (np.eye(3 + 2 * ekf_state['num_landmarks'],
                3 + 2 * ekf_state['num_landmarks']) - np.matmul(K, H)), Sigma)
    ekf_state['P'] = slam_utils.make_symmetric(Sigma)
    return ekf_state
Code example #15
def test_association_dependent_tracks(track1_mean,
                                      track1_cov,
                                      track2_mean,
                                      track2_cov,
                                      cross_cov_ij,
                                      cross_cov_ji,
                                      alpha=0.05):
    """
    checks whether the tracks are from the same target, when the dependence is accounted for.
    :param track1: track to check for association
    :param track2: track to check for association
    :param cross_cov_ij: cross-covariance of the estimation errors. See article
    :param cross_cov_ji:
    :param alpha: desired test power
    :return: true if the tracks are from the same target, false else
    """
    delta_estimates = track1_mean - track2_mean
    error_delta_estimates = delta_estimates  # as the difference of the true states is 0 if it is the same target
    error_delta_estimates_covar = track1_cov + track2_cov - cross_cov_ij - cross_cov_ji

    d = (error_delta_estimates.transpose()
         @ np.linalg.inv(error_delta_estimates_covar)
         @ error_delta_estimates)[0]

    # 4 degrees of freedom as we have 4 dimensions in the state vector
    d_alpha = chi2.ppf((1 - alpha), df=4)

    # Accept H0 if d <= d_alpha
    return d <= d_alpha
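A toy usage sketch, assuming the function above is in scope; the track means, covariances, and zero cross-covariances below are invented for illustration:

import numpy as np

track1_mean = np.array([[0.0], [1.0], [0.0], [1.0]])
track2_mean = np.array([[0.1], [1.1], [0.05], [0.9]])
cov = 0.5 * np.eye(4)
zero = np.zeros((4, 4))

# with independent errors (zero cross-covariance) these small offsets
# are well inside the 95% chi-square gate, so the test accepts H0
same = test_association_dependent_tracks(track1_mean, cov,
                                         track2_mean, cov,
                                         zero, zero, alpha=0.05)
print(same)  # prints [ True]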
Code example #16
def demonstrated_reliability_sr(val, start, end, beta=1.21, CL=0.9, T=30000, ft=None, size=10):
    # failures array
    f_arr = np.zeros(size)
    # time points array
    t_arr = np.linspace(start, end, size)

    # populate the number of failures vs. time array
    # based on the failures DataFrame, if one was given
    if ft is not None and not ft.empty:
        for row in ft.values:
            fl = np.where(t_arr > row[0].timestamp(), row[1], 0)
            f_arr += fl

    # initialize demonstrated Reliability vs. time array
    dr_arr = []

    # execute the algorithm
    m = np.array([e.Cylinders for e in val.engines])
    for i, t in enumerate(t_arr):
        tt = np.array([e.oph(t) for e in val.engines])
        tt_max = max(tt)
        if tt_max > 0.0:  # avoid division by zero
            # sum all part's per lipson equality to max hours at time t
            n_lip = sum(m*(tt/tt_max) ** beta)
            # use Lipson equality again to calc n@T hours
            n_lip_T = n_lip * ((tt_max/T) ** beta)
            # calc demonstrated Reliability per Chi.square dist (see A.Kleyner Paper)
            dr = np.exp(-chi2.ppf(CL, 2*(f_arr[i]+1))/(2*n_lip_T)) * 100.0
            # store in list for numpy vector
            dr_arr.append(dr)
        else:
            dr_arr.append(0.0)
    return (t_arr, np.array(dr_arr), f_arr)
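The core of the function is the chi-square bound on demonstrated reliability, R = exp(-χ²_{CL, 2(f+1)} / (2n)). A single-point sketch with invented test numbers:

import numpy as np
from scipy.stats import chi2

CL, failures, n_units = 0.9, 2, 500   # hypothetical campaign, in equivalent units at T hours
reliability = np.exp(-chi2.ppf(CL, 2 * (failures + 1)) / (2 * n_units)) * 100
print(reliability)   # ~98.9% demonstrated at 90% confidence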
Code example #17
 def __init__(self,
              dt,
              sensor_para,
              P_G=0.9,
              P_D=0.9,
              ConfirmationThreshold=[6, 8],
              DeletionThreshold=[7, 10],
              isSimulation=True):
     self.gating_size = chi2.ppf(P_G, df=2)
     self.P_D = P_D  # probability of detection
     # Define gating size, for elliposidal gating, we need parameter P_G
     self.P_G = P_G
     self.dt = dt
     self.F = np.matrix([[1, 0, self.dt, 0], [0, 1, 0, self.dt],
                         [0, 0, 1, 0], [0, 0, 0, 1]])
     self.H = np.matrix([[1, 0, 0, 0], [0, 1, 0, 0]])
     self.Q = 1 * np.diag([0.1, 0.1, 0.5, 0.5])
     self.P = self.Q
     self.R = 1 * np.diag([0.1, 0.1])
     self.common_P = 5 * np.linalg.det(self.R)
     if DeletionThreshold[1] < ConfirmationThreshold[1]:
         print(
             "Deletion threshold period should be larger than confirmation threshold period"
         )
     self.ConfirmationThreshold = ConfirmationThreshold
     self.DeletionThreshold = DeletionThreshold
     self.isSimulation = isSimulation
     self.track_list = []
     self.track_list_next_index = []
     # self.t = time.time()         ## time synchronize will be important
     self.sensor_para = sensor_para
Code example #18
def ellipsoid(E,
              centre=(0, 0, 0),
              scale=1,
              confidence=None,
              npoints=40,
              inverted=False):

    if E.shape != (3, 3):
        raise ValueError('ellipsoid is defined by a 3x3 matrix')

    if confidence:
        # convert the confidence level into a scale factor; 3 DOF for a 3D ellipsoid
        from scipy.stats.distributions import chi2
        s = math.sqrt(chi2.ppf(confidence, df=3)) * scale
    else:
        s = scale

    if not inverted:
        E = np.linalg.inv(E)

    x, y, z = sphere()  # unit sphere
    e = s * sp.linalg.sqrtm(E) @ np.array(
        [x.flatten(), y.flatten(), z.flatten()]) + np.c_[centre].T
    return e[0, :].reshape(x.shape), e[1, :].reshape(x.shape), e[2, :].reshape(
        x.shape)
Code example #19
 def gating(self, MHD):
     # check if the measurement lies inside the chi-square gate
     limit = chi2.ppf(0.95, df=2)
     return MHD < limit
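For reference, the gate limits used throughout these snippets (df=2 for a 2-D range/bearing innovation):

from scipy.stats import chi2

for p in (0.95, 0.99, 0.999, 0.9995):
    print(p, chi2.ppf(p, df=2))
# 0.95   -> ~5.99
# 0.99   -> ~9.21
# 0.999  -> ~13.82
# 0.9995 -> ~15.20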
Code example #20
def gps_update(gps, ekf_state, sigmas):
    '''
    Perform a measurement update of the EKF state given a GPS measurement (x,y).

    Returns the updated ekf_state.
    '''

    ###
    # Implement the GPS update.
    ###
    P = ekf_state['P']
    dim = P.shape[0] - 2
    H = np.hstack((np.eye(2), np.zeros((2, dim))))
    r = np.transpose([gps - ekf_state['x'][:2]])
    Q = (sigmas['gps']**2) * np.eye(2)
    S = np.matmul(np.matmul(H, P), H.T) + Q
    S_inv = slam_utils.invert_2x2_matrix(S)
    d = np.matmul(np.matmul(r.T, S_inv), r)
    if d <= chi2.ppf(0.999, 2):
        K = np.matmul(np.matmul(P, H.T), S_inv)
        ekf_state['x'] = ekf_state['x'] + np.squeeze(np.matmul(K, r))
        ekf_state['x'][2] = slam_utils.clamp_angle(ekf_state['x'][2])
        P_temp = np.matmul(np.eye(P.shape[0]) - np.matmul(K, H), P)
        ekf_state['P'] = slam_utils.make_symmetric(P_temp)

    return ekf_state
Code example #21
File: slam.py Project: YashTrikannad/EKF_SLAM
def gps_update(gps, ekf_state, sigmas):
    '''
    Perform a measurement update of the EKF state given a GPS measurement (x,y).

    Returns the updated ekf_state.
    '''

    ###
    # Implement the GPS update.
    ###

    P = ekf_state['P']
    residual = np.transpose([gps - ekf_state['x'][:2]])

    H_mat = np.matrix(np.zeros([2, P.shape[0]]))
    H_mat[0, 0], H_mat[1, 1] = 1, 1
    R_mat = np.diag([sigmas['gps']**2, sigmas['gps']**2])

    S_mat = H_mat * P * H_mat.T + R_mat
    d = (np.matrix(residual)).T * np.matrix(
        slam_utils.invert_2x2_matrix(np.array(S_mat))) * np.matrix(residual)

    if d <= chi2.ppf(0.999, 2):
        Kt = P * H_mat.T * np.matrix(
            slam_utils.invert_2x2_matrix(np.array(S_mat)))
        ekf_state['x'] = ekf_state['x'] + np.squeeze(
            np.array(Kt * np.matrix(residual)))
        ekf_state['x'][2] = slam_utils.clamp_angle(ekf_state['x'][2])
        ekf_state['P'] = slam_utils.make_symmetric(
            np.array((np.matrix(np.eye(P.shape[0])) - Kt * H_mat) * P))

    return ekf_state
Code example #22
 def gating(self, MHD, sensor):
     ############
     # Step 3: return True if the measurement lies inside the gate, otherwise False
     ############

     p = 0.9995  # gating probability P_G
     limit = chi2.ppf(p, df=2)
     return MHD < limit
Code example #23
File: sample.py Project: hanifsudira/TA
 def find_connection(bucket, test_statistic):
     # the incoming test_statistic is recomputed from the bucket
     test_statistic = calculate_test_statistic(bucket)
     pValThreshold = chi2.ppf(.01, subSize ** 2)
     too_similar = test_statistic < pValThreshold
     # blocks are "connected" if they occur by chance < 1% of the time and
     # do not overlap.
     connection = np.any(np.logical_and(too_similar, np.logical_not(overlap)))
     return connection
Code example #24
 def _threshold(self, data: np.ndarray, deltas: np.ndarray,
                precision: Optional[float]) -> Optional[float]:
     if self.confidence is None:
         return precision
     if callable(self.confidence):
         return self.confidence(data, deltas, precision)  # pylint: disable=not-callable
     return (precision *
             chi2.ppf(1. - cast(float, self.confidence), self.window - 1) /
             self.window)
Code example #25
def compute_var_interval_border(random_variable, alpha):
    length = len(random_variable)
    quantile_high = chi2.ppf(alpha / 2, df=(length - 1))
    quantile_low = chi2.ppf(1 - alpha / 2, df=(length - 1))
    variance = unbiased_sample_variance(random_variable)

    low_border = variance * (length - 1) / quantile_low
    high_border = variance * (length - 1) / quantile_high

    return low_border, high_border
Code example #26
def plot_ness(ness, number_of_samples):
    chi_squared = np.full(number_of_samples, chi2.ppf(0.95, df=4))
    plt.figure()
    plt.plot(chi_squared, color='b', linestyle='dashed', label='chi-squared')
    plt.plot(ness, color='r', label='NEES')
    plt.legend()
    plt.title('NEES for Q=0', fontweight='bold')
    plt.xlabel('Step')
    plt.ylabel('NEES values')
    plt.show()
Code example #27
    def _find_connection(subBlocks) -> np.ndarray[bool]:
        '''calculate whether blocks are too similar to have occured by chance
        blocks are "connected" if they occur by chance < 1% of the time and
        do not overlap.'''
        testStatistic = _calculate_test_statistic(subBlocks)
        overlap = _find_overlap()
        pValThreshold = chi2.ppf(.01, (subSize**2))
        tooSimilar = testStatistic < pValThreshold
        connection = np.logical_and(tooSimilar, np.logical_not(overlap))

        return connection
Code example #28
def pirson(g, intervals, h):
    # Pearson chi-square goodness-of-fit test against a fitted normal law
    n = len(g)
    k = int(round(1.72 * n**(1 / 3), 0))  # number of bins
    mu = expected_value(g)
    sigma = variance(g)**(1 / 2)
    all_p = norm.cdf(intervals, mu, sigma)
    p = get_interval_p(all_p)  # per-bin probabilities from the CDF values
    xhi2_list = [(h[i] - n * p[i])**2 / (n * p[i]) for i in range(k)]
    # df = k - 3: k - 1, minus 2 for the estimated mean and std
    return sum(xhi2_list), chi2.ppf(0.95, df=k - 3), all_p, p
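A self-contained run of the same Pearson test, using numpy and scipy in place of the snippet's helpers (expected_value, variance, get_interval_p are replaced by their numpy equivalents here):

import numpy as np
from scipy.stats import norm, chi2

rng = np.random.default_rng(1)
g = rng.normal(0, 1, 1000)
k = int(round(1.72 * len(g) ** (1 / 3)))      # number of bins
h, edges = np.histogram(g, bins=k)
edges[0], edges[-1] = -np.inf, np.inf         # cover the full support
p = np.diff(norm.cdf(edges, g.mean(), g.std(ddof=1)))
stat = ((h - len(g) * p) ** 2 / (len(g) * p)).sum()
print(stat, chi2.ppf(0.95, df=k - 3))   # the statistic should usually fall below the critical value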
Code example #29
def test_cpp_splits():
    "test cpp splits"
    np.random.seed(0)
    data = np.random.normal(0, 3e-3, 70).astype('f4')
    data[:10] += 20
    data[10:20] += np.linspace(20, 19, 10)
    data[20:40] += 19
    data[40:45] += np.linspace(19, 18, 5)
    data[45:48] += 18
    data[48:53] += np.linspace(18, 17, 5)
    data[53:] += 17

    der = np.array([
        data[0] - np.mean(data[0:3]), data[0] - np.mean(data[1:4]),
        (data[0] * 2 + data[1]) / 3. - np.mean(data[2:5])
    ] + [
        np.mean(data[i - 3:i]) - np.mean(data[i:i + 3])
        for i in range(3, data.size - 2)
    ] + [
        np.mean(data[-5:-2]) - (data[-1] * 2 + data[-2]) / 3.,
        np.mean(data[-4:-1]) - data[-1]
    ],
                   dtype='f4')
    der = np.abs(der)
    der /= np.percentile(der, 75.) + 3e-3
    out = DerivateSplitDetector().grade(data, 3e-3)
    assert np.max(np.abs(out - der)) < 5e-2
    assert np.max(np.abs(out - der / der.max() * out.max())) < 2e-5

    gx2 = np.array(
        [np.var(data[max(0, i - 2):i + 3]) for i in range(data.size)],
        dtype='f4')
    gx2 = np.sqrt(gx2)
    gx2 /= 3e-3 * chi2.ppf(.9, 4) / 5
    out2 = ChiSquareSplitDetector(gradewindow=5).grade(data, 3e-3)
    assert_allclose(out2[2:-2], gx2[2:-2], rtol=1e-6, atol=1e-5)

    gmu = np.copy(out)
    gmu[13:18] = out2[13:18]
    gmu[42:44] = out2[42:44]
    gmu[50:52] = out2[50:52]
    cnf = MultiGradeSplitDetector(
        chisquare=ChiSquareSplitDetector(gradewindow=5), minpatchwindow=3)
    out3 = cnf.grade(data, 3e-3)
    assert_allclose(out3, gmu)

    ints = MultiGradeSplitDetector()(data, 3e-3)
    assert tuple(tuple(i)
                 for i in ints) == ((0, 12), (19, 41), (44, 49), (52, 70))

    data[1] = data[15] = data[50] = np.NaN
    ints = MultiGradeSplitDetector()(data, 3e-3)
    assert tuple(tuple(i)
                 for i in ints) == ((0, 12), (19, 41), (44, 48), (52, 70))
Code example #30
def main():
    bound = ''.ljust(40, '*')
    start = ' [ STARTED ] '.center(40, '*')
    end = ' [ FINISHED ] '.center(40, '*')
    print(bound + '\n' + start + '\n' + bound)

    fileName = 'lesson7_mahal_diamonds.csv'

    if fileName == 'lesson7_mahal_dataset_0.csv':
        # Read data
        # Taken from https://jamesmccaffrey.wordpress.com/2017/11/09/example-of-calculating-the-mahalanobis-distance/
        # We assumed that the three variables are independent
        data = pd.read_csv(fileName)
        data.head()
        test = pd.DataFrame([[66, 640, 44], [69, 595, 38]],
                            columns=list(['height', 'score', 'age']))
    elif fileName == 'lesson7_mahal_diamonds.csv':
        # Read data
        # Taken from https://www.machinelearningplus.com/statistics/mahalanobis-distance/
        data = pd.read_csv(fileName).iloc[:, [0, 4, 6]]
        data.head()
        test = data[['carat', 'depth', 'price']].head(5)
    else:
        sys.exit('[ERROR]: File not found!')

    # Mahalanobis distance
    test['mahalanobis'] = mahalanobis(test, data, None)
    test.head()

    # Flag a test vector as an outlier when its squared Mahalanobis
    # distance exceeds the critical value (cv) of the chi-square
    # distribution with the proper DOF at significance level p
    p = 0.001
    dof = 3
    cv = chi2.ppf(1 - p, dof)

    outlier = []
    critical_value = []
    for i in range(len(test['mahalanobis'])):
        critical_value.append(cv)
        if test['mahalanobis'][i] > cv:
            outlier.append('true')
        else:
            outlier.append('false')

    test['critical-value'] = np.asarray(critical_value)
    test['p_value'] = 1 - chi2.cdf(test['mahalanobis'], dof)
    test['outlier'] = np.asarray(outlier)
    print(test)

    print(bound + '\n' + end + '\n' + bound)

    return 0
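The cutoff logic reduces to a few lines; a standalone sketch with synthetic data, where the mahalanobis helper above is replaced by the direct computation:

import numpy as np
from scipy.stats import chi2

rng = np.random.default_rng(0)
data = rng.multivariate_normal([0, 0, 0], np.eye(3), size=500)
x = np.array([4.0, 4.0, 4.0])                  # candidate outlier

mu = data.mean(axis=0)
inv_cov = np.linalg.inv(np.cov(data, rowvar=False))
d2 = (x - mu) @ inv_cov @ (x - mu)             # squared Mahalanobis distance

cv = chi2.ppf(1 - 0.001, df=3)                 # critical value for p = 0.001
print(d2 > cv, 1 - chi2.cdf(d2, df=3))         # outlier flag and p-value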
Code example #31
File: test.py Project: GodThyane/TS3-bocadillos-back
def testVarianza(randoms):
    n = len(randoms)
    grade_liberty = n - 1
    aceptation = 0.95
    a = 1 - aceptation

    variance = np.var(randoms, ddof=1)

    a_2 = a / 2
    a_3 = 1 - a_2

    xa_1 = chi2.ppf(a_3, df=grade_liberty)  # upper-tail quantile
    xa_2 = chi2.ppf(a_2, df=grade_liberty)  # lower-tail quantile

    # acceptance limits for the sample variance of U(0,1) randoms
    # (the true variance is 1/12)
    ls = xa_1 / (12 * grade_liberty)
    li = xa_2 / (12 * grade_liberty)

    return (variance >= li) & (variance <= ls)
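The same test in a few standalone lines: for U(0,1) randoms the true variance is 1/12, and the sample variance should fall between the scaled chi-square quantiles.

import numpy as np
from scipy.stats import chi2

u = np.random.default_rng(2).random(1000)
n = len(u)
li = chi2.ppf(0.025, df=n - 1) / (12 * (n - 1))   # lower acceptance limit
ls = chi2.ppf(0.975, df=n - 1) / (12 * (n - 1))   # upper acceptance limit
print(li <= np.var(u, ddof=1) <= ls)              # True for a healthy generator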
Code example #32
def chi2inv(p, nfft, nperseg, test=None):
    #
    #   External Function: Statistical Analysis
    #   Function to estimate the inverse of cumulative distribution function (percentile)
    #
    if test is None:
        nw2 = 2 * (2.5164 * (nfft / nperseg)) * 1.2
        return chi2.ppf(p, df=nw2) / nw2
    else:
        nw2 = (nfft / nperseg)
        return 2 * sc.gammaincinv(nw2,
                                  p) / nw2  # Inverse incomplete gamma function
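The else branch builds on the identity between the chi-square inverse CDF and the inverse regularized incomplete gamma function, chi2.ppf(p, df) == 2 * gammaincinv(df / 2, p); a quick check:

from scipy.stats import chi2
from scipy.special import gammaincinv

p, df = 0.95, 5.0
print(chi2.ppf(p, df), 2 * gammaincinv(df / 2, p))   # identical values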
Code example #33
File: outliers.py Project: tpikonen/solution
def filter_matfile(fname, outstem, p_reject=0.001, plot=1):
    stack = read_mat(fname)
    md5 = md5_file(fname)
    print("Rejection probability: %0.3g" % p_reject)
    N = np.sum(np.logical_not(np.isnan(stack[0,0,1,:])))
    print("Number of valid channels: %d" % N)
    threshold = chi2.ppf(1.0 - p_reject, N) / N
    print("Chisq rejection threshold: %0.3g" % threshold)

    for pos in range(stack.shape[0]):
        reps = stack[pos,...]
        incinds, cdm = filter_outliers(reps, threshold=threshold, plot=plot)
        ms = mean_stack(reps[incinds,...])
        disinds = list(range(reps.shape[0]))
        for i in incinds:
            disinds.remove(i)
        print("Pos %d, discarded: %s" % (pos, str(disinds)))
        ad = { 'chi2cutoff' : float(threshold),
            'rejection_prob' : float(p_reject),
            'incinds' : [int(x) for x in incinds],
            'disinds' : [int(x) for x in disinds],
            'chi2matrix' : [float(x) for x in cdm],
            'method' : "filter_outliers",
            'inputfile' : [ fname, md5 ],
            'inputposition' : int(pos),
            'q~unit' : '1/nm',
            'I~unit' : 'arb.',
            'Ierr~unit' : 'arb.',
            'I_first~unit' : 'arb.',
            'Ierr_first~unit' : 'arb.',
            'I_all~unit' : 'arb.',
            'Ierr_all~unit' : 'arb.',
            }
        outarr = np.zeros((7, ms.shape[1]))
        outarr[0:3,:] = ms
        outarr[3:5,:] = reps[0,1:3,:]
        outarr[5:7,:] = mean_stack(reps)[1:3,:]

        outname = "%s.p%02d.out.ydat" % (outstem, pos)
        print(outname)
        write_ydat(outarr, outname, addict=ad,
            cols=['q','I','Ierr','I_first','Ierr_first','I_all','Ierr_all'],
            attributes=['~unit'])
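The per-channel normalization threshold = chi2.ppf(1 - p_reject, N) / N is a reduced chi-square cutoff; it tightens toward 1 as the number of valid channels grows:

from scipy.stats import chi2

for N in (50, 200, 800):
    print(N, chi2.ppf(1 - 0.001, N) / N)
# 50  -> ~1.73
# 200 -> ~1.34
# 800 -> ~1.16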
Code example #34
File: Main2.py Project: hanifsudira/TA
 def findConnection(self, testStatistic, overLap, subSize):
     pvalThreshold = chi2.ppf(.01, subSize**2)
     tooSimilar = testStatistic < pvalThreshold
     # connected = too similar to have occurred by chance AND not overlapping
     connection = np.logical_and(tooSimilar, np.logical_not(overLap))
     return connection