Code example #1
 def check_args(self, chi2_alpha=0.997, fig_name="CHCK_ARGS.png"):
     """Make chi2 test that argument distributions are flat"""
     print("\n\n====== CHECK THAT PHASE IS FLAT ======\n")
     for chan in range(1, self.channels // 2):  # integer division (Python 3)
         datos = []
         for evt_fft in self.fftset:
             datos.append(np.angle(evt_fft[chan]))
         n, bins, patches = plt.hist(datos,
                                     self.events // 100,  # bin count must be an int
                                     facecolor='green')
         lo, hi = chi2.interval(chi2_alpha, len(n))  # central acceptance region
         chi2_obs = 0.
         for n_i in n:
             chi2_obs += (n_i - 100.) ** 2 / 100.  # Pearson: (obs - exp)^2 / exp, 100 expected per bin
         if chi2_obs > hi:
             print("-----> CHANNEL " + str(chan) + "\t OBS. :" +
                   str(chi2_obs) + "\t EXP.: " + str((lo, hi)))
         elif chi2_obs < lo:
             print("-----> CHANNEL " + str(chan) + "\t OBS. :" +
                   str(chi2_obs) + "\t TOO GOOD TO BE TRUE")
         else:
             print("-----> CHANNEL " + str(chan) + "\t OBS. :" +
                   str(chi2_obs) + "\t FINE")
         plt.clf()
     print("\n======      END PHASE CHECK     ======\n")
Code example #2
File: stats.py  Project: afcarl/benchmarkThis
# assumed imports for this snippet (the project's own may differ):
import numpy as np
from numpy.linalg import eigh as eigsh  # assumption: eigsh here is a dense symmetric eigensolver
from scipy.stats import chi2


def mvn_ellipsoid(mu, sigma, alpha):
    """
    Calculates the parameters for an ellipsoid assuming
    a multivariate normal distribution

    Parameters
    ----------
    mu : np.array
        Mean vector
    sigma : np.array
        Covariance matrix
    alpha : float
        significance value

    Returns
    -------
    eigvals : np.array
        Eigenvalues of covariance matrix decomposition
    eigvecs : np.array
        Eigenvectors of covariance matrix decomposition
    half_widths : np.array
        Length of ellipsoid along each eigenvector
    """
    D = len(mu)
    eigvals, eigvecs = eigsh(sigma)
    X2 = chi2.ppf(alpha, df=D)  # scalar alpha-quantile; chi2.interval returns a (lo, hi) pair that cannot scale eigvals
    half_widths = np.sqrt(eigvals * X2)
    return eigvals, eigvecs, half_widths
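A hypothetical usage sketch (the 2-D mean and covariance below are made up; it assumes the imports added above):

mu = np.zeros(2)
sigma = np.array([[2.0, 0.5],
                  [0.5, 1.0]])
eigvals, eigvecs, half_widths = mvn_ellipsoid(mu, sigma, alpha=0.95)
print(half_widths)  # semi-axis lengths of the 95% coverage ellipse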
Code example #3
    def check_args_corr(self, chan_min=0, chan_stop=256, chan_max=256,
                        chan_depth=10, chan2_shift=0,
                        chi2_alpha=0.997, fig_name="CHCK_DIFF.png"):
        """Chi2 test that the chan1-chan2 phase-difference distributions are flat"""
        print("\n\n====== CHECK THAT DIFF. IS FLAT ======\n")

        rdb = shelve.open("temp_store.shelve")
        tmp_arr_2d = rdb["phase_diff_array"]

        if len(tmp_arr_2d) < chan_max:
            arr_2d = np.full((chan_max, chan_max), 0.5)  # 0.5 marks untested pairs
            arr_2d[:len(tmp_arr_2d), :len(tmp_arr_2d)] = tmp_arr_2d
        else:
            arr_2d = tmp_arr_2d

        for chan1 in range(chan_min, chan_stop):
            print("-----> CHANNEL " + str(chan1) + " / " + str(chan_stop))
            for chan2 in range(chan1 + chan2_shift, chan1 + chan2_shift + chan_depth):
                if chan2 < chan_max and chan1 < chan_max:
                    if 0. < arr_2d[chan1][chan2] < 1.:  # 0.5 marks an untested pair; the original 'or' was always true
                        datos = []
                        for evt_fft in self.fftset:
                            diff = np.angle(evt_fft[chan1]) - np.angle(evt_fft[chan2])
                            datos.append(diff if diff > 0 else diff + 2. * np.pi)  # wrap into (0, 2*pi]
                        n, bins, patches = plt.hist(datos, self.events // 100, facecolor='green')
                        plt.title(r'Difference of phases between %d and %d' %(chan1,chan2))
                        plt.savefig("TEMP.png")
                        if chan1 == chan2 :
                            arr_2d[chan1][chan2]=0.
                        else:
                            chi2_obs = 0.
                            for n_i in n:
                                chi2_obs += (n_i - 100.) ** 2 / 100.  # Pearson: (obs - exp)^2 / exp
                            if chi2_obs > chi2.interval(chi2_alpha,len(n))[1]:
                                arr_2d[chan1][chan2]=1.
                            else:
                                arr_2d[chan1][chan2]=0.
                        plt.clf()

        for ch1 in range(chan_max):
            for ch2 in range(ch1 + 1, chan_max):
                arr_2d[ch2][ch1] = arr_2d[ch1][ch2]  # mirror the upper triangle into the lower

        plt.imshow(arr_2d,cmap="gray", interpolation='none')
        plt.tight_layout()
        plt.xlabel("channel")
        plt.ylabel("channel")
        plt.title(r'Non-flat phases difference between channels')
        plt.savefig(fig_name)
        plt.clf()
        print("\n======      END DIFF. CHECK     ======\n")
        rdb.close()
        return arr_2d
Code example #4
 def get_nhlimits(self, cl=0.68):
     '''
     Return the limits of the distribution of expected
     results under the null hypothesis.
     '''
     lim = np.array(chi2.interval(cl, self.popt[2]))  # interval at the fitted degrees of freedom
     return lim * self.popt[3] + self.popt[1]  # rescale and shift by the fitted parameters
Code example #5
 def decide_chi_squared_null_hypothesis(self, alpha):
     endpoints = chi2.interval(alpha, 2)  # central interval, 2 degrees of freedom
     print(
         f"endpoints of the {alpha:.0%} chi-squared interval: {endpoints}, D = {self.D}"
     )
     if self.D < endpoints[0] or self.D > endpoints[1]:
         self.reject_likelihood_ratio_hypothesis = True
     else:
         self.reject_likelihood_ratio_hypothesis = False
Code example #6
 def get_conf_int(self, variance, conf_perc=.95):
     '''
     Return tuple (lower, upper) of confidence interval for variance.
     Assumption: random variables are independent
     (which is, of course, not true in the learning setting).
     '''
     # positional first argument: newer SciPy renamed the 'alpha' keyword to 'confidence'
     return chi2.interval(conf_perc,
                          df=self.num_seeds - 1,
                          scale=variance / self.num_seeds)
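For contrast, a sketch of the textbook chi-square interval for a population variance from n i.i.d. normal samples; this is not what the method above computes (it scales the chi-square distribution rather than inverting the pivot):

from scipy.stats import chi2

def variance_ci(sample_var, n, conf=0.95):
    # (n - 1) * s^2 / sigma^2 ~ chi2(n - 1); invert the pivot for the CI
    lo_q, hi_q = chi2.interval(conf, df=n - 1)
    return (n - 1) * sample_var / hi_q, (n - 1) * sample_var / lo_q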
Code example #7
File: functions.py  Project: bekaiser/TensorOcean
def line_plot_with_errorbars(A, z, fig_title, fig_xlabel, fig_plotname,
                             fig_axis, log_flag, nu):
    # plots the entire profile for bin mean inspection

    # 95% confidence interval factors for chi-square distributed random error
    conf = 0.05  # significance level; 1 - conf = 95% confidence
    #nu = 2*Nb-1 # number degrees of freedom
    lo_q, hi_q = chi2.interval(1 - conf, nu)  # central chi-square quantiles
    lo = nu / hi_q  # lower multiplier: nu / Chi^2_(nu, 1 - conf/2)
    hi = nu / lo_q  # upper multiplier: nu / Chi^2_(nu, conf/2)
    ub = A * hi  # upper bound
    lb = A * lo  # lower bound
    #print(ub[20],A[20],lb[20])

    colors = ['blue']
    color_mean = 'b'
    color_shading = 'b'
    #legend_name = [r""]

    fig = plt.figure(figsize=(5, 8))

    if log_flag == 'semilogx':
        plt.fill_betweenx(z, ub, lb, color=color_shading, alpha=0.5)
        p1 = plt.semilogx(A, z, color=color_mean, label='bin mean')
    else:
        plt.fill_betweenx(z, ub, lb, color=color_shading, alpha=0.5)
        p1 = plt.plot(A, z, color=color_mean, label='bin mean')
    plt.ylabel(r"$z$ (m)", family='serif', fontsize='13')
    #plt.legend(loc=4);
    #bg = np.array([1,1,1])  # background of the legend is white
    #colors = ['green'] #,'blue'] #,'green','green']
    # with alpha = .5, the faded color is the average of the background and color
    #colors_faded = [(np.array(cc.to_rgb(color)) + bg) / 2.0 for color in colors]
    #plt.legend([4], legend_name,handler_map={0: LegendObject(colors[0], colors_faded[0])},loc=1)
    plt.xlabel(fig_xlabel, family='serif', fontsize='13')
    plt.title(fig_title)
    plt.axis(fig_axis)
    plt.grid()

    plt.savefig(fig_plotname, format="png")
    plt.close(fig)

    return
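The multiplicative error-bar factors in isolation (illustrative nu; both factors approach 1 as nu grows):

from scipy.stats import chi2

nu = 10
lo_q, hi_q = chi2.interval(0.95, nu)
print(nu / hi_q, nu / lo_q)  # lower and upper multipliers applied to A above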
Code example #8
def test_spc_initialization(seed, shape):
    # Test whether all activations are normally distributed
    np.random.seed(seed)
    torch.manual_seed(seed)
    net = DummyNetwork(shape)
    paras = list(net.parameters())  # parameters() returns a generator; materialize it before indexing
    training.spc_initialize(net)
    result = paras[0].matmul(torch.randn(shape[1:] + (10,))) + paras[1].view((-1, 1))
    result = result * np.sqrt(.34)  # hack that reduces variance by the same amount as relu
    test_vec = (result ** 2).detach().numpy().sum(axis=1)  # detach: the parameters carry grad
    from scipy.stats import chi2
    # need to test for significant deviation
    a, b = chi2.interval(1 - 1e-4 / test_vec.shape[0] / 8, 10)
    print("bounds", a,b)
    print("values", test_vec)
    print("WARNING: Statistical test")
    print("This test might fail on accident in .01% of the cases")
    assert np.all(a<test_vec)
    assert np.all(test_vec<b)
Code example #9
def test_spc_gradient(seed, shape):
    # Test whether the gradient behaves as predicted 
    np.random.seed(seed)
    torch.manual_seed(seed)
    net = DummyNetwork(shape)
    training.spc_initialize(net)
    paras = list(net.parameters())  # materialize the generator before indexing
    result = relu(torch.randn((10,) + shape[:1])).matmul(paras[0])
    result = result * np.sqrt(.34)  # hack that reduces variance by the same amount as relu
    std = training.infer_gradient_magnitude(shape)
    print(std)
    test_vec = (result ** 2).detach().numpy().sum(axis=0) / std
    from scipy.stats import chi2
    # need to test for significant deviation
    a, b = chi2.interval(1 - 1e-4 / test_vec.shape[0] / 8, 10)
    print("bounds", a,b)
    print("values", test_vec)
    print("WARNING: Statistical test")
    print("This test might fail on accident in .01% of the cases")
Code example #10
File: tests.py  Project: JAcciarri/Simulation
def goodness_fit_test(array):
    alpha_critic    = 0.05                              # Significance level for the critical region
    n_total         = len(array)                        # All the numbers
    e_expected_frec = 10                                # Expected count per class (greater than 5)
    d_total_classes = round(n_total / e_expected_frec)  # Number of classes
    c_classes       = np.zeros(d_total_classes)         # Occurrence counters, initialized to zero

    # Count the occurrences in every class; each count should be close to
    # e_expected_frec if the null hypothesis (uniformity) holds
    for number in array:
        i_class = min(int(number * d_total_classes), d_total_classes - 1)  # clamp in case number == 1.0
        c_classes[i_class] += 1
    
    # Chi squared simplified calculation (Pearson)
    pearson = (d_total_classes / n_total) * sum(i*i for i in c_classes) - n_total

    # Critical-region interval (confidence 1 - alpha, df = classes - 1)
    critic_min, critic_max = chi2.interval(1 - alpha_critic, d_total_classes - 1)

    # Critical region, printable form
    region_str = "{χ2 ≤ " + str(round(critic_min, 6)) + "} ∪ {χ2 ≥ " + str(round(critic_max, 6)) + "}"
    
    # Accept or reject the null hypothesis
    if pearson <= critic_min or pearson >= critic_max:
        result_msg = "Null hypothesis REJECTED: this list does not correspond to a uniform U(0,1) distribution\n"
        result_msg += "This is because " + str(round(pearson, 6)) + " is in the region " + region_str
        result = "Rejected"

    else:
        result_msg = "Null hypothesis NOT REJECTED: this list is consistent with a uniform U(0,1) distribution\n"
        result_msg += "This is because " + str(round(pearson, 6)) + " is NOT in the region " + region_str
        result = "Approved"
    
    print('------------UNIFORM TEST-----------')
    print(result_msg)
    print()
    return result
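Hypothetical usage (assumes numpy and scipy.stats.chi2 are imported, as the snippet does):

result = goodness_fit_test(np.random.rand(1000))  # 1000 samples -> 100 classes of ~10 each
print(result)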
Code example #11
File: histogram.py  Project: valerioda/ABALONE_GEANT4
def histogram(x, y, log=False, step=0.2, nbins=None, assym_errors=False, bins=None, returnNbins=False):
    '''
    Compute histogram with mean and stdev per bin.
    '''
    x_min = min(x)
    x_max = max(x)
    if log and x_min <= 0:
        print "min(x) <= 0, setting to 1"
        x_min = 1
    if nbins is not None:
        if log:
            step = np.log((x_max - x_min)) / nbins
        else:
            step = (x_max - x_min) / nbins
    if log:
        bins = lseq(x_min, x_max, step)  # lseq: log-spaced bin helper defined elsewhere in the project
    else:
        bins = np.arange(x_min, x_max, step)
    n, _ = np.histogram(x, bins=bins)
    sy, _ = np.histogram(x, bins=bins, weights=y)
    sy2, _ = np.histogram(x, bins=bins, weights=y * y)
    n = np.ma.masked_equal(n, 0)  # mask empty bins so the divisions below stay defined
    mean = sy / n
    std = np.sqrt(sy2 / n - mean * mean)
    if assym_errors:
        # Asymmetric errors: correct the lower bound.
        std_up = std
        std_low = std_up.copy()
        # Error should not go negative.
        std_low[std_up >= mean] = mean[std_up >= mean] * (1.0 - 1e-5)
        # Compute confidence interval assuming the number of fired cells is Poisson distributed.
        conf = np.asarray(chi2.interval(.68, 2. * mean)) / 2.0 - mean
        conf[0] *= -1.0
        # return bins, mean, [std_low, std_up]
        return bins, mean, conf
    if returnNbins:
        return bins, mean, std, n
    return bins, mean, std
Code example #12
for k, z_k in tqdm(enumerate(z[:N])):

    eta_hat[k], P_hat[k], NIS[k], a[k] = slam.update(eta_pred[k], P_pred[k],
                                                     z_k)

    if k < K - 1:
        eta_pred[k + 1], P_pred[k + 1] = slam.predict(eta_hat[k], P_hat[k],
                                                      odometry[k])

    assert (eta_hat[k].shape[0] == P_hat[k].shape[0]
            ), "dimensions of mean and covariance do not match"

    num_asso = np.count_nonzero(a[k] > -1)
    Assos[k] = num_asso

    CI[k] = chi2.interval(1 - alpha, 2 * num_asso)

    if num_asso > 0:
        NISnorm[k] = NIS[k] / (2 * num_asso)
        CInorm[k] = CI[k] / (2 * num_asso)

        total_num_asso += num_asso
        NISes[k] = NIS[k]
    else:
        NISnorm[k] = 1
        CInorm[k].fill(1)

    NEESes[k] = slam.NEESes(eta_hat[k][:3], P_hat[k][:3, :3],
                            poseGT[k])  # pose NEES via the provided slam.NEESes
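This and the remaining SLAM examples repeat one pattern: normalize the NIS and its chi-square confidence bounds by the measurement dimension (2 per associated landmark). A condensed sketch of just that pattern, with illustrative names:

import numpy as np
from scipy.stats import chi2

def nis_consistency(nis, num_asso, confidence=0.95):
    """Normalized NIS and CI; dof = 2 per associated 2-D landmark measurement."""
    dof = 2 * num_asso
    if dof == 0:
        return 1.0, (1.0, 1.0)  # convention used above when nothing is associated
    lo, hi = np.array(chi2.interval(confidence, dof)) / dof
    return nis / dof, (lo, hi)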
Code example #13
for k, z_k in tqdm(enumerate(z[:N])):

    eta_hat[k], P_hat[k], NIS[k], a[k] = slam.update(eta_pred[k], P_pred[k],
                                                     z_k)  # Update

    if k < K - 1:
        eta_pred[k + 1], P_pred[k + 1] = slam.predict(eta_hat[k], P_hat[k],
                                                      odometry[k])  # Predict

    assert (eta_hat[k].shape[0] == P_hat[k].shape[0]
            ), "dimensions of mean and covariance do not match"

    num_asso = np.count_nonzero(a[k] > -1)
    total_num_asso += num_asso

    CI[k] = chi2.interval(alpha, 2 * num_asso)

    if num_asso > 0:
        NISnorm[k] = NIS[k] / (2 * num_asso)
        CInorm[k] = CI[k] / (2 * num_asso)
    else:
        NISnorm[k] = 1
        CInorm[k].fill(1)

    NEESes[k] = EKFSLAM.NEESes(eta_hat[k][:3], P_hat[k][:3, :3], poseGT[k])

    if doAssoPlot and k > 0:
        axAsso.clear()
        axAsso.grid()
        zpred = slam.h(eta_pred[k]).reshape(-1, 2)
        axAsso.scatter(z_k[:, 0], z_k[:, 1], label="z")
Code example #14
resid_max = pd.Series.rolling(arma_res.resid, window=250).mean().max()
resid_min = pd.Series.rolling(arma_res.resid, window=250).mean().min()
print("平均:              %2.5f" % m, "標準偏差:          %2.4f" % v)
print("250日平均の最大値: %2.5f" % resid_max, "250日平均の最小値: %2.5f" % resid_min)
print("250日平均の95%の信頼区間: ", (t.interval(alpha=0.95, df=250, loc=0, scale=v)))

from scipy.stats import chi2
resid = arma_res.resid.iloc[1:]
m = resid.mean()
v = resid.std()
resid_max = pd.Series.rolling(arma_res.resid, window=250).std().max()
resid_min = pd.Series.rolling(arma_res.resid, window=250).std().min()
print("平均:                 %2.5f" % m, "        標準偏差:    %2.5f" % v)
print("250日標準偏差の最大値:%2.5f" % resid_max, "250日標準偏差の最小値:%2.5f" % resid_min)

cint1, cint2 = chi2.interval(alpha=(0.95), df=249)

bcs = [
    "1949/5/16", "1954/12/1", "1972/1/1", "1986/12/1", "1986/12/1",
    "1993/11/1", "1999/2/1", "2002/2/1", "2009/4/1"
]
bce = [
    "1954/11/30", "1971/12/31", "1986/11/30", "1989/12/31", "1993/10/30",
    "1999/1/31", "2002/1/31", "2009/3/31", "2012/11/30"
]

for i in range(len(bcs)):
    y = lnn225.loc[bcs[i]:bce[i]].dropna()
    fig = plt.figure(figsize=(8, 2))
    ax1 = fig.add_subplot(1, 2, 1)
    fig = sm.graphics.tsa.plot_acf(y.squeeze(),
Code example #15
File: run_simulated_SLAM.py  Project: Olarm/ttk4250
for k, z_k in tqdm(enumerate(z[:N])):

    eta_hat[k], P_hat[k], NIS[k], a[k] = slam.update(eta_pred[k], P_pred[k],
                                                     z_k)  # update step

    if k < K - 1:
        eta_pred[k + 1], P_pred[k + 1] = slam.predict(
            eta_hat[k], P_hat[k].copy(), odometry[k])  # predict step

    assert (eta_hat[k].shape[0] == P_hat[k].shape[0]
            ), "dimensions of mean and covariance do not match"

    num_asso = np.count_nonzero(a[k] > -1)

    CI[k] = chi2.interval(1 - alpha, 2 * num_asso)

    if num_asso > 0:
        NISnorm[k] = NIS[k] / (2 * num_asso)
        CInorm[k] = CI[k] / (2 * num_asso)
    else:
        NISnorm[k] = 1
        CInorm[k].fill(1)

    NEESes[k] = slam.NEESes(eta_hat[k][:3], P_hat[k][:3, :3],
                            poseGT[k])  # pose NEES via the provided slam.NEESes

    if doAssoPlot and k > 0:
        axAsso.clear()
        axAsso.grid()
Code example #16
        eta, P = slam.predict(eta, P.copy(), odo)  # predict step

        z = detectTrees(LASER[mk])
        eta, P, NIS[mk], a[mk] = slam.update(eta, P.copy(), z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)

        if num_asso > 0:
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            CInorm[mk] = np.array(chi2.interval(confidence_prob, 2 * num_asso)) / (
                2 * num_asso
            )
            CI[mk] = chi2.interval(1-alpha, 2 * num_asso)

            total_num_asso += num_asso
            NISes[mk] = NIS[mk]
        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)
            CI[mk].fill(1)

        xupd[mk] = eta[:3]

        if doPlot:
            sh_lmk.set_offsets(eta[3:].reshape(-1, 2))
Code example #17
            raise ValueError("negative time increment")

        t = timeLsr[mk]  # reset time to this laser time for the next predict
        odo = odometry(speed[k + 1], steering[k + 1], dt, car)
        eta, P = slam.predict(eta, P, odo)

        z = detectTrees(LASER[mk])
        eta, P, NIS[mk], a[mk] = slam.update(eta, P, z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)
        total_num_asso += num_asso

        if num_asso > 0:
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            CInorm[mk] = np.array(chi2.interval(confidence_prob,
                                                2 * num_asso)) / (2 * num_asso)
        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)

        xupd[mk] = eta[:3]

        if doPlot:
            sh_lmk.set_offsets(eta[3:].reshape(-1, 2))
            if len(z) > 0:
                zinmap = (rotmat2d(eta[2]) @ (z[:, 0] * np.array(
                    [np.cos(z[:, 1]), np.sin(z[:, 1])]) +
                                              slam.sensor_offset[:, None]) +
                          eta[0:2, None])
                sh_Z.set_offsets(zinmap.T)
            lh_pose.set_data(*xupd[mk_first:mk, :2].T)
Code example #18
print("starting sim (" + str(N) + " iterations)")
for k, z_k in tqdm(enumerate(z[:N])):
    if slam.prnt:
        print(f"\n===ITERATION {k}, START===\n \n  z_{k}.shape = {z_k.shape}  <-LANDMARKS = {z_k.shape[0]}")
        print(f"  eta_pred[{k}].shape = {eta_pred[k].shape}\n  P_pred[{k}].shape = {P_pred[k].shape}")

    eta_hat[k], P_hat[k], NIS[k], a[k] = slam.update(eta_pred[k], P_pred[k], z_k)

    if k < K - 1:
        eta_pred[k + 1], P_pred[k + 1] = slam.predict(eta_hat[k], P_hat[k].copy(), odometry[k,:])

    assert (eta_hat[k].shape[0] == P_hat[k].shape[0]
    ), "dimensions of mean and covariance do not match"

    num_asso    = np.count_nonzero(a[k] > -1)
    CI[k]       = chi2.interval(1-alpha, 2 * num_asso)
    total_asso += num_asso

    if num_asso > 0:
        NISnorm[k] = NIS[k] / (2 * num_asso)
        CInorm[k]  = CI[k]  / (2 * num_asso)
    else:
        NISnorm[k] = 1
        CInorm[k].fill(1)

    if k > 0:
        NEESes[k] = slam.NEESes(eta_hat[k][:3], P_hat[k][:3,:3], poseGT[k])

    if doAsso and doAssoPlot and k > 0:
        axAsso.clear()
        axAsso.grid()
Code example #19
        dt = timeLsr[mk] - t
        if dt < 0:  # avoid assertions as they can be optimized away
            raise ValueError("negative time increment")

        t = timeLsr[mk]  # reset time to this laser time for the next predict
        odo = odometry(speed[k + 1], steering[k + 1], dt, car)
        eta, P = slam.predict(eta, P, odo)  # predict step

        z = detectTrees(LASER[mk])
        eta, P, NIS[mk], NISrange[mk], NISbearing[mk], a[mk] = slam.update(
            eta, P, z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)

        CI[mk] = chi2.interval(1 - alpha, 2 * num_asso)
        CI_1dim[mk] = chi2.interval(1 - alpha, num_asso)

        if num_asso > 0:
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            NISrange_norm[mk] = NISrange[mk] / num_asso
            NISbearing_norm[mk] = NISbearing[mk] / num_asso
            CInorm[mk] = CI[mk] / (2 * num_asso)
            CI_1dim_norm[mk] = CI_1dim[mk] / num_asso
        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)
            NISrange_norm[mk] = 1
            NISbearing_norm[mk] = 1
            CI_1dim_norm[mk].fill(1)
Code example #20
File: run_real_SLAM.py  Project: Olarm/ttk4250
        if dt < 0:  # avoid assertions as they can be optimized away
            raise ValueError("negative time increment")

        t = timeLsr[mk]  # reset time to this laser time for the next predict
        odo = odometry(speed[k + 1], steering[k + 1], dt, car)
        eta, P = slam.predict(eta, P, odo)  # predict step

        z = detectTrees(LASER[mk])
        eta, P, NIS[mk], a[mk] = slam.update(eta, P, z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)

        if num_asso > 0:
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            CInorm[mk] = np.array(chi2.interval(confidence_prob,
                                                2 * num_asso)) / (2 * num_asso)
        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)

        xupd[mk] = eta[:3]

        if doPlot:
            sh_lmk.set_offsets(eta[3:].reshape(-1, 2))
            if len(z) > 0:
                zinmap = (rotmat2d(eta[2]) @ (z[:, 0] * np.array(
                    [np.cos(z[:, 1]), np.sin(z[:, 1])]) +
                                              slam.sensor_offset[:, None]) +
                          eta[0:2, None])
                sh_Z.set_offsets(zinmap.T)
            lh_pose.set_data(*xupd[mk_first:mk, :2].T)
Code example #21
print("starting sim (" + str(N) + " iterations)")

for k, z_k in tqdm(enumerate(z[:N])):

    # the TODOs below are filled in as in the completed variants of this
    # template (code examples #13 and #15)
    eta_hat[k], P_hat[k], NIS[k], a[k] = slam.update(eta_pred[k], P_pred[k],
                                                     z_k)  # update

    if k < K - 1:
        eta_pred[k + 1], P_pred[k + 1] = slam.predict(eta_hat[k], P_hat[k],
                                                      odometry[k])  # predict

    assert (
        eta_hat[k].shape[0] == P_hat[k].shape[0]
    ), "dimensions of mean and covariance do not match"

    num_asso = np.count_nonzero(a[k] > -1)

    CI[k] = chi2.interval(alpha, 2 * num_asso)

    if num_asso > 0:
        NISnorm[k] = NIS[k] / (2 * num_asso)
        CInorm[k] = CI[k] / (2 * num_asso)
    else:
        NISnorm[k] = 1
        CInorm[k].fill(1)

    NEESes[k] = slam.NEESes(eta_hat[k][:3], P_hat[k][:3, :3],
                            poseGT[k])  # provided function, as in example #13

    if doAssoPlot and k > 0:
        axAsso.clear()
        axAsso.grid()
        zpred = slam.h(eta_pred[k]).reshape(-1, 2)
        axAsso.scatter(z_k[:, 0], z_k[:, 1], label="z")
Code example #22
File: Count_Outlier.py  Project: JihunKwon/PancOCM_ML
def count_outlier(chi_idx, Chi_area, fidx, t_sub_removed, t_sub, s_new,
                  out0_test, out1_test, out2_test, A0, A1, A2):
    _, chi2_interval_max = chi2.interval(1 - Chi_area, df=1)  # positional: 'alpha' keyword renamed in newer SciPy

    ### Count the number of traces above threshold ###
    if fidx % 2 == 0:  # State 1; '==', not 'is', for integer comparison
        bh_start = 0
        bh_end = 5
        # OCM0
        for bh in range(bh_start, bh_end):
            for p in range(0, t_sub_removed):
                flag0 = 0
                for depth in range(0, s_new):
                    # if not detected yet
                    if flag0 < 1:  # OCM0
                        # check every depth and count if it's larger than the threshold
                        if A0[depth,
                              bh * t_sub_removed + p] > chi2_interval_max:
                            out0_test[chi_idx, int(fidx / 2),
                                      bh] = out0_test[chi_idx,
                                                      int(fidx / 2), bh] + 1
                            flag0 = 1

        ##OCM1 and OCM2
        for bh in range(bh_start, bh_end):
            for p in range(0, t_sub):
                flag1 = 0
                flag2 = 0
                for depth in range(0, s_new):
                    # if not detected yet
                    if flag1 < 1:  # OCM1
                        if A1[depth, bh * t_sub + p] > chi2_interval_max:
                            out1_test[chi_idx, int(fidx / 2),
                                      bh] = out1_test[chi_idx,
                                                      int(fidx / 2), bh] + 1
                            flag1 = 1
                    if flag2 < 1:  # OCM2
                        if A2[depth, bh * t_sub + p] > chi2_interval_max:
                            out2_test[chi_idx, int(fidx / 2),
                                      bh] = out2_test[chi_idx,
                                                      int(fidx / 2), bh] + 1
                            flag2 = 1
    else:  # State 2
        bh_start = 5
        bh_end = 10
        # OCM0
        for bh in range(bh_start, bh_end):
            for p in range(0, t_sub_removed):
                flag0 = 0
                for depth in range(0, s_new):
                    # if not detected yet
                    if flag0 < 1:  # OCM0
                        # check every depth and count if it's larger than the threshold
                        if A0[depth, (bh - 5) * t_sub_removed +
                              p] > chi2_interval_max:
                            out0_test[chi_idx, int(fidx / 2),
                                      bh] = out0_test[chi_idx,
                                                      int(fidx / 2), bh] + 1
                            flag0 = 1

        ##OCM1 and OCM2
        for bh in range(bh_start, bh_end):
            for p in range(0, t_sub):
                flag1 = 0
                flag2 = 0
                for depth in range(0, s_new):
                    # if not detected yet
                    if flag1 < 1:  # OCM1
                        if A1[depth, (bh - 5) * t_sub + p] > chi2_interval_max:
                            out1_test[chi_idx, int(fidx / 2),
                                      bh] = out1_test[chi_idx,
                                                      int(fidx / 2), bh] + 1
                            flag1 = 1
                    if flag2 < 1:  # OCM2
                        if A2[depth, (bh - 5) * t_sub + p] > chi2_interval_max:
                            out2_test[chi_idx, int(fidx / 2),
                                      bh] = out2_test[chi_idx,
                                                      int(fidx / 2), bh] + 1
                            flag2 = 1

    return out0_test, out1_test, out2_test
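The nested depth/trace loops above can be collapsed with NumPy. A hedged sketch of the same per-trace exceedance count for one block, assuming the column-per-trace layout of A0/A1/A2:

import numpy as np
from scipy.stats import chi2

def count_exceeding_traces(A, chi_area, df=1):
    # a trace counts once if any depth sample exceeds the chi-square threshold
    _, threshold = chi2.interval(1 - chi_area, df)
    return int(np.count_nonzero((A > threshold).any(axis=0)))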
Code example #23
    "C:\\Users\\Kwon\\Documents\\Panc_OCM\\Subject_01_20180928\\run2.npy"
)  #After water
sr_list = ['s1r1', 's1r1']
rep_list = [500, 500]

num_train = 3
num_test = 10 - num_train
num_ocm = 3
num_bh = 5  # number of bh in each state

# plot color setting
colors = ['#1f77b4', '#ff7f0e']

# Get max of 90% confidence interval
# Get max of 90% confidence interval
_, chi2_interval_max = chi2.interval(
    0.9,  # positional: 'alpha' keyword renamed in newer SciPy
    df=2)  # Make sure the df is correct. Eventually we need to use 3.
print('90% confidence level: ' + str(chi2_interval_max))  # str(): cannot concatenate str and float

# Start loop for fidx
# fidx = 0
for fidx in range(0, np.size(rep_list), 2):
    ## Start for pre data (state 1) ##
    Sub_run_name = sr_list[fidx]
    in_filename = out_list[fidx]
    ocm = np.load(in_filename)

    #crop data
    ocm = ocm[300:650, :]
    s, t = np.shape(ocm)
Code example #24
File: run_real_SLAM.py  Project: aHaugl/sensorfusjon
        P = (P + P.T) / 2  # enforce covariance symmetry
        dt = timeLsr[mk] - t
        if dt < 0:  # avoid assertions as they can be optimized away
            raise ValueError("negative time increment")

        t = timeLsr[mk]  # reset time to this laser time for the next predict
        odo = odometry(speed[k + 1], steering[k + 1], dt, car)
        eta, P = slam.predict(eta, P, z_odo=odo)  # predict step

        z = detectTrees(LASER[mk])
        eta, P, NIS[mk], a[mk] = slam.update(eta, P, z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)

        if num_asso > 0:
            CI[mk] = chi2.interval(confidence_prob, 2 * num_asso)
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            CInorm[mk] = CI[mk] / (2 * num_asso)
            # Only associated measurements
            NISnorm_asso_only[mk] = NIS[mk] / (2 * num_asso)
            CInorm_asso_only[mk] = CI[mk] / (2 * num_asso)
            total_num_asso += 2 * num_asso

        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)

        xupd[mk] = eta[:3]

        if doPlot:
            sh_lmk.set_offsets(eta[3:].reshape(-1, 2))
Code example #25
#s_new = 297  # 2.3cm to 4.5cm
#s_new = 231  # 2.3cm to 4.0cm
s_new = 269  # 2.2cm to 4.2cm
threshold = 0.01  # Threshold for Chi^2
interval = 1  # Interval for averaging
plot_interval = 1000
#interval_list = [2, 4, 5, 10]
#interval_list = [5, 10, 50]
length_list = [5, 10, 20, 50]

#### Set the threshold based on Chi^2 ####
Chi_list = [
    0.000000001, 0.0000000001, 0.000000001, 0.000000000001, 0.0000000000001,
    0.00000000000001, 0.000000000000001, 0.0000000000000001
]
_, chi2_interval_max_0000001 = chi2.interval(1 - 0.000001, df=1)  # positional confidence level
_, chi2_interval_max_00000001 = chi2.interval(1 - 0.0000001, df=1)
_, chi2_interval_max_000000001 = chi2.interval(1 - 0.00000001, df=1)
_, chi2_interval_max_0000000001 = chi2.interval(1 - 0.000000001, df=1)
print('chi2_interval_max_00000001: ' + str(chi2_interval_max_00000001))
print('chi2_interval_max_000000001: ' + str(chi2_interval_max_000000001))
print('chi2_interval_max_0000000001: ' + str(chi2_interval_max_0000000001))

out0_test = np.zeros([len(Chi_list), int(len(rep_list) / 2),
                      10])  # output test result
out1_test = np.zeros([len(Chi_list), int(len(rep_list) / 2), 10])
out2_test = np.zeros([len(Chi_list), int(len(rep_list) / 2), 10])
out_mean = np.zeros([len(Chi_list), int(len(rep_list) / 2),
                     10])  # Mean of all OCM
'''
for interval_idx in range(0, np.size(rep_list)):
Code example #26
        if dt < 0:  # avoid assertions as they can be optimized away
            raise ValueError("negative time increment")

        t = timeLsr[mk]  # reset time to this laser time for the next predict
        odo = odometry(speed[k + 1], steering[k + 1], dt, car)
        eta, P = slam.predict(eta, P, odo)  # predict step

        z = detectTrees(LASER[mk])
        eta, P, NIS[mk], a[mk] = slam.update(eta, P, z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)

        if num_asso > 0:
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            CInorm[mk] = np.array(chi2.interval(confidence_prob,
                                                2 * num_asso)) / (2 * num_asso)
        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)

        xupd[mk] = eta[:3]

        if doPlot:
            sh_lmk.set_offsets(eta[3:].reshape(-1, 2))
            if len(z) > 0:
                zinmap = (rotmat2d(eta[2]) @ (z[:, 0] * np.array(
                    [np.cos(z[:, 1]), np.sin(z[:, 1])]) +
                                              slam.sensor_offset[:, None]) +
                          eta[0:2, None])
                sh_Z.set_offsets(zinmap.T)
            lh_pose.set_data(*xupd[mk_first:mk, :2].T)
Code example #27
            raise ValueError("negative time increment")

        t = timeLsr[mk]  # reset time to this laser time for the next predict
        odo = odometry(speed[k + 1], steering[k + 1], dt, car)
        eta, P = slam.predict(eta, P, odo)  # predict step

        z = detectTrees(LASER[mk])

        eta, P, NIS[mk], a[mk] = slam.update(eta, P, z)  # update step

        num_asso = np.count_nonzero(a[mk] > -1)
        total_asso += num_asso

        if num_asso > 0:
            NISnorm[mk] = NIS[mk] / (2 * num_asso)
            CInorm[mk] = np.array(chi2.interval(confidence_prob, 2 * num_asso)) / (
                2 * num_asso
            )
        else:
            NISnorm[mk] = 1
            CInorm[mk].fill(1)

        xupd[mk] = eta[:3]

        if doPlot:
            sh_lmk.set_offsets(eta[3:].reshape(-1, 2))
            if len(z) > 0:
                zinmap = (
                    rotmat2d(eta[2])
                    @ (
                        z[:, 0] * np.array([np.cos(z[:, 1]), np.sin(z[:, 1])])
Code example #28
File: pan8.py  Project: jettom/SoftArsenal
plt.ylabel(r'$\hat{z_t}$')  # raw string avoids the invalid '\h' escape


# In[8]:


from scipy.stats import chi2
resid = arma_res.resid.iloc[1:]
m = resid.mean()
v = resid.std()
resid_max = pd.Series.rolling(arma_res.resid, window=250).std().max()
resid_min = pd.Series.rolling(arma_res.resid, window=250).std().min()
print("Mean:              %2.5f" % m, "        Std dev:   %2.5f" % v)
print("Max of 250-day std: %2.5f" % resid_max, "Min of 250-day std: %2.5f" % resid_min)

cint1, cint2 = chi2.interval(0.95, df=249)  # positional confidence level

print("95%% confidence interval of the 250-day std: %2.4f" % (np.sqrt(cint1 / 249) * v),)
print("<= sigma <= %2.4f" % (np.sqrt(cint2 / 249) * v))


# In[9]:


pd.Series.rolling(arma_res.resid.iloc[1:],250).std().plot(figsize=(6,4),color='darkgray')
plt.ylabel('$std$')


# In[10]:

Code example #29
 def chi2(self, alpha=0.95, scale=1.0):
     # the method name shadows scipy's chi2 only as a class attribute; the
     # global name 'chi2' below still resolves to scipy.stats.chi2
     dof = self.degrees_of_freedom
     lower, upper = np.asarray(chi2.interval(alpha, df=dof))  # positional confidence level
     chisq = self.s0**2 / scale**2 * dof
     return chisq, lower, upper
Code example #30
File: KLebeprozessBeispiel.py  Project: LukasStu/DFSS
""" Bibliotheken importieren"""
from scipy.stats import t  # Normalverteitung
from scipy.stats import chi2  # Normalverteitung
import numpy as np
"""Werte aus Aufgabe übernehmen"""
gamma = 0.95
N = 10
m = [4.3, 4.5, 4.2, 4.3, 4.3, 4.7, 4.4, 4.2, 4.3, 4.5]
mquer = np.mean(m)
s = np.std(m, ddof=1)
"""Bstimmung der Kontanten C1 und c2 für den Mittelwert"""
c1 = t.ppf((1 - gamma) / 2, N - 1)
c2 = t.ppf((1 + gamma) / 2, N - 1)
mu_min = mquer - ((c2 * s) / np.sqrt(N))
mu_max = mquer - ((c1 * s) / np.sqrt(N))

print(' ')
print('Untere Grenze für Mittelwert \u03bc: ', mu_min)
print('Obere Grenze für Mittelwert \u03bc:', mu_max)
""" Bestimung der Konstanten für die Varianz """
c1 = chi2.ppf((1 - gamma) / 2, N - 1)
c2 = chi2.ppf((1 + gamma) / 2, N - 1)
sig_min = np.sqrt((N - 1) / c2) * s
sig_max = np.sqrt((N - 1) / c1) * s

print(' ')
print('Untere Grenze für Stabdardabweichung: ', sig_min)
print('Obere Grenze für Standardabweichung: ', sig_max)

ci = chi2.interval(gamma, N - 1, loc=0, scale=1)
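chi2.interval(gamma, N - 1) returns exactly the pair of quantiles computed step by step above, which can be checked directly:

assert np.allclose(ci, (c1, c2))  # interval == (ppf((1-gamma)/2, N-1), ppf((1+gamma)/2, N-1))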
Code example #31
 def muexcl_from_Chi2(self, Nobs):
     Nobs = np.asarray(Nobs, dtype=int)
     alpha = 1 - self._CL / 100.
     # the upper interval endpoint gives 2*mu_excl for df = 2*Nobs + 1
     return chi2.interval(1 - 2 * alpha,
                          df=2 * Nobs + np.ones_like(Nobs))[1] / 2.