Example 1
    def motion_model(self, samples, point, su):
        # Propagate every particle by the odometry step, perturbed with
        # zero-mean Gaussian noise of std su along each axis.
        for i in range(self.np):
            dx = point[0] - normrnd(0, su[0])
            dy = point[1] - normrnd(0, su[1])
            if self.dim == 2:
                pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
            elif self.dim == 3:
                dz = point[2] - normrnd(0, su[2])
                pose = [
                    samples[i].pose[0] + dx, samples[i].pose[1] + dy,
                    samples[i].pose[2] + dz
                ]
            samples[i].pose = pose
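A minimal way to exercise this motion model outside the class, assuming `normrnd` is `numpy.random.normal` (the standalone `motion_step` helper below is only an illustration of the 2-D branch, not code from the original repo):

import numpy as np
from numpy.random import normal as normrnd  # assumed alias used throughout these examples

def motion_step(poses, point, su):
    # standalone sketch of the 2-D branch: shift every pose by the odometry
    # increment `point` minus per-axis Gaussian noise with std `su`
    noise = normrnd(0, su, size=(len(poses), 2))
    return [[p[0] + point[0] - n[0], p[1] + point[1] - n[1]]
            for p, n in zip(poses, noise)]

poses = [[0.0, 0.0]] * 100
poses = motion_step(poses, point=[1.0, 0.5], su=[0.1, 0.1])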
Example 2
    def readDistances(self):
        # Read the raw RSSI measurements for every waypoint from the CSV file.
        with open(self.TX, 'r') as wifi_file:
            reader = csv.reader(wifi_file)
            measures = [[float(x) for x in row] for row in reader]

        for i in range(self.numPts):
            pt = self.wayPts[i]
            APMap = []
            for j in range(self.numAPs):
                name = self.TXName[j]
                tx = self.name2Pos[name]
                rssiVal = measures[i][j]

                # access points '203-H' and '125-H' are labelled 0, all others 1
                label = 0 if name in ('203-H', '125-H') else 1
                euclid = self.distance(pt, tx)

                # a zero RSSI reading means no signal: substitute a random
                # far-away distance instead of converting it
                if rssiVal == 0:
                    rssiDist = normrnd(20, 3)
                else:
                    rssiDist = self.rssi2Dist(rssiVal)

                APMap.append(distanceMap(rssiDist, euclid, label, name))

            self.distMap.append(APMap)

        print("All distances mapped on grid!")
Example 3
        def jumpfun(x, jstd):
            # propose a Gaussian jump, reflecting at 0 and wrapping above 1 so
            # the proposal stays inside the unit interval
            x = fabs(x + normrnd(0.0, jstd))
            if x > 1.0:
                x = x % 1

            # a continuous draw lands exactly on 0 or 1 with probability zero,
            # so the open-interval check holds almost surely
            assert x > 0 and x < 1

            return x
Example 4
def mvnrnd_pre(mu, Lambda):
    # Draw one sample from N(mu, Lambda^{-1}) given the precision matrix
    # Lambda: with the upper Cholesky factor Lambda = U^T U and z ~ N(0, I),
    # solving U x = z yields x with covariance Lambda^{-1}.
    src = normrnd(size=(mu.shape[0], ))
    return solve_ut(cholesky_upper(Lambda, overwrite_a=True, check_finite=False),
                    src, lower=False, check_finite=False,
                    overwrite_b=True) + mu
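A quick self-contained sanity check of the idea: if Lambda = U^T U with U upper triangular and z ~ N(0, I), then x = U^{-1} z has covariance Lambda^{-1}. The aliases below (`normrnd` = numpy.random.normal, `cholesky_upper` = scipy.linalg.cholesky, which returns the upper factor by default, `solve_ut` = scipy.linalg.solve_triangular) are an assumption about the source's imports:

import numpy as np
from numpy.random import normal as normrnd
from scipy.linalg import cholesky as cholesky_upper, solve_triangular as solve_ut

mu = np.zeros(3)
Lambda = np.array([[2.0, 0.5, 0.0],
                   [0.5, 2.0, 0.5],
                   [0.0, 0.5, 2.0]])  # precision matrix (positive definite)
# pass a copy because overwrite_a=True destroys the input
xs = np.stack([mvnrnd_pre(mu, Lambda.copy()) for _ in range(20000)])
print(np.cov(xs.T))            # should approach...
print(np.linalg.inv(Lambda))   # ...the inverse of the precision matrix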
Example 5
def mvnrnd_pre(mu, Lambda):
    # normal() draws a size-shaped np.array of standard-normal samples
    # (default mean mu=0, standard deviation sigma=1.0)
    src = normrnd(size=(mu.shape[0],))
    # solve_triangular() solves the equation a x = b for x, assuming a is a
    # triangular matrix; lower=False uses the upper triangle, check_finite=False
    # skips the finiteness check for speed, and overwrite_b=True allows b to be
    # overwritten in place.

    # cholesky() computes the Cholesky decomposition A = LL* or A = U*U of a
    # Hermitian positive-definite matrix A.
    return solve_ut(cholesky_upper(Lambda, overwrite_a=True, check_finite=False),
                    src, lower=False, check_finite=False, overwrite_b=True) + mu
Example 6
def mh_sample(x, log_pdf_lambda, jump_std, D, num_samples=1, burn=1, lag=1):
    """
    uses MH to sample from log_pdf_lambda
    rguments:
    ... x: seed point
    ... log_pdf_lambda: function that evaluates the log pdf at x
    ... jump_std: standard deviation of jump distance (tunes itself)
    ... D: domain
    Keyword arguments:
    ... num_samples: number of samples to take
    ... burn: samples to throw out before any are collected
    ... lag: moves between samples
    Returns:
    ... If num_samples == 1, returns a float, else resturns an num_samples length list
    Example:
    >>> # Sample from posterior of CRP(x) with exponential(1) prior
    >>> x = 1.0
    >>> log_pdf_lambda = lambda x : utils.lcrp(10, [5,3,2] , x) - x
    >>> jump_std = 0.5
    >>> D = (0.0,float('Inf'))
    >>> sample = mh_sample(x log_pdf_lambda, jump_std, D)
    """
    num_collected = 0
    samples = []

    t_samples = num_samples * lag + burn

    checkevery = max(20, int(t_samples / 100.0))
    accepted = 0.0
    acceptance_rate = 0.0
    iters = 1.0
    aiters = 1.0

    if D[0] >= 0.0 and D[1] == float('Inf'):
        jumpfun = lambda x, jstd: fabs(x + normrnd(0.0, jstd))
    elif D[0] == 0 and D[1] == 1:

        def jumpfun(x, jstd):
            x = fabs(x + normrnd(0.0, jstd))
            if x > 1.0:
                x = x % 1

            assert x > 0 and x < 1

            return x
    else:
        jumpfun = lambda x, jstd: x + normrnd(0.0, jstd)

    logp = log_pdf_lambda(x)
    while num_collected < num_samples:

        # every now and then propose wild jumps in case there are very distant modes
        x_prime = jumpfun(x, jump_std)
        assert (x_prime > D[0] and x_prime < D[1])

        logp_prime = log_pdf_lambda(x_prime)

        if log(random.random()) < logp_prime - logp:
            x = x_prime
            logp = logp_prime
            accepted += 1.0
            acceptance_rate = accepted / aiters

        if iters > burn and iters % lag == 0:
            num_collected += 1
            samples.append(x)

        # keep the acceptance rate around .3 +/- .1
        if iters % checkevery == 0:
            if acceptance_rate >= .4:
                jump_std *= 1.1
            elif acceptance_rate <= .2:
                jump_std *= .9019
            # print("j : %1.4f, AR: %1.4f" % (jump_std, acceptance_rate))
            accepted = 0.0
            acceptance_rate = 0.0
            aiters = 0.0

        iters += 1.0
        aiters += 1.0

    if num_samples == 1:
        return samples[0]
    else:
        return samples
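A minimal sketch of driving this sampler, assuming `normrnd` is `numpy.random.normal` and the usual math/random imports are in scope; the target density here, a standard exponential on (0, inf), is chosen purely for illustration:

import random
from math import log, fabs
from numpy.random import normal as normrnd

# target: log-density of Exponential(1) on (0, inf)
log_pdf = lambda x: -x

samples = mh_sample(1.0, log_pdf, jump_std=0.5, D=(0.0, float('Inf')),
                    num_samples=500, burn=100, lag=5)
print(sum(samples) / len(samples))   # should be near 1.0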
Example 7
    def fast_measure_model(self, samples, wpID):
        Qt = np.diag([5, 5])
        Qt = Qt.tolist()
        totWt = 0

        print("Iteration: ", wpID, end='\r')

        for i in range(self.np):
            for j in range(len(self.name2Pos)):
                name = self.TXName[j]
                tx = np.array(self.name2Pos[name])
                pos = np.array(samples[i].pose)

                # initialize particle map
                if name not in samples[i].mapID:
                    samples[i].mapMu.append(tx[:2])
                    samples[i].mapSigma.append(Qt)
                    samples[i].mapID.append(name)
                    samples[i].hashMap[name] = len(samples[i].mapID) - 1
                    samples[i].w = 1 / self.np

                # update particle map
                else:
                    ID = samples[i].hashMap[name]

                    # prediction step
                    muHat = samples[i].mapMu[ID]
                    sigHat = np.array(samples[i].mapSigma[ID])

                    # update step
                    dHat = self.distance(pos, muHat)
                    rssiDist = self.dists[wpID][j].rssi

                    # use classifier or not
                    if self.use:
                        if self.hard:
                            label = self.classify(rssiDist, dHat)
                            if label == 0:
                                innov = abs(rssiDist - dHat)
                            else:
                                continue
                        else:
                            inp = torch.tensor([rssiDist, dHat])
                            out = self.model(inp.float()).detach().numpy()
                            innov = (out[0] * abs(rssiDist - dHat) +
                                     out[1] * abs(rssiDist - normrnd(15, 3)))

                    else:
                        innov = abs(rssiDist - dHat)

                    dx = muHat[0] - pos[0]
                    dy = muHat[1] - pos[1]
                    den = math.sqrt(dx**2 + dy**2)
                    H = np.array([dx / den, dy / den])

                    try:
                        Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
                    except Exception:
                        bp()  # drop into the debugger on a shape mismatch

                    # Kalman Gain
                    K = np.matmul(sigHat, H) / Q

                    # update pose / covariance
                    mu = muHat + innov * K
                    K = K.reshape((self.dim, 1))
                    # K * H is an outer product here, so this is (I - K H) @ sigHat
                    sig = np.matmul(np.identity(self.dim) - K * H, sigHat)
                    samples[i].mapMu[ID] = mu.reshape((self.dim, ))
                    samples[i].mapSigma[ID] = sig.tolist()
                    # weight by the Gaussian likelihood of the innovation
                    samples[i].w = max(
                        samples[i].w,
                        math.exp(-0.5 * (innov**2) / Q) /
                        math.sqrt(2 * math.pi * Q))
                    totWt += samples[i].w

        # normalize the weights
        if totWt == 0:
            for i in range(self.np):
                samples[i].w = 1 / self.np
        else:
            for i in range(self.np):
                samples[i].w = samples[i].w / totWt
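Stripped of the particle bookkeeping, the per-landmark update above is a FastSLAM-style EKF with a scalar range measurement. A standalone sketch of that core step (not code from the repo; `sz` stands in for the measurement noise variance `self.sz[j]`, and the absolute-value innovation mirrors the snippet above):

import numpy as np

def ekf_range_update(mu, Sigma, pos, r_meas, sz):
    # innovation between measured and predicted range
    d_hat = np.linalg.norm(mu - pos)
    innov = abs(r_meas - d_hat)
    # Jacobian of the range w.r.t. the landmark position
    H = (mu - pos) / d_hat
    Q = H @ Sigma @ H + sz                 # innovation variance (scalar)
    K = Sigma @ H / Q                      # Kalman gain
    mu_new = mu + innov * K
    Sigma_new = (np.eye(len(mu)) - np.outer(K, H)) @ Sigma
    # Gaussian likelihood of the innovation, used as the particle weight
    w = np.exp(-0.5 * innov**2 / Q) / np.sqrt(2 * np.pi * Q)
    return mu_new, Sigma_new, w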
Example 8
    def motion_model(self, samples, point, su):
        # 2-D-only variant of the motion model from Example 1
        for i in range(self.np):
            dx = point[0] - normrnd(0, su[0])
            dy = point[1] - normrnd(0, su[1])
            pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
            samples[i].pose = pose
Example 9
    def fast_measure_model(self, samples, z):
        if self.dim == 2: Qt = np.diag([10, 10])
        elif self.dim == 3: Qt = np.diag([10, 10, 10])
        Qt = Qt.tolist()
        nAP = len(z)
        totWt = 0

        for i in range(self.np):
            for j in range(nAP):
                tx = np.array(self.tx[j])
                pos = np.array(samples[i].pose)
                d = self.distance(tx, pos)
                if d <= self.R:
                    # initialize particle map
                    if j not in samples[i].mapID:
                        samples[i].mapMu.append(tx)
                        samples[i].mapSigma.append(Qt)
                        samples[i].mapID.append(j)
                        samples[i].hashMap[j] = len(samples[i].mapID) - 1
                        samples[i].w = 1 / self.np

                    # update particle map
                    else:
                        ID = samples[i].hashMap[j]

                        # prediction step
                        muHat = samples[i].mapMu[ID]
                        sigHat = np.array(samples[i].mapSigma[ID])

                        # update step
                        dHat = self.distance(pos, muHat)

                        # use classifier or not
                        if self.use:
                            if self.hard:
                                label = self.classify(z[j].rssi, dHat)

                                # confusion matrix bookkeeping
                                if label == 0 and z[j].label == 0:
                                    self.confidence[0] += 1  # true positive
                                elif label == 0 and z[j].label == 1:
                                    self.confidence[1] += 1  # false negative
                                elif label == 1 and z[j].label == 1:
                                    self.confidence[2] += 1  # true negative
                                elif label == 1 and z[j].label == 0:
                                    self.confidence[3] += 1  # false positive

                                if label == 0:
                                    innov = abs(z[j].rssi - dHat)
                                else:
                                    continue
                            else:
                                inp = torch.tensor([z[j].rssi, dHat])
                                out = self.model(inp.float()).detach().numpy()
                                innov = (out[0] * abs(z[j].rssi - dHat) +
                                         out[1] * abs(z[j].rssi - normrnd(self.R, 3)))

                                # confusion matrix bookkeeping
                                if out[0] > out[1] and z[j].label == 0:
                                    self.confidence[0] += 1  # true positive
                                elif out[0] > out[1] and z[j].label == 1:
                                    self.confidence[1] += 1  # false negative
                                elif out[0] < out[1] and z[j].label == 1:
                                    self.confidence[2] += 1  # true negative
                                elif out[0] < out[1] and z[j].label == 0:
                                    self.confidence[3] += 1  # false positive

                        else:
                            innov = abs(z[j].rssi - dHat)

                        dx = muHat[0] - pos[0]
                        dy = muHat[1] - pos[1]
                        den = math.sqrt(dx**2 + dy**2)
                        H = np.array([dx / den, dy / den])

                        if self.dim == 3:
                            dz = muHat[2] - pos[2]
                            den = math.sqrt(dx**2 + dy**2 + dz**2)
                            H = np.array([dx / den, dy / den, dz / den])

                        try:
                            Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
                        except Exception:
                            bp()  # drop into the debugger on a shape mismatch

                        # Kalman Gain
                        K = np.matmul(sigHat, H) / Q

                        # update pose / covariance
                        mu = muHat + innov * K
                        K = K.reshape((self.dim, 1))
                        # K * H is an outer product here, so this is (I - K H) @ sigHat
                        sig = np.matmul(np.identity(self.dim) - K * H, sigHat)
                        samples[i].mapMu[ID] = mu.reshape((self.dim, ))
                        samples[i].mapSigma[ID] = sig.tolist()
                        # weight by the Gaussian likelihood of the innovation
                        samples[i].w = max(
                            samples[i].w,
                            math.exp(-0.5 * (innov**2) / Q) /
                            math.sqrt(2 * math.pi * Q))
                        totWt += samples[i].w

        # normalize the weights
        if totWt == 0:
            for i in range(self.np):
                samples[i].w = 1 / self.np
        else:
            for i in range(self.np):
                samples[i].w = samples[i].w / totWt
Example 10
    def measure_model(self, samples, z):
        totalWt = 0
        nAP = len(z)
        for i in range(self.np):
            dz = [0 for x in range(nAP)]
            for j in range(nAP):
                tx = self.tx[j]
                pos = samples[i].pose
                d = self.distance(tx, pos)
                if d <= self.R:
                    if self.use:
                        if self.hard:
                            label = self.classify(z[j].rssi, d)

                            # confusion matrix bookkeeping
                            if label == 0 and z[j].label == 0:
                                self.confidence[0] += 1  # true positive
                            elif label == 0 and z[j].label == 1:
                                self.confidence[1] += 1  # false negative
                            elif label == 1 and z[j].label == 1:
                                self.confidence[2] += 1  # true negative
                            elif label == 1 and z[j].label == 0:
                                self.confidence[3] += 1  # false positive

                            if label == 0:
                                dz[j] = abs(z[j].rssi - d)
                        else:
                            inp = torch.tensor([z[j].rssi, d])
                            out = self.model(inp.float()).detach().numpy()
                            dz[j] = (out[0] * abs(z[j].rssi - d) +
                                     out[1] * abs(z[j].rssi - normrnd(self.R, 3)))

                            # confusion matrix bookkeeping
                            if out[0] > out[1] and z[j].label == 0:
                                self.confidence[0] += 1  # true positive
                            elif out[0] > out[1] and z[j].label == 1:
                                self.confidence[1] += 1  # false negative
                            elif out[0] < out[1] and z[j].label == 1:
                                self.confidence[2] += 1  # true negative
                            elif out[0] < out[1] and z[j].label == 0:
                                self.confidence[3] += 1  # false positive
                    else:
                        dz[j] = abs(z[j].rssi - d)

            wt = self.getWeight(dz)
            samples[i].w *= wt
            totalWt += wt

        if totalWt != 0:
            for i in range(self.np):
                samples[i].w = samples[i].w / totalWt
        else:
            for i in range(self.np):
                samples[i].w = 1 / self.np
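`getWeight` is not shown here. A common choice is a product of independent Gaussian likelihoods over the per-AP residuals; the sketch below is an assumption for illustration (the noise std `sigma` is hypothetical, not a value from the original repo):

import math

def getWeight(dz, sigma=3.0):
    # product of independent Gaussian likelihoods over the AP residuals;
    # residuals left at 0 (out-of-range APs) contribute the maximum factor
    w = 1.0
    for d in dz:
        w *= math.exp(-0.5 * (d / sigma) ** 2) / (sigma * math.sqrt(2 * math.pi))
    return w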
Example 11
def my_kalman_filter(data, Ts, stds, accels):

    # Ts = 0.1; # define the sample time

    # A = np.matrix([[1, 0, 0, Ts, 0,  0], # define the state matrix
    #                 [0, 1, 0, 0,  Ts, 0],
    #                 [0, 0, 1, 0,  0, Ts],
    #                 [0, 0, 0, 1,  0,  0],
    #                 [0, 0, 0, 0,  1,  0],
    #                 [0, 0, 0, 0,  0,  1]])
    A = np.matrix([[1, 0, Ts, 0], [0, 1, 0, Ts], [0, 0, 1, 0], [0, 0, 0, 1]])

    # C = np.matrix([[1, 0, 0, 0, 0, 0], # define the output matrix
    #                [0, 1, 0, 0, 0, 0],
    #                [0, 0, 1, 0, 0, 0]])
    C = np.matrix([[1, 0, 0, 0], [0, 1, 0, 0]])

    # B = np.matrix([[0.5*Ts**2, 0,         0], # define the input matrix
    #                [0,         0.5*Ts**2, 0],
    #                [0,         0,         0.5*Ts**2],
    #                [Ts,        0,         0],
    #                [0,         Ts,        0],
    #                [0,         0,         Ts]])
    B = np.matrix([
        [0.5 * Ts**2, 0],  # define the input matrix
        [0, 0.5 * Ts**2],
        [Ts, 0],
        [0, Ts]
    ])

    # x0 = np.matrix([0, 0, 0, 0, 0, 0]).T # define the initial conditions
    x0 = np.matrix([0, 0, 0, 0]).T  # define the initial conditions
    # sys = ss(A, B, np.eye(6), np.zeros(B.shape), Ts) # define a system to generate true data
    sys = ss(A, B, np.eye(4), np.zeros(B.shape),
             Ts)  # define a system to generate true data
    # t = np.arange(0, 40 ,Ts) # define the time interval
    t = Ts * np.arange(len(data))

    # assuming that the uncertainties in the accelerations are equal, we define
    # them as follows:
    segmaux = 5  # standard deviation of ax
    segmauy = 5  # standard deviation of ay
    segmaualpha = 5  # standard deviation of the angular acceleration

    # In practice, these values are determined experimentally.
    # define the input(accelerations):
    ux = np.concatenate([
        np.zeros((1, 30)), 25 * np.ones((1, 20)), -20 * np.ones(
            (1, 20)), 15 * np.ones((1, len(t) - 70))
    ],
                        axis=1) + normrnd(0, segmaux, (1, len(t)))

    uy = np.concatenate([
        np.zeros((1, 10)), 60 * np.ones((1, 60)), -20 * np.ones(
            (1, len(t) - 70))
    ],
                        axis=1) + normrnd(0, segmauy, (1, len(t)))
    ualpha = np.concatenate([
        np.zeros((1, 30)), 25 * np.ones((1, 20)), -20 * np.ones(
            (1, 20)), 15 * np.ones((1, len(t) - 70))
    ],
                            axis=1) + normrnd(0, segmaualpha, (1, len(t)))
    # the planar 4-state model has only two inputs (ax, ay); ualpha is a
    # leftover from the 6-state variant and is not fed into the system
    u = np.concatenate([ux, uy], axis=0)
    # generating the true data:
    Xtrue = lsim(sys, u, t, x0)
    xtrue = Xtrue[0][:, 0]
    ytrue = Xtrue[0][:, 1]
    vxtrue = Xtrue[0][:, 2]
    vytrue = Xtrue[0][:, 3]
    # thtrue and wtrue exist only in the 6-state variant
    # defining V:
    # measurmentsV = np.matrix([[200**2, 0,      0],
    #                           [0,      200**2, 0],
    #                           [0,      0,      300**2]])
    measurmentsV = np.matrix([[stds[0]**2, 0], [0, stds[1]**2]])
    # generating measurement data by adding noise to the true data:
    # xm = xtrue + normrnd(0, 200, (len(xtrue),))
    # ym = ytrue + normrnd(0, 200, (len(ytrue),))
    # thm = thtrue + normrnd(0, 300, (len(ytrue),))

    xm = data[:, 0]
    ym = data[:, 1]
    # thm = data[:, 2]

    # initializing the matrices for the loop (preallocation, carried over from
    # the MATLAB original)
    # Xest = np.zeros((6, len(t)))
    Xest = np.zeros((4, len(t)))
    Xest[:, 0] = np.asarray(x0).ravel()
    # defining R and Q
    R = measurmentsV * C * C.T
    # Q must match the two inputs (ax, ay) of the planar 4-state model
    Q = np.matrix([[segmaux**2, 0], [0, segmauy**2]])
    # Initializing P
    P = B * Q * B.T

    for i in range(1, len(t)):
        P = A * P * A.T + B * Q * B.T  # predicting P
        Xest[:, i] = np.asarray(A * np.expand_dims(Xest[:, i - 1], axis=-1) +
                                B * np.expand_dims(u[:, i - 1], axis=-1)).ravel()  # predicting the state
        # num = P * C.T
        # den = np.tile((C * P * C.T + R), (2,1))
        # K = num/den # calculating the Kalman gains
        K = P * C.T * (C * P * C.T + R).I
        K = np.nan_to_num(K)
        # Xest[:, i] = Xest[:, i] + np.squeeze(K * (np.matrix([xm[i], ym[i], thm[i]]).T -
        #                                C * np.expand_dims(Xest[:, i], axis=-1))) # Correcting: estimating the state

        Xest[:, i] = Xest[:, i] + np.asarray(
            K * (np.matrix([xm[i], ym[i]]).T - C * np.expand_dims(
                Xest[:, i], axis=-1))).ravel()  # Correcting: estimating the state
        # P = (np.eye(6) - K * C) * P # Correcting: estimating P
        P = (np.eye(4) - K * C) * P  # Correcting: estimating P
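A sketch of how this filter might be driven, assuming `normrnd` is `numpy.random.normal` and `ss`/`lsim` come from `control.matlab`; the circular trajectory and noise level are made up for illustration, and `accels` is passed as None since the excerpt never uses it:

import numpy as np
from numpy.random import normal as normrnd
from control.matlab import ss, lsim

Ts = 0.1
t = np.arange(0, 40, Ts)
# noisy planar position measurements along a made-up circular path
true_xy = np.stack([np.cos(0.2 * t), np.sin(0.2 * t)], axis=1)
noisy = true_xy + normrnd(0, 0.5, true_xy.shape)
my_kalman_filter(noisy, Ts, stds=[0.5, 0.5], accels=None)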