Example #1
    def update(self, odom):
        # odom = (dx, dy, dh): body-frame motion since the last update.
        dx, dy, dh = odom
        # Alternative, if odometry arrives as (v, w, dt) instead:
        # h = self.particles_[:, 2]
        # dx = v * np.cos(h) * dt
        # dy = v * np.sin(h) * dt
        # dh = w * dt

        # rotate the body-frame delta into each particle's world frame
        c_ = np.cos(self.particles_[:, 2])
        s_ = np.sin(self.particles_[:, 2])

        dx_ = c_ * dx - s_ * dy
        dy_ = s_ * dx + c_ * dy

        self.particles_[:, :2] += np.stack([dx_, dy_], axis=-1)
        self.particles_[:, 2] = U.anorm(self.particles_[:, 2] + dh)

        if self.best_ is None:
            self.recalc_ = True
        else:
            # apply the same body-frame delta to the current best pose estimate
            x, y, h = self.best_
            bc, bs = np.cos(h), np.sin(h)
            x += bc * dx - bs * dy
            y += bs * dx + bc * dy
            h = U.anorm(h + dh)
            self.best_ = np.asarray([x, y, h])
    def detect(self, thresh, max_lines=np.inf):
        # find peaks in the accumulator above the absolute threshold
        peak_idx = peak_local_max_wrap(
            self.acc_,
            min_distance=self.detect_args_['dmin'],
            threshold_abs=thresh,
            #threshold_rel=0.25,
            num_peaks=max_lines)
        if len(peak_idx) > 0:
            ri, ti = peak_idx.T
            # convert accumulator indices back to (r, theta) line parameters
            r, t = self.dr_ * ri, U.anorm(self.dt_ * ti)
            return np.stack([r, t], axis=-1), self.acc_[ri, ti]
        else:
            return None
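# A minimal numeric check of the motion-update step above (a sketch, assuming
# U.anorm simply wraps angles into [-pi, pi)): the body-frame delta is rotated
# by each particle's heading, so a particle facing +90 degrees that moves
# "forward" should end up displaced along +y.
import numpy as np

def anorm(t):
    # stand-in for U.anorm: wrap angle(s) to [-pi, pi)
    return (t + np.pi) % (2 * np.pi) - np.pi

particles = np.array([[0.0, 0.0, 0.0],
                      [0.0, 0.0, np.pi / 2]])  # rows are [x, y, theta]
dx, dy, dh = 1.0, 0.0, 0.0                     # body-frame odometry delta

c, s = np.cos(particles[:, 2]), np.sin(particles[:, 2])
particles[:, 0] += c * dx - s * dy
particles[:, 1] += s * dx + c * dy
particles[:, 2] = anorm(particles[:, 2] + dh)
print(particles)  # -> [[1, 0, 0], [~0, 1, pi/2]]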
Example #3
    def initialize(self,
                   size,
                   seed=None,
                   spread=(10.0, 0.5)):
        """ Initializes particles.

        Note:
            The size_ and particles_ property is modified internally every time initialize is called.
            Each generated particle is an array composed of three elements: x,y, theta. Theta is in radians.

        Args:
            size(int): number of particles to generate.
            seed(list): an array of [x,y,h] that indicates initial pose; (0,0,0) if None. (default:None)
            spread(list): an array of (radius, theta_std).
                radius(float): maximum radius of particle generation in a uniform distribution.
                theta_std(float): Standard deviation of the heading in a normal distribution.

        Returns:
            None (internally modified)

        """
        if seed is not None:
            # sample positions and headings around the seed pose
            init_x, init_y, init_theta = seed
            x = np.random.normal(loc=init_x, scale=spread[0], size=size)
            y = np.random.normal(loc=init_y, scale=spread[0], size=size)
            h = np.random.normal(loc=init_theta, scale=spread[1], size=size)
        else:
            # no prior pose: positions around the origin, headings uniform
            x, y = np.random.normal(scale=spread[0], size=(2, size))
            h = np.random.uniform(-np.pi, np.pi, size=size)

        h = U.anorm(h)

        self.particles_ = np.stack([x, y, h], axis=-1)
        self.weights_ = np.full(size, 1.0 / size)  # uniform initial weights
        self.size_ = size
        self.recalc_ = True
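# Hedged usage sketch of initialize(); `pf` below stands in for an instance of
# whatever class defines it (the class itself is not shown above).
pf.initialize(size=500)                     # around (0,0,0), heading uniform in [-pi, pi)
pf.initialize(size=500,
              seed=[1.0, 2.0, 0.0],         # sample around a known pose
              spread=[0.5, 0.1])            # 0.5 m position std, 0.1 rad heading std
assert pf.particles_.shape == (500, 3)      # rows are [x, y, theta]
assert np.isclose(pf.weights_.sum(), 1.0)   # weights start uniform at 1/size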
Example #4
def add_p3d_batch(a, b):
    # batched SE(2) pose composition: apply body-frame deltas b to poses a.
    # a, b: [N, 3] arrays of [x, y, heading].
    x0, y0, h0 = a.T
    dx, dy, dh = b.T
    c, s = np.cos(h0), np.sin(h0)
    # rotate each delta into the corresponding pose's world frame
    dx_R = c * dx - s * dy
    dy_R = s * dx + c * dy

    x = x0 + dx_R
    y = y0 + dy_R
    h = anorm(h0 + dh)
    return np.stack([x, y, h], axis=-1)
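# Quick shape/behavior check for add_p3d_batch (assuming anorm wraps angles):
# both arguments are [N, 3] arrays, and b is interpreted in the body frame of a.
a = np.array([[0.0, 0.0, 0.0],
              [1.0, 1.0, np.pi / 2]])
b = np.array([[1.0, 0.0, 0.0],
              [1.0, 0.0, 0.0]])       # "move 1 m forward" in each pose's own frame
print(add_p3d_batch(a, b))
# -> [[1, 0, 0],      forward along +x when heading is 0
#     [1, 2, pi/2]]   forward along +y when heading is pi/2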
Example #5
def add_p3d(a, b):
    # single-pose SE(2) composition -- mostly for verification
    x0, y0, h0 = a
    dx, dy, dh = b
    c, s = np.cos(h0), np.sin(h0)
    R = np.reshape([c, -s, s, c], [2, 2])  # rotation by h0
    dp = R.dot([dx, dy])
    x1 = x0 + dp[0]
    y1 = y0 + dp[1]
    h1 = anorm(h0 + dh)
    return [x1, y1, h1]
Example #6
def sub_p3d(b, a):
    # relative pose of b expressed in the frame of a (inverse of add_p3d)
    x0, y0, h0 = a
    x1, y1, h1 = b

    c, s = np.cos(h0), np.sin(h0)
    R = np.reshape([c, -s, s, c], [2, 2])  # rotation by h0

    delta = np.reshape([x1 - x0, y1 - y0], (2, 1))
    dx, dy = R.T.dot(delta).reshape(2)     # rotate the world-frame delta into a's frame
    dh = anorm(h1 - h0)
    return [dx, dy, dh]
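# Round-trip sanity check tying add_p3d and sub_p3d together (a sketch,
# assuming anorm wraps angles into [-pi, pi)): composing a with the relative
# pose of b w.r.t. a should recover b.
a = [1.0, 2.0, 0.3]
b = [-0.5, 4.0, -2.0]
d = sub_p3d(b, a)           # relative pose of b expressed in the frame of a
b2 = add_p3d(a, d)          # compose it back onto a
print(np.allclose(b2, b))   # True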
Example #7
    def append(self, data):
        check = [(e is not None) for e in data]
        rospy.loginfo_throttle(1.0, 'check : {}'.format(check))
        if np.all(check):
            if self.last_data_ is not None:
                # require minimum motion relative to the last accepted entry
                (x0, y0, h0) = self.last_data_[2]
                (x1, y1, h1) = data[2]
                dr = np.linalg.norm([x1 - x0, y1 - y0])
                dh = np.abs(anorm(h1 - h0))
                if dr <= self.min_dr_ and dh <= self.min_dh_:
                    return
            self.dataset_.append(data)
            self.last_data_ = data
Example #8
    def format(self, pos):
        # format the data to be compatible with the training network:
        # convert absolute poses [N, 3] into per-step body-frame deltas.
        prv = pos[:-1]
        nxt = pos[1:]
        # pose of nxt expressed relative to prv
        delta = nxt - prv  # [N-1, 3] world-frame differences
        h0 = prv[:, 2]
        c, s = np.cos(h0), np.sin(h0)
        R = np.reshape([c, -s, s, c], [2, 2, -1])  # [2, 2, N-1]
        dp = np.einsum('ijk,ki->kj', R, delta[:, :2])  # rotate into prv's frame
        dh = anorm(delta[:, 2:])

        dps = np.concatenate([dp, dh], axis=-1)
        # prepend a zero delta so the output length matches the input
        dps = np.concatenate([0 * dps[0:1], dps], axis=0)

        return dps
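# Illustrative input/output for format() with the body-frame deltas returned
# (the zero row is prepended so the output length matches the input); `obj`
# is a placeholder for an instance of the owning class.
pos = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, np.pi / 2],   # moved 1 m forward, then turned 90 deg
                [1.0, 1.0, np.pi / 2]])  # moved 1 m forward again (now along +y)
print(obj.format(pos))
# -> [[0, 0, 0],
#     [1, 0, pi/2],
#     [1, 0, 0]]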
Example #9
    def search(self, srv, seg, skip=None, pad=0.05, min_l=0.08, min_d=0.02):
        """ search along segment for orthogonal segments.

        Note:
            search does not exhaustively exclude already discovered segments.
            take care not to process segments twice, which would result in a loop.

        Args:
            srv(function): see cast_ray argument for reference.
            seg(np.ndarray): source segment, [2,2] array formatted (x,y)
            skip(np.ndarray): skip point, [2] array formatted (x,y) (default:None)
            pad(float): distance to pad segment for search (default:0)
            min_l(float): minimum segment length to return (default:0.02)

        Returns:
            segs(list): list of discovered segments
        """
        # TODO : maybe min_l is why it fails?
        print('currently considering {}'.format(seg_str(seg)))
        segs = []

        d = seg[1] - seg[0]
        l = np.linalg.norm(d)
        if np.isclose(l, 0):
            # seg[1] == seg[0], single-point
            return []
        d /= l

        # set the search angle to be perpendicular to the source segment
        ang = U.anorm(np.arctan2(d[1], d[0]))

        # define search steps from seg[0] to seg[1] with resolution _r, padded at both ends
        steps = np.arange(-pad, l + pad, step=self._r).reshape(-1, 1)
        diffs = d.reshape(1, 2) * steps  # (N, 2) offsets along the segment direction

        # seg[:1] = (1, 2); srcs = (N, 2) source points along the padded segment
        srcs = seg[:1] + diffs

        s_0 = None
        s_1 = None
        new_seg = None

        for src in srcs:
            # (optionally skip the seed point; currently disabled)
            #if (skip is not None) and np.allclose(src, skip):
            #    continue
            # expand a candidate segment from this source point
            new_seg = self.expand(srv, src, ang)

            l_seg = np.linalg.norm(new_seg[1] - new_seg[0])

            # filter by length
            if l_seg > min_l:
                if s_0 is None:
                    # start a new run of joinable segments
                    s_0 = new_seg
                    s_1 = new_seg
                else:
                    # try to merge with the running segment
                    s_j = seg_join(s_1, new_seg)
                    if s_j is not None:
                        s_1 = s_j
                    else:
                        # cannot join: emit the running segment and start a new one
                        segs.append(s_1)
                        s_0 = new_seg
                        s_1 = new_seg

        if s_1 is not None:
            segs.append(s_1)

        print('len(segs) : {}'.format(len(segs)))
        print('segs : {}'.format(segs))

        # second pass: greedily merge any collected segments that can still be joined
        l = len(segs)
        mask = [False] * l
        new_segs = []

        for i in range(l):
            if mask[i]:
                continue
            mask[i] = True
            s0 = segs[i]
            for j in range(i + 1, l):
                if mask[j]:
                    continue
                s1 = segs[j]
                s2 = seg_join(s0, s1)
                if s2 is not None:
                    s0 = s2
                    mask[j] = True  # j has been merged into s0
            new_segs.append(s0)
        return new_segs