Example #1
    def start_test(self, challenge):
        m = self.RX.match(challenge)
        if m is None:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )
        try:
            v = float(m.group('v'))
            w = float(m.group('w'))
        except ValueError:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot
        from pose import Pose

        info = QuickBot(Pose()).get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)

        vl, vr = s.uni2diff((v, w))

        self.testsuite.respond("{:0.3f},{:0.3f}".format(
            vr, vl))  # Note the inverse order
Example #2
def updatePoseList(lastPose, u_in, nextTimestamp_s, dT, path=None):
    """
    Update vehicle pose with given longtitude acceleration and timestamp
    Args:
        lastPose: the lastest pose
        u_in: current acceleration as input
        nextTimestamp_s: next timestamp
    Return:
        l_pose(timestamp: pose): discreted pose list to given timestamp
        l_u(timestamp: u): input list to given timestamp
    """
    # assert nextTimestamp_s > lastPose.timestamp_s
    l_pose = {}
    dyaw = 0
    step = int((nextTimestamp_s - lastPose.timestamp_s) / dT)
    vx = lastPose.vdy.vx_ms

    for k in range(1, step + 1, 1):

        dTime = k * dT
        nextT = round(lastPose.timestamp_s + dTime, 2)
        next_vdy = VehicleDynamic(vx + u_in * dTime, dyaw)

        dP = vx * dTime + 0.5 * u_in * (dTime**2)
        dP = max(dP, 0)

        dX = dP * math.cos(lastPose.yaw_rad)
        dY = dP * math.sin(lastPose.yaw_rad)

        # update covariance matrix from the nearest pose
        next_covLatLong = 0
        if not l_pose:
            next_covLatLong = updateCovLatlong(lastPose.covLatLong, dT, dP, 0)
        else:
            nearestPose = l_pose[max(l_pose)]
            dPNearest = np.linalg.norm([
                lastPose.x_m + dX - nearestPose.x_m,
                lastPose.y_m + dY - nearestPose.y_m
            ])
            next_covLatLong = updateCovLatlong(nearestPose.covLatLong, dT,
                                               dPNearest, 0)

        if param._TEST and path is not None:
            ix, iy, iyaw = path.getDs(dP)
            nextPose = Pose(x_m=ix,
                            y_m=iy,
                            yaw_rad=iyaw,
                            vdy=next_vdy,
                            covLatLong=next_covLatLong,
                            timestamp_s=nextT)
        else:
            nextPose = Pose(x_m=lastPose.x_m + dX,
                            y_m=lastPose.y_m + dY,
                            yaw_rad=lastPose.yaw_rad,
                            vdy=next_vdy,
                            covLatLong=next_covLatLong,
                            timestamp_s=nextT)
        l_pose[nextT] = nextPose

    return l_pose
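Each iteration integrates from lastPose with constant acceleration, dP = vx*dTime + 0.5*u_in*dTime**2, so the displacement is cumulative from the last known pose rather than step-to-step. A quick standalone check of that kinematics:

vx, u_in, dT = 5.0, 2.0, 0.1   # initial speed [m/s], acceleration [m/s^2], step [s]
for k in range(1, 4):
    dTime = k * dT
    dP = vx * dTime + 0.5 * u_in * dTime**2
    print("t=%.1fs dP=%.3fm" % (dTime, dP))  # 0.510, 1.040, 1.590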
Example #3
    def start_test(self, challenge):
        m = self.RX.match(challenge)
        if m is None:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )
        try:
            d1 = float(m.group('d1'))
            d2 = float(m.group('d2'))
        except ValueError:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot, QuickBot_IRSensor
        from pose import Pose

        bot = QuickBot(Pose())
        sensor = QuickBot_IRSensor(Pose(), bot)

        id1 = sensor.distance_to_value(d1)
        id2 = sensor.distance_to_value(d2)

        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)
        # Just in case a student iterates in a weird way
        s.robot.ir_sensors.readings = [id1, id2, id1, id2, id1]
        ird = s.get_ir_distances()

        self.testsuite.respond("{:0.3f},{:0.3f}".format(
            abs((d1 - ird[0]) / d1), abs((d2 - ird[1]) / d2)))
Example #4
    def start_test(self, challenge):
        vals = self.parseChallenge(challenge)

        if 'd1' not in vals or 'd2' not in vals:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        d1 = vals['d1']
        d2 = vals['d2']

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot, QuickBot_IRSensor
        from pose import Pose

        bot = QuickBot(Pose())
        sensor = QuickBot_IRSensor(Pose(), bot)

        id1 = sensor.distance_to_value(d1)
        id2 = sensor.distance_to_value(d2)

        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)
        # Just in case a student iterates in a weird way
        s.robot.ir_sensors.readings = [id1, id2, id1, id2, id1]
        ird = s.get_ir_distances()

        self.testsuite.respond("{:0.3f},{:0.3f}".format(
            abs((d1 - ird[0]) / d1), abs((d2 - ird[1]) / d2)))
Example #5
    def start_test(self, challenge):
        vals = self.parseChallenge(challenge)

        if 'v' not in vals or 'w' not in vals:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        vd = vals['v']
        wd = vals['w']

        QuickBotSupervisor = helpers.load_by_name('week3.QBGTGSupervisor',
                                                  'supervisors')
        QuickBot = helpers.load_by_name('QuickBot', 'robots')
        from pose import Pose

        bot = QuickBot(Pose())
        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)

        vld, vrd = s.uni2diff((vd, wd))
        vl, vr = s.ensure_w((vld, vrd))
        # Clamp to robot maxima
        vl = max(-info.wheels.max_velocity, min(info.wheels.max_velocity, vl))
        vr = max(-info.wheels.max_velocity, min(info.wheels.max_velocity, vr))
        v, w = bot.diff2uni((vl, vr))

        self.testsuite.respond("{:0.3f}".format(abs(w - wd) / wd))
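For reference, uni2diff and diff2uni convert between a unicycle command (v, w) and wheel speeds (vl, vr) via the wheel radius R and wheel base L. A minimal sketch of both conversions, with illustrative default values for R and L (not necessarily the QuickBot's actual parameters):

def uni2diff(v, w, R=0.0325, L=0.09925):
    # From v = R*(vr + vl)/2 and w = R*(vr - vl)/L, solved for vl, vr
    vl = (2 * v - w * L) / (2 * R)
    vr = (2 * v + w * L) / (2 * R)
    return vl, vr

def diff2uni(vl, vr, R=0.0325, L=0.09925):
    return R * (vr + vl) / 2, R * (vr - vl) / L

vl, vr = uni2diff(0.2, 0.5)
print(diff2uni(vl, vr))  # recovers (0.2, 0.5)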
Example #6
def calculate_pose(old_pose, motion, timedelta, map):
    """Calculates a possible new position, with added gaussian noise."""

    distance_left = motion.left * timedelta * FACTOR
    distance_right = motion.right * timedelta * FACTOR

    if distance_left == distance_right:
        # Straight-line motion: no rotation.
        dx = distance_left * math.cos(old_pose.theta)
        dy = distance_left * math.sin(old_pose.theta)
        dtheta = 0
    elif distance_left == -distance_right:
        # In-place rotation: no translation.
        dx, dy = 0, 0
        dtheta = RADIANS_DURING_1_turn
        # Equivalently, by arc length: dtheta = distance_left / (WHEEL_DISTANCE_IN_CM / 2),
        # where a left turn gives positive dtheta.
        if distance_left > 0:
            dtheta *= -1
    else:
        # Only straight moves and in-place turns are modelled.
        raise ValueError('wheel distances must be equal or opposite')

    if dx == 0.0 and dtheta == 0.0:
        return Pose(old_pose.x, old_pose.y, old_pose.theta)

    if GAUSSIAN_NOISE:

        if dx == 0.0 and dy == 0.0:
            return sample_rotation(old_pose, dtheta)

        elif dtheta == 0.0:
            return sample_forward_move(old_pose, dx, dy, map)
        else:
            # Unreachable with the motion model above
            raise RuntimeError('dx, dy and dtheta are not zero')
    else:
        return Pose(old_pose.x + dx, old_pose.y + dy, old_pose.theta + dtheta)
Example #7
    def test_transform(self):
        random_e = np.random.rand(3) * np.pi * 2
        random_t = np.random.rand(3)
        pose = Pose(random_t, random_e)
        matrix = ttf.compose_matrix(angles=random_e, translate=random_t)

        random_e = np.random.rand(3) * np.pi * 2
        random_t = np.random.rand(3)
        pose_2 = Pose(random_t, random_e)
        matrix_2 = ttf.compose_matrix(angles=random_e, translate=random_t)

        matrix_t = matrix_2.dot(matrix)
        pose_t = pose_2.transform(pose)
        self.assert_pose_matrix_eq(pose_t, matrix_t)
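The test relies on Pose.transform agreeing with homogeneous-matrix composition. Assuming ttf is the transformations package (as the compose_matrix signature suggests), the matrix side can be checked on its own:

import transformations as ttf  # assuming ttf is the transformations package

a = ttf.compose_matrix(angles=[0.1, 0.2, 0.3], translate=[1.0, 2.0, 3.0])
b = ttf.compose_matrix(angles=[0.3, 0.1, 0.2], translate=[3.0, 1.0, 2.0])
ab = b.dot(a)  # apply a first, then b, matching pose_2.transform(pose)

# decompose_matrix recovers the composed rotation and translation
scale, shear, angles, translate, perspective = ttf.decompose_matrix(ab)
print(angles, translate)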
Example #8
    def start_test(self, challenge):
        m = self.RX.match(challenge)
        if m is None:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )
        try:
            v = float(m.group('v'))
            theta = float(m.group('theta'))
        except ValueError:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot
        from pose import Pose
        from helpers import Struct
        from math import pi

        bot = QuickBot(Pose())
        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)
        params = Struct()
        params.goal = theta * 180 / pi
        params.velocity = v
        params.pgain = 1
        s.set_parameters(params)

        tc = 0.033  # 0.033 s is the SimIAm time step

        for step in range(25):  # 25 steps
            bot.move(tc)
            bot.set_inputs(s.execute(bot.get_info(), tc))

        xe, ye, te = s.pose_est
        xr, yr, tr = bot.get_pose()

        if xr == 0:
            xr = 0.0000001
        if yr == 0:
            yr = 0.0000001
        if tr == 0:
            tr = 0.0000001

        self.testsuite.respond("{:0.3f},{:0.3f},{:0.3f}".format(
            abs((xr - xe) / xr), abs((yr - ye) / yr),
            abs(abs(tr - te) % (2 * pi) / tr)))
Example #9
 def test_inverse(self):
     random_e = np.random.rand(3) * np.pi * 2
     random_t = np.random.rand(3)
     pose = Pose(random_t, random_e).inverse()
     matrix = np.linalg.inv(
         ttf.compose_matrix(angles=random_e, translate=random_t))
     self.assert_pose_matrix_eq(pose, matrix)
Example #10
    def draw(self, renderer):
        """Draw a circular goal, path and ir_sensors"""

        # Draw goal
        renderer.set_pose(Pose(self.pose_est.x, self.pose_est.y,
                               pi * self.parameters.goal / 180))
        renderer.set_pen(0)
        # renderer.draw_line(0.03, 0, 100, 0)
        renderer.draw_arrow(0.05, 0, 0.5, 0, close=True)

        # Draw robot path
        self.tracker.draw(renderer)

        renderer.set_pose(self.pose_est)
        renderer.set_brush(0)
        renderer.draw_ellipse(0, 0, 0.01, 0.01)

        # Draw IR distances as crosses at the sensed points
        crosses = array([dot(p.get_transformation(), [d, 0, 1])
                         for d, p in zip(self.get_ir_distances(),
                                         self.robot.ir_sensors.poses)])

        renderer.set_pen(0)
        for c in crosses:
            x, y, z = c

            renderer.push_state()
            renderer.translate(x, y)
            renderer.rotate(atan2(y, x))

            renderer.draw_line(0.01, 0.01, -0.01, -0.01)
            renderer.draw_line(0.01, -0.01, -0.01, 0.01)

            renderer.pop_state()
Example #11
    def estimate_pose(self):
        """Update self.pose_est using odometry"""
        
        # Get tick updates
        dtl = self.robot.wheels.left_ticks - self.left_ticks
        dtr = self.robot.wheels.right_ticks - self.right_ticks
        
        # Save the wheel encoder ticks for the next estimate
        self.left_ticks += dtl
        self.right_ticks += dtr
        
        x, y, theta = self.pose_est

        R = self.robot.wheels.radius
        L = self.robot.wheels.base_length
        m_per_tick = (2*pi*R)/self.robot.wheels.ticks_per_rev
            
        # distance travelled by left wheel
        dl = dtl*m_per_tick
        # distance travelled by right wheel
        dr = dtr*m_per_tick
            
        theta_dt = (dr-dl)/L
        theta_mid = theta + theta_dt/2
        dst = (dr+dl)/2
        x_dt = dst*cos(theta_mid)
        y_dt = dst*sin(theta_mid)
            
        theta_new = theta + theta_dt
        x_new = x + x_dt
        y_new = y + y_dt
           
        return Pose(x_new, y_new, (theta_new + pi)%(2*pi)-pi)
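The same update can be exercised standalone: the midpoint heading theta + theta_dt/2 approximates the arc travelled, and the final modulo wraps the heading into (-pi, pi]. The wheel parameters below are illustrative, not the QuickBot's actual values:

from math import pi, cos, sin

R, L, TICKS_PER_REV = 0.021, 0.0885, 16   # illustrative values
m_per_tick = 2 * pi * R / TICKS_PER_REV

def odom_step(x, y, theta, dtl, dtr):
    dl, dr = dtl * m_per_tick, dtr * m_per_tick
    theta_dt = (dr - dl) / L
    dst = (dr + dl) / 2
    x += dst * cos(theta + theta_dt / 2)
    y += dst * sin(theta + theta_dt / 2)
    return x, y, (theta + theta_dt + pi) % (2 * pi) - pi

print(odom_step(0.0, 0.0, 0.0, 10, 12))   # slight left curve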
Example #12
 def draw(self, renderer):
     """Draw a circular goal"""
     renderer.set_pose(Pose(self.parameters.goal.x, self.parameters.goal.y))
     renderer.set_pen(0)
     renderer.set_brush(self.robot_color)
     r = self.robot_size/2
     renderer.draw_ellipse(0,0,r,r)
Example #13
 def draw_background(self, renderer):
     """Draw a circular goal"""
     renderer.set_pose(Pose(self.parameters.goal.x, self.parameters.goal.y))
     renderer.set_pen(0)
     renderer.set_brush(self.robot_color)
     for r in numpy.arange(self.robot_size/2,0,-0.01):
         renderer.draw_ellipse(0,0,r,r)
Example #14
def set_animation():

    pose = Pose()
    animation = Animation()
    animation.add_pose(pose)

    return animation
Example #15
    def __init__(self):

        self.lwheel_prev = 0
        self.rwheel_prev = 0
        self.lwheel_cur = 0
        self.rwheel_cur = 0

        self.leftTicks = 0
        self.rightTicks = 0

        self.x_prev = 0
        self.y_prev = 0
        self.x_cur = 0
        self.y_cur = 0

        self.theta_prev = 0  # in rad
        self.theta_cur = 0  # in rad

        self.lin_x_vel = 0
        self.lin_y_vel = 0

        self.yaw_vel = 0

        self.wheelRad = 0.0325  # in metres
        self.gearRatio = 120 / 1
        self.ticksPerOneMotorRot = 8
        self.ticksPerOneWheelRot = self.gearRatio * self.ticksPerOneMotorRot  # 960
        self.oneMeterToWheelRot = 1 / (2 * pi * self.wheelRad)
        self.oneMeterToEncoderTicks = self.oneMeterToWheelRot * self.ticksPerOneWheelRot
        self.oneTickInMeters = 1 / self.oneMeterToEncoderTicks
        self.wheelSeparation = 14.5 * 0.01  # metres
        self.pose = Pose()
        self.baseFrameID = 'base_link'
        self.odomFrameID = 'odom'
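With these constants each encoder tick corresponds to 2*pi*wheelRad/960 of travel, roughly 0.21 mm; a quick check:

from math import pi

wheelRad = 0.0325                       # m
ticksPerOneWheelRot = 120 * 8           # gear ratio x ticks per motor revolution
print(2 * pi * wheelRad / ticksPerOneWheelRot)  # ~0.000213 m per tick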
Example #16
    def get_pose(self, pose_id):
        if pose_id >= len(self.data["poses"]):
            return None

        tmp = self.data["poses"][pose_id]
        time = tmp[0]
        pose = Pose(tmp[1][0], tmp[1][1], tmp[1][2])
        return (time, pose)
Example #17
    def set_pose(self, rpose):
        RealBot.set_pose(self, rpose)

        # We have to update walls here. WHY?
        for pose, dst in zip(
                self.info.ir_sensors.poses,
                np.polyval(self.ir_coeff, self.info.ir_sensors.readings)):
            if dst / self.info.ir_sensors.rmax <= 0.99:
                self.walls.add_point(Pose(dst) >> pose >> self.get_pose())
Example #18
def draw_annotations(coords_folder, video_folder, output_folder, cam_ids):

    for cam_id in cam_ids:
        cam_coords_path = os.path.join(coords_folder, "cam_{}".format(cam_id),
                                       "coords_cam_{}.csv".format(cam_id))
        cam_video_path = os.path.join(video_folder, "cam_{}".format(cam_id),
                                      "cam_{}.mp4".format(cam_id))

        print("Loading {}".format(cam_video_path))

        cam_video_output_folder = os.path.join(output_folder,
                                               "cam_{}".format(cam_id))
        os.makedirs(cam_video_output_folder, exist_ok=True)
        output_cam_video_path = os.path.join(
            cam_video_output_folder, "annotations_cam_{}.mp4".format(cam_id))

        print("Writing {}".format(output_cam_video_path))

        cam_coords = pd.read_csv(cam_coords_path)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')

        out = cv2.VideoWriter(output_cam_video_path, fourcc, 41.0,
                              (1920, 1080))

        #Get frames until the video ends
        cam_video_capture = cv2.VideoCapture(cam_video_path)
        total_frames = int(cam_video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        tqdm_progress = tqdm(total=total_frames)

        current_frame_no = 0

        while True:
            ret, frame = cam_video_capture.read()
            tqdm_progress.update()

            # Stop before touching the frame: on the last read it is None
            if not ret:
                out.release()
                cam_video_capture.release()
                break

            coords_frame = cam_coords[cam_coords["frame_no_cam"] ==
                                      current_frame_no]
            for person_id in list(coords_frame["person_id"]):
                coords_one_person_frame = coords_frame[
                    coords_frame["person_id"] == person_id]
                person_joints = get_joints_from_person_coords(
                    coords_one_person_frame)
                pose = Pose(person_joints)
                pose.drawBoundingBox(image=frame, color=(255, 0, 0))
                pose.drawPersonId(image=frame)
                pose.drawPose(image=frame, color=[0, 255, 0])

            cv2.imshow("", mat=frame)
            cv2.waitKey(1)

            out.write(image=frame)

            current_frame_no += 1
Example #19
    def __init__(self, n_particles, copy=False):
        self.map = OccupancyGridMap(copy)
        if not copy:
            x_co = random.gauss(0, 0.2)
            y_co = random.gauss(0, 0.2)
            theta = random.gauss(0, 0.05)
            self.pose = Pose(x_co, y_co, theta)

            self.weight = 1.0 / n_particles
Example #20
    def __init__(self,
                 idx,
                 length,
                 width,
                 from_x_m,
                 from_y_m,
                 to_x_m,
                 to_y_m,
                 covLong,
                 covLat,
                 vx_ms,
                 startTime,
                 isStop=False,
                 appearRate=1,
                 interactRate=1,
                 dT=param._dT):
        self._idx = idx
        self._length = length
        self._width = width
        self._lw_std = np.array([self._length / 2,
                                 self._width / 2])  # std offset
        self._isDetected = False
        self._detectedTime = None  # time when first detected
        self._Pcoll = 0  # maximum collision probability with ego

        startTime = round(round(startTime / dT, 2) * dT, 2)
        self._startTime = startTime
        self.theta = math.atan2(to_y_m - from_y_m, to_x_m - from_x_m)
        startPose = Pose(x_m=from_x_m,
                         y_m=from_y_m,
                         yaw_rad=self.theta,
                         covLatLong=np.diag([covLong, covLat]),
                         vdy=VehicleDynamic(vx_ms, 0),
                         timestamp_s=startTime)

        if isStop:
            # deceleration if stopping
            self._u = pfnc.computeAccToStop(from_x_m=from_x_m,
                                            from_y_m=from_y_m,
                                            to_x_m=to_x_m,
                                            to_y_m=to_y_m,
                                            vx_ms=vx_ms)
        else:
            self._u = 0

        # for hypothetical object
        self._appearRate = appearRate
        self._interactRate = interactRate

        # pose
        self._p_pose = {}
        self._currentPose = startPose
        self._l_pose = {startPose.timestamp_s: startPose}

        # first states prediction
        self.predict(dT=param._PREDICT_STEP, pT=param._PREDICT_TIME)
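computeAccToStop is not shown here; for a constant deceleration that brings the object to rest exactly at the target it would follow a = -v^2 / (2*d), with d the remaining distance. A sketch under that assumption (the real helper may differ):

import math

def acc_to_stop(from_x_m, from_y_m, to_x_m, to_y_m, vx_ms):
    # Constant deceleration over the straight-line distance to the target
    d = math.hypot(to_x_m - from_x_m, to_y_m - from_y_m)
    return -vx_ms ** 2 / (2 * d)

print(acc_to_stop(0, 0, 10, 0, 5))  # -1.25 m/s^2 stops a 5 m/s object in 10 m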
Example #21
    def insert_scan_only_endpoint(self, scan, robot_pose):
        rays = scan.get_rays(robot_pose)

        for ray in rays:
            pose, dist = ray
            if dist < self.min_dist or dist > self.max_dist:
                continue

            end_pose = pose.clone().add_relative_pose(Pose(dist, 0, 0))
            self.add_density(end_pose, self.positive_scan_weight)
Example #22
def get_pose(frame_data, person_id):
    # type: (np.ndarray, int) -> Pose
    """
	:param frame_data: data of the current frame
	:param person_id: person identifier
	:return: list of joints in the current frame with the required person ID
	"""
    pose = [Joint(j) for j in frame_data[frame_data[:, 1] == person_id]]
    pose.sort(key=(lambda j: j.type))
    return Pose(pose, "JTA")
Example #23
File: airrace.py Project: ASTARCHEN/heidi
def stripPose( rect, highResolution=True, scale=None ):
  "return relative pose of image rectangle"
  (x,y),(w,h),a = rect
  assert w > 3*h, (w,h)  # 30cm long, 5cm wide ... i.e. should be 6 times
  a = -a-90
  if a > 90:
    a -= 180
  if a < -90:
    a += 180
  if scale is None:
    if w >= 5*h: # it should be 6x, but width is more precise
      scale = 0.3/float(w)
    else:
      scale = 0.05/float(h)
  if highResolution:
    return Pose( scale*(720/2-y), scale*(1280/2-x), math.radians( a ) )
  else:
#    return Pose( scale*(360/2-y), scale*(640/2-x), math.radians( a ) )
    return Pose( scale*(360/2-y), scale*(270-x), math.radians( a ) )
Example #24
    def copy_particle(self, old_p):
        p = Particle(self.n_particles, True)
        p.pose = Pose(old_p.pose.x, old_p.pose.y, old_p.pose.theta)
        # Reset the weight of the particle after selection ??
        p.weight = old_p.weight
        p.map.minrange = old_p.map.minrange
        p.map.maxrange = old_p.map.maxrange
        p.map.grid = old_p.map.grid.copy()
        p.map.path = old_p.map.path.copy()

        return p
Example #25
    def draw_foreground(self, renderer):
        """Draw controller info"""
        QuickBotSupervisor.draw_foreground(self, renderer)

        renderer.set_pose(Pose(self.pose_est.x, self.pose_est.y))
        arrow_length = self.robot_size * 5

        # Draw arrow to goal
        renderer.set_pen(0x00FF00)
        renderer.draw_arrow(0, 0, arrow_length * cos(self.gtg.heading_angle),
                            arrow_length * sin(self.gtg.heading_angle))
Example #26
 def __init__(self):
     self.lefthandle = Pose("Left")
     self.righthandle = Pose("Right")
     self.HMD = Pose("HMD")
     self.eyehandle = PoseEye("Eye")
     self.currentDevice = 0
     self.systemcmds = ["", "Ls", "Rs"]
     self.triggercmds = ["", "Lt", "Rt"]
     self.devicecmds = ["H ", "L ", "R ", "E "]
     self.devices = [
         self.HMD, self.lefthandle, self.righthandle, self.eyehandle
     ]  # order corresponds to currentDevice
     self.ZMQhmd = zmqsocket.ZMQsocket(5577)
     self.ZMQeye = zmqsocket.ZMQsocket(5555)
     self.testcaseinprogress = False
     # System button S/L grip G/H app:F/J trig:D/K
     self.allowedKeysLeft = "SDFG"
     self.allowedKeysRight = "LKJH"
     self.lastcmd = ""
     self.statusstring = "No Test Case Loaded"
Example #27
    def get_heading(self, state):
        """Get the direction along the wall as a vector."""

        # Calculate vectors for the sensors
        if state.direction == 'left':  # 0-2
            d, i = min(zip(state.sensor_distances[:3], [0, 1, 2]))
            if i == 0 or (i == 1 and state.sensor_distances[0] <=
                          state.sensor_distances[2]):
                i, j, k = 1, 0, 2

            else:
                i, j, k = 2, 1, 0

        else:  # 2-4
            d, i = min(zip(state.sensor_distances[2:], [2, 3, 4]))
            if i == 4 or (i == 3 and state.sensor_distances[4] <=
                          state.sensor_distances[2]):
                i, j, k = 3, 4, 2
            else:
                i, j, k = 2, 3, 4

        p_front = Pose(state.sensor_distances[i]) >> self.sensor_poses[i]
        p_back = Pose(state.sensor_distances[j]) >> self.sensor_poses[j]

        self.vectors = [(p_front.x, p_front.y, 1), (p_back.x, p_back.y, 1)]

        # Calculate the two vectors:
        ds = ((p_front.x - p_back.x)**2 + (p_front.y - p_back.y)**2)
        ms = (p_front.x * p_back.y - p_front.y * p_back.x)
        self.to_wall_vector = numpy.array([(p_back.y - p_front.y) * ms / ds,
                                           (p_front.x - p_back.x) * ms / ds,
                                           1])
        self.along_wall_vector = numpy.array(
            [p_front.x - p_back.x, p_front.y - p_back.y, 1])

        # Calculate and return the heading vector:
        offset = abs(ms / math.sqrt(ds)) - state.distance
        if offset > 0:
            return 0.3 * self.along_wall_vector + 2 * offset * self.to_wall_vector
        else:
            return 0.3 * self.along_wall_vector + 3 * offset * self.to_wall_vector
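Geometrically, with ds = |p_front - p_back|^2 and ms the 2D cross product p_front x p_back, |ms| / sqrt(ds) is the perpendicular distance from the robot (at the origin) to the wall line, and to_wall_vector is the foot of that perpendicular. A quick check independent of the Pose machinery:

import math

p_back, p_front = (1.0, 1.0), (2.0, 1.0)   # two points on a wall along y = 1
ds = (p_front[0] - p_back[0]) ** 2 + (p_front[1] - p_back[1]) ** 2
ms = p_front[0] * p_back[1] - p_front[1] * p_back[0]
print(abs(ms) / math.sqrt(ds))             # 1.0, the distance to the wall
print(((p_back[1] - p_front[1]) * ms / ds,
       (p_front[0] - p_back[0]) * ms / ds))  # (0.0, 1.0), foot of the perpendicular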
Example #28
    def get_heading(self, state):
        """Get the direction away from the obstacles as a vector."""

        # Calculate heading:
        x, y = 0, 0
        for d, p, w in zip(state.sensor_distances, self.sensor_poses,
                           self.weights):
            pose = Pose(d) >> p
            x += pose.x * w
            y += pose.y * w

        return numpy.array([x, y, 1])
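This and the previous snippet use Pose(d) >> p to express a point d metres along a sensor's axis in the robot frame. A minimal sketch of that kind of 2D pose composition, assuming >> chains child-frame poses into the parent frame (the actual Pose operator may differ):

from math import cos, sin

def compose(child, parent):
    # Express pose `child`, given in `parent`'s frame, in the enclosing frame
    cx, cy, ct = child
    px, py, pt = parent
    return (px + cx * cos(pt) - cy * sin(pt),
            py + cx * sin(pt) + cy * cos(pt),
            ct + pt)

sensor_pose = (0.05, 0.0, 0.5)                # sensor in the robot frame
print(compose((0.3, 0.0, 0.0), sensor_pose))  # reading 0.3 m along the sensor axis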
Example #29
 def set_view_rect(self, x, y, width, height):
     """Zoom on the rectangle to fit it into the view
     """
     self.__view_rect = (x, y, width, height)
     zoom = min(self.size[0] / float(width), self.size[1] / float(height))
     xtra_width = self.size[0] / zoom - float(width)
     xtra_height = self.size[1] / zoom - float(height)
     self._defpose = Pose(x - xtra_width / 2, y - xtra_height / 2, 0)
     self._zoom = zoom
     self._zoom_c = False
     self._adjust_grid(zoom)
     self._update_default_state()
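The zoom preserves aspect ratio by taking the limiting axis, then centres the rectangle by splitting the leftover world-space width and height. A numeric walk-through for an 800x600 view and a 4x2 rectangle:

size = (800.0, 600.0)                     # viewport in pixels
x, y, width, height = 0.0, 0.0, 4.0, 2.0  # world rectangle to fit
zoom = min(size[0] / width, size[1] / height)  # 200 px per world unit
xtra_width = size[0] / zoom - width       # 0.0, width is the limiting axis
xtra_height = size[1] / zoom - height     # 1.0 world unit of vertical slack
print(zoom, (x - xtra_width / 2, y - xtra_height / 2))  # default pose centres the slack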
Example #30
    def __init__(self):
        """
        
        """

        ## params
        # tf frames
        self._odom_frame = rospy.get_param('~odom_frame', 'odom')
        self._base_frame = rospy.get_param('~base_frame', 'base_footprint')
        # 2 wheel dolly's wheel separation is ~30.5 cm
        self._wheel_separation = float(
            rospy.get_param('~wheel_separation', 0.305))
        # 2 wheel dolly's wheel diameter is ~12 cm
        self._wheel_diameter = float(rospy.get_param('~wheel_diameter', 0.12))
        # wheel directions
        self._left_wheel_forward = rospy.get_param('~left_wheel_forward', True)
        self._right_wheel_forward = rospy.get_param('~right_wheel_forward',
                                                    False)
        # keigan motors BLE mac addresses
        self._left_wheel_mac = rospy.get_param('~left_wheel_mac',
                                               'D8:BA:37:4A:88:A1')
        self._right_wheel_mac = rospy.get_param('~right_wheel_mac',
                                                'DD:7A:96:3C:E1:51')
        # update rate, default 30 Hz
        self._update_rate = rospy.get_param('~update_rate', 30.0)
        # lock for synchronizing between read and write to BLE
        self._lock = threading.Lock()

        ## vars
        self._pose = Pose()
        self._left_last_pos = 0.0
        self._right_last_pos = 0.0
        self._last_time = rospy.Time.now()
        # max rotation is 250 rpm, or ~26 rad/s
        self._max_rot = 26.0  #rad/s
        # wheels BLE device handle
        self._left_wheel_dev = KMControllers.BLEController(
            self._left_wheel_mac)
        self._right_wheel_dev = KMControllers.BLEController(
            self._right_wheel_mac)

        ## tf
        self._tfb = tf.TransformBroadcaster()

        ## pubs
        self._odom_pub = rospy.Publisher('odom', Odometry, queue_size=1)

        ## subs
        self._twist_sub = rospy.Subscriber('cmd_vel',
                                           Twist,
                                           self._twist_callback,
                                           queue_size=1)