Example #1
class get_Score(object):
	def __init__(self, lookup='lookup.pickle'):
		self.a = Pose()
		self.s = Score()
		self.b = pickle.load(open(lookup, 'rb'))
		self.input_test = []

	def get_action_coords_from_dict(self,action):
			for (k,v) in self.b.items():
				if k==action:
					(model_array,no_of_frames) = (v,v.shape[0])
			return model_array,no_of_frames
	
	def calculate_Score(self,video,action):
		with tf.compat.v1.Session() as sess:
			model_cfg, model_outputs = posenet.load_model(101, sess)
			model_array,j = self.get_action_coords_from_dict(action)
			cap = cv2.VideoCapture(video)
			i = 0
			if cap.isOpened() is False:
				print("error in opening video")
			while cap.isOpened():
				ret_val, image = cap.read()
				if ret_val:         
					input_points= self.a.getpoints(cv2.resize(image,(372,495)),sess,model_cfg,model_outputs)
					input_new_coords = np.asarray(self.a.roi(input_points)[0:34]).reshape(17,2)
					self.input_test.append(input_new_coords)
					i = i + 1
				else:
					break
			cap.release()
			final_score,score_list = self.s.compare(np.asarray(self.input_test),np.asarray(model_array),j,i)
		return final_score,score_list
Example #2
    def start_test(self, challenge):
        m = self.RX.match(challenge)
        if m is None:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )
        try:
            d1 = float(m.group('d1'))
            d2 = float(m.group('d2'))
        except ValueError:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot, QuickBot_IRSensor
        from pose import Pose

        bot = QuickBot(Pose())
        sensor = QuickBot_IRSensor(Pose(), bot)

        id1 = sensor.distance_to_value(d1)
        id2 = sensor.distance_to_value(d2)

        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)
        # Just in case a student iterates in a weird way
        s.robot.ir_sensors.readings = [id1, id2, id1, id2, id1]
        ird = s.get_ir_distances()

        self.testsuite.respond("{:0.3f},{:0.3f}".format(
            abs((d1 - ird[0]) / d1), abs((d2 - ird[1]) / d2)))
Example #3
    def start_test(self, challenge):
        m = self.RX.match(challenge)
        if m is None:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )
        try:
            v = float(m.group('v'))
            w = float(m.group('w'))
        except ValueError:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot
        from pose import Pose

        info = QuickBot(Pose()).get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)

        vl, vr = s.uni2diff((v, w))

        self.testsuite.respond("{:0.3f},{:0.3f}".format(
            vr, vl))  # Note the inverse order
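For reference, uni2diff maps a unicycle command (v, w) onto left and right wheel speeds. A minimal sketch of the usual differential-drive conversion, with placeholder wheel radius R and base length L rather than the real QuickBot constants:

def uni2diff(v, w, R=0.0325, L=0.1):
    # v: linear velocity [m/s], w: angular velocity [rad/s]
    vl = (2 * v - w * L) / (2 * R)   # left wheel angular velocity [rad/s]
    vr = (2 * v + w * L) / (2 * R)   # right wheel angular velocity [rad/s]
    return vl, vr

print(uni2diff(0.2, 0.5))  # left and right wheel speeds for the assumed parameters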
Example #4
    def start_test(self, challenge):
        vals = self.parseChallenge(challenge)

        if 'd1' not in vals or 'd2' not in vals:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        d1 = vals['d1']
        d2 = vals['d2']

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot, QuickBot_IRSensor
        from pose import Pose

        bot = QuickBot(Pose())
        sensor = QuickBot_IRSensor(Pose(), bot)

        id1 = sensor.distance_to_value(d1)
        id2 = sensor.distance_to_value(d2)

        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)
        # Just in case a student iterates in a weird way
        s.robot.ir_sensors.readings = [id1, id2, id1, id2, id1]
        ird = s.get_ir_distances()

        self.testsuite.respond("{:0.3f},{:0.3f}".format(
            abs((d1 - ird[0]) / d1), abs((d2 - ird[1]) / d2)))
Example #5
    def start_test(self, challenge):
        vals = self.parseChallenge(challenge)

        if 'v' not in vals or 'w' not in vals:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        vd = vals['v']
        wd = vals['w']

        QuickBotSupervisor = helpers.load_by_name('week3.QBGTGSupervisor',
                                                  'supervisors')
        QuickBot = helpers.load_by_name('QuickBot', 'robots')
        from pose import Pose

        bot = QuickBot(Pose())
        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)

        vld, vrd = s.uni2diff((vd, wd))
        vl, vr = s.ensure_w((vld, vrd))
        # Clamp to robot maxima
        vl = max(-info.wheels.max_velocity, min(info.wheels.max_velocity, vl))
        vr = max(-info.wheels.max_velocity, min(info.wheels.max_velocity, vr))
        v, w = bot.diff2uni((vl, vr))

        self.testsuite.respond("{:0.3f}".format(abs(w - wd) / wd))
Example #6

def updatePoseList(lastPose, u_in, nextTimestamp_s, dT, path=None):
    """
    Update the vehicle pose with the given longitudinal acceleration up to a timestamp
    Args:
        lastPose: the latest pose
        u_in: current acceleration as input
        nextTimestamp_s: next timestamp
        dT: discretization time step
        path: optional reference path, used in test mode
    Return:
        l_pose{timestamp: pose}: discretized pose list up to the given timestamp
    """
    # assert nextTimestamp_s > lastPose.timestamp_s
    l_pose = {}
    dyaw = 0
    step = int((nextTimestamp_s - lastPose.timestamp_s) / dT)
    vx = lastPose.vdy.vx_ms

    for k in range(1, step + 1, 1):

        dTime = k * dT
        nextT = round(lastPose.timestamp_s + dTime, 2)
        next_vdy = VehicleDynamic(vx + u_in * dTime, dyaw)

        dP = vx * dTime + 0.5 * u_in * (dTime**2)
        dP = max(dP, 0)

        dX = dP * math.cos(lastPose.yaw_rad)
        dY = dP * math.sin(lastPose.yaw_rad)

        # update covariance matrix from the nearest pose
        next_covLatLong = 0
        if not l_pose:
            next_covLatLong = updateCovLatlong(lastPose.covLatLong, dT, dP, 0)
        else:
            nearestPose = l_pose[max(l_pose)]
            dPNearest = np.linalg.norm([
                lastPose.x_m + dX - nearestPose.x_m,
                lastPose.y_m + dY - nearestPose.y_m
            ])
            next_covLatLong = updateCovLatlong(nearestPose.covLatLong, dT,
                                               dPNearest, 0)

        if param._TEST and path is not None:
            ix, iy, iyaw = path.getDs(dP)
            nextPose = Pose(x_m=ix,
                            y_m=iy,
                            yaw_rad=iyaw,
                            vdy=next_vdy,
                            covLatLong=next_covLatLong,
                            timestamp_s=nextT)
        else:
            nextPose = Pose(x_m=lastPose.x_m + dX,
                            y_m=lastPose.y_m + dY,
                            yaw_rad=lastPose.yaw_rad,
                            vdy=next_vdy,
                            covLatLong=next_covLatLong,
                            timestamp_s=nextT)
        l_pose[nextT] = nextPose

    return l_pose
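Each step in updatePoseList advances the travelled distance with the constant-acceleration relation s = v*t + 0.5*a*t**2, clamped at zero so the vehicle never moves backwards; a quick check with assumed numbers:

vx, u_in, dTime = 2.0, -1.0, 0.5   # assumed values: 2 m/s, braking at 1 m/s^2, 0.5 s step
dP = max(vx * dTime + 0.5 * u_in * dTime**2, 0)
print(dP)  # 0.875 m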
Example #7
def calculate_pose(old_pose, motion, timedelta, map):
    """Calculates a possible new position, with added gaussian noise."""

    distance_left = motion.left * timedelta * FACTOR
    distance_right = motion.right * timedelta * FACTOR

    # Only straight motion and turns in place are modelled; initialise the
    # deltas so the function degrades gracefully for any other wheel motion.
    dx, dy, dtheta = 0.0, 0.0, 0.0
    if distance_left == distance_right:
        dx = distance_left * math.cos(old_pose.theta)
        dy = distance_left * math.sin(old_pose.theta)
    elif distance_left == -distance_right:
        # turn in place: arc length over half the wheel distance gives the rotation,
        # so a left turn results in a positive dtheta
        dtheta = RADIANS_DURING_1_turn
        if distance_left > 0:
            dtheta *= -1

    if dx == 0.0 and dtheta == 0.0:
        return Pose(old_pose.x, old_pose.y, old_pose.theta)

    if GAUSSIAN_NOISE:

        if dx == 0.0 and dy == 0.0:
            return sample_rotation(old_pose, dtheta)

        elif dtheta == 0.0:
            return sample_forward_move(old_pose, dx, dy, map)
        else:
            print('dx, dy and dtheta are not zero ??')
            exit()
    else:
        return Pose(old_pose.x + dx, old_pose.y + dy, old_pose.theta + dtheta)
Example #8
    def test_transform(self):
        random_e = np.random.rand(3) * np.pi * 2
        random_t = np.random.rand(3)
        pose = Pose(random_t, random_e)
        matrix = ttf.compose_matrix(angles=random_e, translate=random_t)

        random_e = np.random.rand(3) * np.pi * 2
        random_t = np.random.rand(3)
        pose_2 = Pose(random_t, random_e)
        matrix_2 = ttf.compose_matrix(angles=random_e, translate=random_t)

        matrix_t = matrix_2.dot(matrix)
        pose_t = pose_2.transform(pose)
        self.assert_pose_matrix_eq(pose_t, matrix_t)
Example #9
def main():
    pose = Pose()
    coordinate_list = []

    # importing the module
    import json

    # Opening JSON file
    with open('weights.json') as json_file:
        weights = json.load(json_file)["weights"]

        s = sum(weights)
        weights = [w / s for w in weights]

    with tf.compat.v1.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(101, sess)

        cap = cv2.VideoCapture(args["video"])
        i = 1

        if cap.isOpened() is False:
            print("error in opening video")
        while cap.isOpened():
            ret_val, image = cap.read()
            if ret_val:
                input_points, input_black_image = pose.getpoints_vis(
                    image, sess, model_cfg, model_outputs)
                cv2.imwrite('./test_video' + str(i) + '.jpg',
                            input_black_image)
                input_points = input_points[0:34]
                # print(input_points)
                input_new_coords = pose.roi(input_points)
                input_new_coords = input_new_coords[0:34]
                input_new_coords = np.asarray(input_new_coords).reshape(17, 2)
                coordinate_list.append(input_new_coords)
                i = i + 1
            else:
                break
        cap.release()

        coordinate_list = np.array(coordinate_list)

        # print(coordinate_list)
        # print(coordinate_list.shape)
        print("Lookup Table Created")
        file = open(args["lookup"], 'wb')
        pickle.dump({
            args["activity"]: coordinate_list,
            "weights": weights
        }, file)
Example #10
    def start_test(self, challenge):
        m = self.RX.match(challenge)
        if m is None:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )
        try:
            v = float(m.group('v'))
            theta = float(m.group('theta'))
        except ValueError:
            raise CourseraException(
                "Unknown challenge format. Please contact developers for assistance."
            )

        from supervisors.week2 import QuickBotSupervisor
        from robots.quickbot import QuickBot
        from pose import Pose
        from helpers import Struct
        from math import pi

        bot = QuickBot(Pose())
        info = bot.get_info()
        info.color = 0
        s = QuickBotSupervisor(Pose(), info)
        params = Struct()
        params.goal = theta * 180 / pi
        params.velocity = v
        params.pgain = 1
        s.set_parameters(params)

        tc = 0.033  # 0.033 s is the SimIAm time step

        for step in range(25):  # 25 steps
            bot.move(tc)
            bot.set_inputs(s.execute(bot.get_info(), tc))

        xe, ye, te = s.pose_est
        xr, yr, tr = bot.get_pose()

        if xr == 0:
            xr = 0.0000001
        if yr == 0:
            yr = 0.0000001
        if tr == 0:
            tr = 0.0000001

        self.testsuite.respond("{:0.3f},{:0.3f},{:0.3f}".format(
            abs((xr - xe) / xr), abs((yr - ye) / yr),
            abs(abs(tr - te) % (2 * pi) / tr)))
Example #11
 def test_inverse(self):
     random_e = np.random.rand(3) * np.pi * 2
     random_t = np.random.rand(3)
     pose = Pose(random_t, random_e).inverse()
     matrix = np.linalg.inv(
         ttf.compose_matrix(angles=random_e, translate=random_t))
     self.assert_pose_matrix_eq(pose, matrix)
Example #12
    def draw(self, renderer):
        """Draw a circular goal, path and ir_sensors"""
        
        # Draw goal
        renderer.set_pose(Pose(self.pose_est.x,self.pose_est.y,pi*self.parameters.goal/180))
        renderer.set_pen(0)
#        renderer.draw_line(0.03,0,100,0)
        renderer.draw_arrow(0.05,0,0.5,0,close=True)
        
        # Draw robot path
        self.tracker.draw(renderer)
        
        renderer.set_pose(self.pose_est)
        renderer.set_brush(0)
        renderer.draw_ellipse(0,0,0.01,0.01)
               
        # Draw IR distances
        crosses = array([ dot(p.get_transformation(), [d,0,1])
                          for d, p in zip(self.get_ir_distances(), self.robot.ir_sensors.poses) ] )
                                
        renderer.set_pen(0)
        for c in crosses:
            x,y,z = c
            
            renderer.push_state()
            renderer.translate(x,y)
            renderer.rotate(atan2(y,x))
        
            renderer.draw_line(0.01,0.01,-0.01,-0.01)
            renderer.draw_line(0.01,-0.01,-0.01,0.01)
            
            renderer.pop_state()                                   
Example #13
    def estimate_pose(self):
        """Update self.pose_est using odometry"""
        
        # Get tick updates
        dtl = self.robot.wheels.left_ticks - self.left_ticks
        dtr = self.robot.wheels.right_ticks - self.right_ticks
        
        # Save the wheel encoder ticks for the next estimate
        self.left_ticks += dtl
        self.right_ticks += dtr
        
        x, y, theta = self.pose_est

        R = self.robot.wheels.radius
        L = self.robot.wheels.base_length
        m_per_tick = (2*pi*R)/self.robot.wheels.ticks_per_rev
            
        # distance travelled by left wheel
        dl = dtl*m_per_tick
        # distance travelled by right wheel
        dr = dtr*m_per_tick
            
        theta_dt = (dr-dl)/L
        theta_mid = theta + theta_dt/2
        dst = (dr+dl)/2
        x_dt = dst*cos(theta_mid)
        y_dt = dst*sin(theta_mid)
            
        theta_new = theta + theta_dt
        x_new = x + x_dt
        y_new = y + y_dt
           
        return Pose(x_new, y_new, (theta_new + pi)%(2*pi)-pi)
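The midpoint odometry update above can be written as a standalone helper; a sketch that uses plain floats instead of the supervisor's Pose class:

from math import cos, sin, pi

def odometry_update(x, y, theta, dl, dr, base_length):
    # dl, dr: distances travelled by the left and right wheels since the last estimate
    dtheta = (dr - dl) / base_length
    theta_mid = theta + dtheta / 2       # heading at the midpoint of the step
    ds = (dr + dl) / 2                   # distance travelled by the robot centre
    x_new = x + ds * cos(theta_mid)
    y_new = y + ds * sin(theta_mid)
    theta_new = (theta + dtheta + pi) % (2 * pi) - pi   # wrap to (-pi, pi]
    return x_new, y_new, theta_new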
Example #14
def set_animation():

    pose = Pose()
    animation = Animation()
    animation.add_pose(pose)

    return animation
Example #15
    def __init__(self):

        self.lwheel_prev = int(0)
        self.rwheel_prev = int(0)
        self.lwheel_cur = int(0)
        self.rwheel_cur = int(0)

        self.leftTicks = 0
        self.rightTicks = 0

        self.x_prev = 0
        self.y_prev = 0
        self.x_cur = 0
        self.y_cur = 0

        self.theta_prev = 0  # in rad
        self.theta_cur = 0  # in rad

        self.lin_x_vel = 0
        self.lin_y_vel = 0

        self.yaw_vel = 0

        self.wheelRad = 0.0325  # in metres
        self.gearRatio = 120 / 1
        self.ticksPerOneMotorRot = 8
        self.ticksPerOneWheelRot = self.gearRatio * self.ticksPerOneMotorRot  #960
        self.oneMeterToWheelRot = 1 / (2 * pi * self.wheelRad)
        self.oneMeterToEncoderTicks = self.oneMeterToWheelRot * self.ticksPerOneWheelRot
        self.oneTickInMeters = 1 / self.oneMeterToEncoderTicks
        self.wheelSeparation = 14.5 * 0.01  # metres
        self.pose = Pose()
        self.baseFrameID = 'base_link'
        self.odomFrameID = 'odom'
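With the constants above, one encoder tick corresponds to roughly 0.21 mm of wheel travel; a quick sanity check with the same (assumed) values:

from math import pi

wheel_rad = 0.0325                     # m
ticks_per_wheel_rot = 120 * 8          # gear ratio * ticks per motor rotation = 960
metres_per_tick = (2 * pi * wheel_rad) / ticks_per_wheel_rot
print(metres_per_tick)                 # ~0.000213 m, i.e. ~0.21 mm per tick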
Example #16
 def draw(self, renderer):
     """Draw a circular goal"""
     renderer.set_pose(Pose(self.parameters.goal.x, self.parameters.goal.y))
     renderer.set_pen(0)
     renderer.set_brush(self.robot_color)
     r = self.robot_size/2
     renderer.draw_ellipse(0,0,r,r)
Example #17
 def draw_background(self, renderer):
     """Draw a circular goal"""
     renderer.set_pose(Pose(self.parameters.goal.x, self.parameters.goal.y))
     renderer.set_pen(0)
     renderer.set_brush(self.robot_color)
     for r in numpy.arange(self.robot_size/2,0,-0.01):
         renderer.draw_ellipse(0,0,r,r)
Example #18
def draw_annotations(coords_folder, video_folder, output_folder, cam_ids):

    for cam_id in cam_ids:
        cam_coords_path = os.path.join(coords_folder, "cam_{}".format(cam_id),
                                       "coords_cam_{}.csv".format(cam_id))
        cam_video_path = os.path.join(video_folder, "cam_{}".format(cam_id),
                                      "cam_{}.mp4".format(cam_id))

        print("Loading {}".format(cam_video_path))

        cam_video_output_folder = os.path.join(output_folder,
                                               "cam_{}".format(cam_id))
        os.makedirs(cam_video_output_folder, exist_ok=True)
        output_cam_video_path = os.path.join(
            cam_video_output_folder, "annotations_cam_{}.mp4".format(cam_id))

        print("Writing {}".format(output_cam_video_path))

        cam_coords = pd.read_csv(cam_coords_path)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')

        out = cv2.VideoWriter(output_cam_video_path, fourcc, 41.0,
                              (1920, 1080))

        # Grab frames until the video ends
        cam_video_capture = cv2.VideoCapture(cam_video_path)
        total_frames = int(cam_video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        tqdm_progress = tqdm(total=total_frames)

        current_frame_no = 0

        while True:
            ret, frame = cam_video_capture.read()
            if not ret:
                out.release()
                cam_video_capture.release()
                break

            tqdm_progress.update()
            coords_frame = cam_coords[cam_coords["frame_no_cam"] ==
                                      current_frame_no]
            for person_id in list(coords_frame["person_id"]):
                coords_one_person_frame = coords_frame[
                    coords_frame["person_id"] == person_id]
                person_joints = get_joints_from_person_coords(
                    coords_one_person_frame)
                pose = Pose(person_joints)
                pose.drawBoundingBox(image=frame, color=(255, 0, 0))
                pose.drawPersonId(image=frame)
                pose.drawPose(image=frame, color=[0, 255, 0])

            cv2.imshow("", mat=frame)
            cv2.waitKey(1)

            out.write(image=frame)

            current_frame_no += 1
Example #19
    def get_pose(self, pose_id):
        if (pose_id >= len(self.data["poses"])):
            return None

        tmp = self.data["poses"][pose_id]
        time = tmp[0]
        pose = Pose(tmp[1][0], tmp[1][1], tmp[1][2])
        return (time, pose)
Example #20
    def __init__(self, n_particles, copy=False):
        self.map = OccupancyGridMap(copy)
        if not copy:
            x_co = random.gauss(0, 0.2)
            y_co = random.gauss(0, 0.2)
            theta = random.gauss(0, 0.05)
            self.pose = Pose(x_co, y_co, theta)

            self.weight = 1.0 / n_particles
Example #21
    def set_pose(self, rpose):
        RealBot.set_pose(self, rpose)

        # We have to update walls here. WHY?
        for pose, dst in zip(
                self.info.ir_sensors.poses,
                np.polyval(self.ir_coeff, self.info.ir_sensors.readings)):
            if dst / self.info.ir_sensors.rmax <= 0.99:
                self.walls.add_point(Pose(dst) >> pose >> self.get_pose())
Example #22

def inference_image(model, parser):
    print("image")
    # preparing
    list_image_paths = glob(os.path.join(parser.input, "*.jpg"))
    preprocess = InferenceTransformation(320, 320)
    pose_parser = Pose(image_scale=0.125)
    hand_model = HandPoseDetector(static_model=True)

    average_time = 0
    for ind, image_path in tqdm.tqdm(enumerate(list_image_paths)):
        origin_image = cv2.imread(image_path)

        height, width, _ = origin_image.shape
        ratio_scale = height / 320
        add_width = (320 - int(320 * width / height)) // 2
        current_time = cv2.getTickCount()
        process_image = preprocess(origin_image)
        image = preprocess_tensor(process_image.copy())

        paf, heatmap = model(image)
        # paf = paf[0]
        # heatmap = heatmap[0]
        paf = paf.reshape((38, 40, 40))
        heatmap = heatmap.reshape((19, 40, 40))
        pose_parser.parser_pose(paf, heatmap)
        draw_pose(process_image, pose_parser.poses_list,
                  pose_parser.hand_window, body_edges)
        hand_images = pose_parser.get_hand_head_images(origin_image,
                                                       ratio_scale, add_width)

        if hand_images is not None:
            image, _ = hand_model(hand_images)

        current_time = (cv2.getTickCount() -
                        current_time) / cv2.getTickFrequency()
        # cv2.imwrite("./_image/temp_hand_%d.jpg"%ind, image)
        if hand_images is not None:
            process_image[-200:, :100] = cv2.resize(image, (100, 200))

        average_time += current_time
        cv2.putText(process_image, 'parsing time: {}'.format(current_time),
                    (10, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
        cv2.imwrite("./_image/img_with_pose_%d.jpg" % ind, process_image)
    print("avg time: %f" % (average_time / len(list_image_paths)))
Example #23

    def __init__(self,
                 idx,
                 length,
                 width,
                 from_x_m,
                 from_y_m,
                 to_x_m,
                 to_y_m,
                 covLong,
                 covLat,
                 vx_ms,
                 startTime,
                 isStop=False,
                 appearRate=1,
                 interactRate=1,
                 dT=param._dT):
        self._idx = idx
        self._length = length
        self._width = width
        self._lw_std = np.array([self._length / 2,
                                 self._width / 2])  # std offset
        self._isDetected = False
        self._detectedTime = None  # time when first detected
        self._Pcoll = 0  # maximum collision probability with ego

        startTime = round(round(startTime / dT, 2) * dT, 2)
        self._startTime = startTime
        self.theta = math.atan2(to_y_m - from_y_m, to_x_m - from_x_m)
        startPose = Pose(x_m=from_x_m,
                         y_m=from_y_m,
                         yaw_rad=self.theta,
                         covLatLong=np.diag([covLong, covLat]),
                         vdy=VehicleDynamic(vx_ms, 0),
                         timestamp_s=startTime)

        if isStop:
            # deceleration if stopping
            self._u = pfnc.computeAccToStop(from_x_m=from_x_m,
                                            from_y_m=from_y_m,
                                            to_x_m=to_x_m,
                                            to_y_m=to_y_m,
                                            vx_ms=vx_ms)
        else:
            self._u = 0

        # for hypothetical object
        self._appearRate = appearRate
        self._interactRate = interactRate

        # pose
        self._p_pose = {}
        self._currentPose = startPose
        self._l_pose = {startPose.timestamp_s: startPose}

        # first states prediction
        self.predict(dT=param._PREDICT_STEP, pT=param._PREDICT_TIME)
Example #24
    def insert_scan_only_endpoint(self, scan, robot_pose):
        rays = scan.get_rays(robot_pose)

        for ray in rays:
            pose, dist = ray
            if dist < self.min_dist or dist > self.max_dist:
                continue

            end_pose = pose.clone().add_relative_pose(Pose(dist, 0, 0))
            self.add_density(end_pose, self.positive_scan_weight)
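Here add_relative_pose(Pose(dist, 0, 0)) is assumed to advance the ray's origin by dist along its own heading; for a plain (x, y, theta) pose that reduces to:

from math import cos, sin

def advance_along_heading(x, y, theta, dist):
    # move dist metres in the direction the pose is facing; the heading is unchanged
    return x + dist * cos(theta), y + dist * sin(theta), theta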
Example #25
def get_pose(frame_data, person_id):
    # type: (np.ndarray, int) -> Pose
    """
	:param frame_data: data of the current frame
	:param person_id: person identifier
	:return: list of joints in the current frame with the required person ID
	"""
    pose = [Joint(j) for j in frame_data[frame_data[:, 1] == person_id]]
    pose.sort(key=(lambda j: j.type))
    return Pose(pose, "JTA")
Example #26
File: airrace.py  Project: ASTARCHEN/heidi
def stripPose( rect, highResolution=True, scale=None ):
  "return relative pose of image rectangle"
  (x,y),(w,h),a = rect
  assert w > 3*h, (w,h)  # 30cm long, 5cm wide ... i.e. should be 6 times
  a = -a-90
  if a > 90:
    a -= 180
  if a < -90:
    a += 180
  if scale == None:
    if w >= 5*h: # it should be 6x, but width is more precise
      scale = 0.3/float(w)
    else:
      scale = 0.05/float(h)
  if highResolution:
    return Pose( scale*(720/2-y), scale*(1280/2-x), math.radians( a ) )
  else:
#    return Pose( scale*(360/2-y), scale*(640/2-x), math.radians( a ) )
    return Pose( scale*(360/2-y), scale*(270-x), math.radians( a ) )
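The scale converts pixels to metres using the known strip size (0.30 m long, 0.05 m wide); for example, a strip detected as 120 px long (an assumed value, not taken from the code) gives:

w = 120.0              # assumed pixel length of the detected strip
scale = 0.3 / float(w)
print(scale)           # 0.0025 m per pixel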
Example #27
def main():
	a = Pose()
	b = []
	c = {}
	
	
	with tf.compat.v1.Session() as sess:
		model_cfg, model_outputs = posenet.load_model(101, sess)
		
		cap = cv2.VideoCapture(args["video"])
		i = 1

		if cap.isOpened() is False:
			print("error in opening video")
		while cap.isOpened():
			ret_val, image = cap.read()
			if ret_val:
				image = cv2.resize(image,(372,495))			
				input_points,input_black_image = a.getpoints_vis(image,sess,model_cfg,model_outputs)
				input_points = input_points[0:34]
				print(input_points)
				input_new_coords = a.roi(input_points)
				input_new_coords = input_new_coords[0:34]
				input_new_coords = np.asarray(input_new_coords).reshape(17,2)
				b.append(input_new_coords)
				cv2.imshow("black", input_black_image)
				cv2.waitKey(1)
				i = i + 1
			else:
				break
		cap.release()
		

		b = np.array(b)
		
		cv2.destroyAllWindows()
		print(b)
		print(b.shape)
		print("Lookup Table Created")
		c[args["activity"]] = b
		f = open(args["lookup"],'wb')
		pickle.dump(c,f)
Example #28
class get_Score(object):
    def __init__(self, lookup='lookup.pickle'):
        self.pose = Pose()
        self.score = Score()
        self.saved_pose = pickle.load(open(lookup, 'rb'))
        self.weights = self.saved_pose["weights"]
        self.new_video_coordinates = []

    def get_action_coords_from_dict(self, action):
        for k, v in self.saved_pose.items():
            if k == action:
                (model_array, no_of_frames) = (v, v.shape[0])
        return model_array, no_of_frames

    def calculate_Score(self, video, action):
        with tf.compat.v1.Session() as sess:
            model_cfg, model_outputs = posenet.load_model(101, sess)
            reference_coordinates, reference_video_frames = self.get_action_coords_from_dict(
                action)
            cap = cv2.VideoCapture(video)
            new_video_frames = 0
            if cap.isOpened() is False:
                print("error in opening video")
            while cap.isOpened():
                ret_val, image = cap.read()
                if ret_val:
                    input_points = self.pose.getpoints(image, sess, model_cfg,
                                                       model_outputs)
                    if len(input_points) == 0:
                        continue
                    input_new_coords = np.asarray(
                        self.pose.roi(input_points)[0:34]).reshape(17, 2)
                    self.new_video_coordinates.append(input_new_coords)
                    new_video_frames = new_video_frames + 1
                else:
                    break
            cap.release()
            final_score, score_list = self.score.compare_34dim(
                np.asarray(self.new_video_coordinates),
                np.asarray(reference_coordinates), new_video_frames,
                reference_video_frames, self.weights)
        return final_score, score_list
Example #29
def inference_image(model, parser):
    print("image")
    # preparing
    list_image_paths = glob(os.path.join(parser.input, "*.jpg"))
    preprocess = InferenceTransformation(368, 368)
    pose_parser = Pose(image_scale=0.125)
    hand_model = HandPoseDetector(static_model=True)
    face_model = FaceMeshDetector(static_model=True)

    average_time = 0
    for ind, image_path in tqdm.tqdm(enumerate(list_image_paths)):
        origin_image = cv2.imread(image_path)

        height, width,_ = origin_image.shape
        ratio_scale = height / 368
        add_width = (368 - int(368 * width / height))//2
        current_time = cv2.getTickCount()
        process_image = preprocess(origin_image)
        image = torch.Tensor(preprocess_tensor(process_image.copy())).unsqueeze(0).cuda()

        paf, heatmap = model(image)
        paf = paf.detach().cpu().numpy()[0]
        heatmap = heatmap.detach().cpu().numpy()[0]

        pose_parser.parser_pose(paf, heatmap)
        draw_pose(process_image, pose_parser.poses_list, pose_parser.hand_window, body_edges)
        hand_images = pose_parser.get_hand_head_images(origin_image,ratio_scale, add_width)

        for hand_ind, image in enumerate(hand_images):
            image, _ = hand_model(image)
            # cv2.imwrite("./_image/temp_hand_%d.jpg"%ind, image)
            if hand_ind == 1:
                process_image[-100:,:100] = cv2.resize(image,(100,100))
            elif hand_ind == 0:
                process_image[-100:,-100:] = cv2.resize(image,(100,100))

        current_time = (cv2.getTickCount() - current_time) / cv2.getTickFrequency()
        average_time += current_time
        cv2.putText(process_image, 'parsing time: {}'.format(current_time),
                    (10, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
        cv2.imwrite("./_image/img_with_pose_%d.jpg" % ind, process_image)
    print("avg time: %f" % (average_time / len(list_image_paths)))
Example #30

def legacy_get_waypoints(data):
    """Extract the waypoints from move_group/display_planned_path.

    Legacy version of get_waypoints, which is a much neater implementation.

    Args:
        data: planned trajectory message published on move_group/display_planned_path
    """
    global waypoints, done_waypoints

    points_list = data.trajectory[0].multi_dof_joint_trajectory.points
    p = Pose()

    for transform in points_list:
        # create a list of waypoints in pose.Pose after conversion from the
        # planned trajectory
        p.convert_geometry_transform_to_pose(transform.transforms[0])
        waypoints.append(list(p.as_waypoints()))
    done_waypoints = True
Example #31
	def __init__(self, filename):
		rospy.init_node('goal_sender')
		self.stuck_buffer = 10
		self.stuck_count = self.stuck_buffer
		self.dpt = discretePolicyTranslator(filename)
		self.pose = Pose('',[0,0,0],'tf',None)
		self.current_status = 3
		self.tf_exist = False
		self.tf_exception_wrapper()
		self.goal_point = self.get_new_goal(self.pose._pose)
		self.pub = rospy.Publisher('/deckard/move_base_simple/goal',PoseStamped,queue_size=10)
		rospy.sleep(1) #<>TODO: figure out why the hell this works --> solves issue where robot would not move on initialization
		rospy.Subscriber('/deckard/move_base/status',GoalStatusArray,self.callback)
		print("initial position: " + str(self.pose._pose))
Example #32
	def __init__(self, filename, avoidance_type):
		rospy.init_node('goal_sender')
		self.stuck_buffer = 5
		self.stuck_count = self.stuck_buffer
		self.dpt = discretePolicyTranslator(filename)
		self.pose = Pose('',[0,0,0],'tf',None)
		self.current_status = 3
		self.last_position = self.pose._pose
		self.tf_exist = False
		self.tf_exception_wrapper()
		self.goal_point = self.pose._pose
		self.avoidance = sys.argv[2]
		self.pub = rospy.Publisher('/deckard/move_base_simple/goal',PoseStamped,queue_size=10)
		rospy.sleep(1) #<>TODO: figure out why the hell this works --> solves issue where robot would not move on initialization
		rospy.Subscriber('/deckard/move_base/status',GoalStatusArray,self.callback)
		#print("initial position: " + str(self.pose._pose))

		logging.basicConfig(filename='goalHandler_debug.log',level=logging.DEBUG)
Example #33
 def random_particle(neg_x_limit, x_limit, neg_y_limit, y_limit, weight):
     '''Returns a random particle within the limits of a map.
     '''
     return Particle(Pose.random_pose(neg_x_limit, x_limit, neg_y_limit, y_limit), weight)
Example #34
def competeAirRace( drone, desiredHeight = 1.7 ):
  loops = []
  drone.speed = 0.1
  maxVideoDelay = 0.0
  maxControlGap = 0.0
  loc = StripsLocalisation()
  
  remainingFastStrips = 0
  desiredSpeed = TRANSITION_SPEED
  updateFailedCount = 0

  try:
    drone.wait(1.0)
    drone.setVideoChannel( front=False )    
    drone.takeoff()
    poseHistory = []
    startTime = drone.time
    while drone.time < startTime + 1.0:
      drone.update("AT*PCMD=%i,0,0,0,0,0\r") # drone.hover(1.0)
      poseHistory.append( (drone.time, (drone.coord[0], drone.coord[1], drone.heading), (drone.angleFB, drone.angleLR)) )
    magnetoOnStart = drone.magneto[:3]
    print "NAVI-ON"
    pathType = PATH_TURN_LEFT
    virtualRefCircle = None
    startTime = drone.time
    sx,sy,sz,sa = 0,0,0,0
    lastUpdate = None
    while drone.time < startTime + 600.0:
      sz = max( -0.2, min( 0.2, desiredHeight - drone.coord[2] ))
      sx = max( 0, min( drone.speed, desiredSpeed - drone.vx ))

      if drone.lastImageResult:
        lastUpdate = drone.time
        assert len( drone.lastImageResult ) == 2 and len( drone.lastImageResult[0] ) == 2, drone.lastImageResult
        (frameNumber, timestamp), rects = drone.lastImageResult
        viewlog.dumpCamera( "tmp_%04d.jpg" % (frameNumber/15,), 0 )

        # keep history small
        videoTime = correctTimePeriod( timestamp/1000., ref=drone.time )
        videoDelay = drone.time - videoTime
        if videoDelay > 1.0:
          print "!DANGER! - video delay", videoDelay
        maxVideoDelay = max( videoDelay, maxVideoDelay )
        toDel = 0
        for oldTime, oldPose, oldAngles in poseHistory:
          toDel += 1
          if oldTime >= videoTime:
            break
        poseHistory = poseHistory[:toDel]

        tiltCompensation = Pose(desiredHeight*oldAngles[0], desiredHeight*oldAngles[1], 0) # TODO real height?
        print "FRAME", frameNumber/15, "[%.1f %.1f]" % (math.degrees(oldAngles[0]), math.degrees(oldAngles[1])),
        loc.updateFrame( Pose( *oldPose ).add(tiltCompensation), allStripPoses( rects, highResolution=drone.videoHighResolution ) )
        if loc.pathType != pathType:
          print "TRANS", pathType, "->", loc.pathType
          if pathType == PATH_TURN_LEFT and loc.pathType == PATH_STRAIGHT:
            if len(loops) > 0:
              print "Loop %d, time %d" % (len(loops), drone.time-loops[-1])
            print "-----------------------------------------------"
            loops.append( drone.time )
          if drone.magneto[:3] == magnetoOnStart:
            print "!!!!!!!! COMPASS FAILURE !!!!!!!!"
          pathType = loc.pathType
          print "----"
          remainingFastStrips = NUM_FAST_STRIPS

        if loc.crossing:
          print "X", True, remainingFastStrips
        else:
          print pathType, loc.pathUpdated, remainingFastStrips
        if not loc.pathUpdated:
          updateFailedCount += 1
          if updateFailedCount > 1:
            print "UPDATE FAILED", updateFailedCount
        else:
          updateFailedCount = 0

        if remainingFastStrips > 0:
          remainingFastStrips -= 1
          desiredSpeed = MAX_ALLOWED_SPEED
          if not loc.pathUpdated and not loc.crossing:
            desiredSpeed = STRIPS_FAILURE_SPEED
        else:
          desiredSpeed = TRANSITION_SPEED
        if videoDelay > MAX_ALLOWED_VIDEO_DELAY:
          desiredSpeed = 0.0

        if drone.battery < 10:
          print "BATTERY LOW!", drone.battery

        # height debugging
        #print "HEIGHT\t%d\t%d\t%.2f\t%d\t%d\t%d\t%d\t%d\t%d" % tuple([max([0]+[w for ((x,y),(w,h),a) in rects])] + list(drone.altitudeData[:4]) + list(drone.pressure) )

        for sp in allStripPoses( rects, highResolution=drone.videoHighResolution ):
          sPose = Pose( *oldPose ).add(tiltCompensation).add( sp )
          viewlog.dumpBeacon( sPose.coord(), index=3 )
          viewlog.dumpObstacles( [[(sPose.x-0.15*math.cos(sPose.heading), sPose.y-0.15*math.sin(sPose.heading)), 
                                       (sPose.x+0.15*math.cos(sPose.heading), sPose.y+0.15*math.sin(sPose.heading))]] )

      refCircle,refLine = loc.getRefCircleLine( Pose(drone.coord[0], drone.coord[1], drone.heading) )
      if refCircle == None and refLine == None and virtualRefCircle != None:
        refCircle = virtualRefCircle
      # error definition ... if you subtract that you get the desired position or angle
      # error is taken from the path point of view, x-path direction, y-positive left, angle-anticlockwise
      errY, errA = 0.0, 0.0
      assert refCircle == None or refLine == None # they cannot be both active at the same time
      if refCircle:
        if pathType == PATH_TURN_LEFT:
          errY = refCircle[1] - math.hypot( drone.coord[0]-refCircle[0][0], drone.coord[1]-refCircle[0][1] )
          errA = normalizeAnglePIPI( - math.atan2( refCircle[0][1] - drone.coord[1], refCircle[0][0] - drone.coord[0] ) 
                                      - math.radians(-90) + drone.heading )
        if pathType == PATH_TURN_RIGHT:
          errY = math.hypot( drone.coord[0]-refCircle[0][0], drone.coord[1]-refCircle[0][1] ) - refCircle[1]
          errA = normalizeAnglePIPI( - math.atan2( refCircle[0][1] - drone.coord[1], refCircle[0][0] - drone.coord[0] ) 
                                      - math.radians(90) + drone.heading )
      if refLine:
        errY = refLine.signedDistance( drone.coord )
        errA = normalizeAnglePIPI( drone.heading - refLine.angle )

      # get the height first
      if drone.coord[2] < desiredHeight - 0.1 and drone.time-startTime < 5.0:
        sx = 0.0
      if refCircle == None and refLine == None and virtualRefCircle == None:
        sx = 0.0 # wait for Z-up
        if drone.coord[2] > desiredHeight - 0.1:
          print "USING VIRTUAL LEFT TURN CIRCLE!"
          circCenter = Pose( drone.coord[0], drone.coord[1], drone.heading ).add( Pose(0.0, REF_CIRCLE_RADIUS, 0 )).coord()
          viewlog.dumpBeacon( circCenter, index=0 )
          virtualRefCircle = circCenter, REF_CIRCLE_RADIUS

      # error correction
      # the goal is to have errY and errA zero in 1 second -> errY defines desired speed at given distance from path
      sy = max( -0.2, min( 0.2, -errY-drone.vy ))/2.0
      
      # there is no drone.va (i.e. derivative of heading) available at the moment ... 
      sa = max( -0.1, min( 0.1, -errA/2.0 ))*1.35*(desiredSpeed/0.4) # originally set for 0.4=OK

#      print "%0.2f\t%d\t%0.2f\t%0.2f\t%0.2f" % (errY, int(math.degrees(errA)), drone.vy, sy, sa)
      prevTime = drone.time
      drone.moveXYZA( sx, sy, sz, sa )
      maxControlGap = max( drone.time - prevTime, maxControlGap )
      poseHistory.append( (drone.time, (drone.coord[0], drone.coord[1], drone.heading), (drone.angleFB, drone.angleLR)) )
    print "NAVI-OFF", drone.time - startTime
    drone.hover(0.5)
    drone.land()
    drone.setVideoChannel( front=True )    
  except ManualControlException, e:
    print "ManualControlException"
    if drone.ctrlState == 3: # CTRL_FLYING=3 ... i.e. stop the current motion
      drone.hover(0.1)
    drone.land()
Example #35
class goalHandler(object):

	def __init__(self, filename, avoidance_type):
		rospy.init_node('goal_sender')
		self.stuck_buffer = 5
		self.stuck_count = self.stuck_buffer
		self.dpt = discretePolicyTranslator(filename)
		self.pose = Pose('',[0,0,0],'tf',None)
		self.current_status = 3
		self.last_position = self.pose._pose
		self.tf_exist = False
		self.tf_exception_wrapper()
		self.goal_point = self.pose._pose
		self.avoidance = sys.argv[2]
		self.pub = rospy.Publisher('/deckard/move_base_simple/goal',PoseStamped,queue_size=10)
		rospy.sleep(1) #<>TODO: figure out why the hell this works --> solves issue where robot would not move on initialization
		rospy.Subscriber('/deckard/move_base/status',GoalStatusArray,self.callback)
		#print("initial position: " + str(self.pose._pose))

		logging.basicConfig(filename='goalHandler_debug.log',level=logging.DEBUG)

	def tf_exception_wrapper(self):
		"""waits for transforms to become available and handles interim exceptions
		"""
		tries = 0
		while not self.tf_exist and tries < 10:
			try:
				self.pose.tf_update()
				self.tf_exist = True
				#self.current_position = self.pose._pose
				#self.goal_point = self.pose._pose
			except tf.LookupException as error:
				tries = tries + 1
				self.tf_exist = False
				# print("\nError!")
				# print(error)
				# print("Waiting for transforms to become available. Will retry 10 times.")
				# print("Try: " + str(tries) + "  Retrying in 2 seconds.\n")
				error_str = "\nError!\n" + str(error) + "\nWaiting for transforms to become available. Will retry 10 times." \
							+ "\nTry: " + str(tries) + " Retrying in 2 seconds.\n"
				print(error_str)
				logging.error(error_str)
				rospy.sleep(2)

	def callback(self,msg):
		"""callback function that runs when messages are published to /move_base/status.
		The function updates its knowledge of its position using tf data, then
		checks if the robot is stuck and sends the appropriate goal pose.
		"""

		self.pose.tf_update()
		#self.last_position = self.pose._pose
		if self.is_stuck():
			self.send_goal(True)
			while not self.is_at_goal():
				continue
		else:
			self.send_goal()
		#self.is_at_goal()
		#print("status: " + str(self.current_status) + "\tcheck: " + str(self.current_status==3) + "\tcurrent position: " + str(self.pose._pose))
		#if self.current_status == 3:
		#self.stuck_count = self.stuck_buffer
			#self.current_status = 1
		rospy.sleep(1)

	def is_at_goal(self):
		"""checks if robot has arrived at its goal pose
		"""

		tol = 0.25
		#print("Checking if arrived at goal")
		try:
			#print("X goal diff: " + str(abs(self.goal_point[0] - self.current_position[0])) + "\tY goal diff: " + str(abs(self.goal_point[1] - self.current_position[1])))
			if abs(self.goal_point[0] - self.pose._pose[0]) < tol and abs(self.goal_point[1] - self.pose._pose[1]) < tol:
				self.current_status = 3
				return True
		except TypeError:
			print("Goal pose does not yet exist!")
			self.current_status = 3

	def is_stuck(self):
		"""re-sends goal pose if robot is mentally or physically stuck for self.stuck_buffer number of iterations
		"""

		if self.stuck_count > 0: #check buffer
			self.stuck_count += -1
			#print("stuck count "+str(self.stuck_count))
			return False #return not stuck
		self.stuck_count = self.stuck_buffer
		self.stuck_distance = math.sqrt(((self.pose._pose[0] - self.last_position[0]) ** 2) + ((self.pose._pose[1] - self.last_position[1]) ** 2))
		self.last_position = self.pose._pose
		logging.debug('stuck distance: ' + str(self.stuck_distance))
		if self.stuck_distance < 0.5:
			print("Robot stuck; resending goal.")
			logging.info("Robot stuck; resending goal.")
			return True
		else:
			return False

	def get_new_goal(self,current_position,stuck_flag): #<>TODO: refine stuck vs blocked, as the robot can get stuck without technically being blocked
		"""get new goal pose from discretePolicyTranslator module
		"""

		point = current_position
		point[0] = round(point[0])
		point[1] = round(point[1])
		if stuck_flag and self.avoidance == '-b': #<>TODO: talk to luke about whether or not to keep any of the 'blocked' code
			return self.dpt.getNextPose(point,stuck_flag)
		elif stuck_flag and self.avoidance == '-s':
			return self.dpt.getNextPose(point,False,stuck_flag)
		else:
			return self.dpt.getNextPose(point)

	def send_goal(self,stuck_flag=False):
		"""get and send new goal pose. Returns false without sending pose if pose to send
		is the same as the current pose and the robot is not stuck (meaning it is enroute
		to that pose)
		"""

		new_pose = self.get_new_goal(self.pose._pose,stuck_flag)
		if self.goal_point == new_pose and not stuck_flag:
			return False
		else:
			self.goal_point = new_pose

		new_goal = PoseStamped()
		new_goal.pose.position.x = self.goal_point[0]
		new_goal.pose.position.y = self.goal_point[1]
		new_goal.pose.position.z = self.goal_point[2]
		theta = self.goal_point[3]

		if self.goal_point == [2,2,0,0]: #<>TODO: Fix this gross hack, it makes puppies cry
			theta = 180

		quat = tf.transformations.quaternion_from_euler(0,0,np.deg2rad(theta))
		new_goal.pose.orientation.x = quat[0]
		new_goal.pose.orientation.y = quat[1]
		new_goal.pose.orientation.z = quat[2]
		new_goal.pose.orientation.w = quat[3]
		#<>NOTE: random spinning that occurred in the gazebo sim does not occur when run on the physical robot

		new_goal.header.stamp = rospy.Time.now()
		new_goal.header.frame_id = 'map'

		self.pub.publish(new_goal)
		logging.debug("sent goal: " + str(self.goal_point))
Example #36
File: fre_drone.py  Project: Vibri/heidi
def stayAtPosition( drone, desiredHeight = 1.5, timeout = 10.0 ):
  maxControlGap = 0.0
  desiredSpeed = MAX_ALLOWED_SPEED
  refPoint = (0,0)
  foundTagTime = None
  searchSeq = [(0,0), (2,0), (0,-2), (-2,0), (0,2), (0,0)]
  startTime = drone.time
  sx,sy,sz,sa = 0,0,0,0
  stepTime = drone.time
  while drone.time < startTime + timeout:
      altitude = desiredHeight
      if drone.altitudeData != None:
        altVision = drone.altitudeData[0]/1000.0
        altSonar = drone.altitudeData[3]/1000.0
        altitude = (altSonar+altVision)/2.0
        # TODO selection based on history? panic when min/max too far??
        if abs(altSonar-altVision) > 0.5:
          print altSonar, altVision
          altitude = max( altSonar, altVision ) # sonar is 0.0 sometimes (no ECHO)

      sz = max( -0.2, min( 0.2, desiredHeight - altitude ))
      if altitude > 2.5:
        # wind and "out of control"
        sz = max( -0.5, min( 0.5, desiredHeight - altitude ))

      sx = 0.0 #max( 0, min( drone.speed, desiredSpeed - drone.vx ))

#        tiltCompensation = Pose(desiredHeight*oldAngles[0], desiredHeight*oldAngles[1], 0) # TODO real height?
#        print "FRAME", frameNumber/15, "[%.1f %.1f]" % (math.degrees(oldAngles[0]), math.degrees(oldAngles[1])),

#        if drone.battery < 10:
#          print "BATTERY LOW!", drone.battery


      # error definition ... if you subtract that you get the desired position or angle
      # error is taken from the path point of view, x-path direction, y-positive left, angle-anticlockwise
      errX, errY, errA = 0.0, 0.0, 0.0
#      if refLine:
#        errY = refLine.signedDistance( drone.coord )
#        errA = normalizeAnglePIPI( drone.heading - refLine.angle )

      if len(drone.visionTag) > 0:
        SCALE = 0.17/(2*74)
        tagX, tagY, tagDist = drone.visionTag[0][0], drone.visionTag[0][1], drone.visionTag[0][4]/100.0
        tiltCompensation = Pose(tagDist*drone.angleFB, tagDist*drone.angleLR, 0)
        pose = Pose(drone.coord[0], drone.coord[1], drone.heading).add(tiltCompensation)
        offset = Pose(tagDist*(480-tagY)*SCALE, tagDist*(tagX-640)*SCALE, 0.0)
        pose = pose.add( offset )
        refPoint = (pose.x, pose.y)
        if foundTagTime == None:
          print drone.visionTag
          print "%.2f\t%.2f\t%.2f\t%.1f\t%.1f" % (drone.time, tagDist*(tagY-480)*SCALE, tagDist*(tagX-640)*SCALE, math.degrees(drone.angleFB), math.degrees(drone.angleLR))
          print refPoint
        foundTagTime = drone.time
      else:
        if foundTagTime != None and drone.time - foundTagTime > 3.0:
          foundTagTime = None
          print "LOST TAG"
          searchSeq = [(x+drone.coord[0], y+drone.coord[1]) for (x,y) in [(0,0), (2,0), (0,-2), (-2,0), (0,2), (0,0)]]

      if foundTagTime == None and len(searchSeq) > 0 and drone.time - stepTime > 3.0:
        refPoint = searchSeq[0]
        searchSeq = searchSeq[1:]
        print "STEP", refPoint
        stepTime = drone.time

      if refPoint:
        pose = Pose(drone.coord[0], drone.coord[1], drone.heading)
        landPose = Pose( refPoint[0], refPoint[1], drone.heading )  # ignore heading for the moment
        diff = pose.sub( landPose )
        #print diff
        errX, errY = diff.x, diff.y



      # get the height first
#      if drone.coord[2] < desiredHeight - 0.1 and drone.time-startTime < 5.0:
#        sx = 0.0
      # error correction
      # the goal is to have errY and errA zero in 1 second -> errY defines desired speed at given distance from path
      sx = max( -0.2, min( 0.2, -errX-drone.vx ))/2.0
      sy = max( -0.2, min( 0.2, -errY-drone.vy ))/2.0
      
      # there is no drone.va (i.e. derivative of heading) available at the moment ... 
      sa = max( -0.1, min( 0.1, -errA/2.0 ))*1.35*(desiredSpeed/0.4) # originally set for 0.4=OK

#      print "%0.2f\t%d\t%0.2f\t%0.2f\t%0.2f" % (errY, int(math.degrees(errA)), drone.vy, sy, sa)
      prevTime = drone.time
      drone.moveXYZA( sx, sy, sz, sa )
      maxControlGap = max( drone.time - prevTime, maxControlGap )
  return maxControlGap
Example #37
class goalHandler(object):

	def __init__(self, filename):
		rospy.init_node('goal_sender')
		self.stuck_buffer = 10
		self.stuck_count = self.stuck_buffer
		self.dpt = discretePolicyTranslator(filename)
		self.pose = Pose('',[0,0,0],'tf',None)
		self.current_status = 3
		self.tf_exist = False
		self.tf_exception_wrapper()
		self.goal_point = self.get_new_goal(self.pose._pose)
		self.pub = rospy.Publisher('/deckard/move_base_simple/goal',PoseStamped,queue_size=10)
		rospy.sleep(1) #<>TODO: figure out why the hell this works --> solves issue where robot would not move on initialization
		rospy.Subscriber('/deckard/move_base/status',GoalStatusArray,self.callback)
		print("initial position: " + str(self.pose._pose))

	def tf_exception_wrapper(self):
		#waits for transforms to become available and handles interim exceptions
		tries = 0
		while not self.tf_exist and tries < 10:
			try: 
				self.pose.tf_update()	
				self.tf_exist = True
				#self.current_position = self.pose._pose
				#self.goal_point = self.pose._pose
			except tf.LookupException as error:
				tries = tries + 1
				self.tf_exist = False
				print("\nError!")
				print(error)
				print("Waiting for transforms to become available. Will retry 10 times.") 
				print("Try: " + str(tries) + "  Retrying in 2 seconds.\n")
				rospy.sleep(2)		

	def callback(self,msg):
		self.last_position = self.pose._pose
		self.pose.tf_update()
		self.stuck_distance = math.sqrt((self.pose._pose[0] - self.last_position[0]) ** 2 + (self.pose._pose[1] - self.last_position[1]) ** 2)
		self.is_stuck()
		self.is_at_goal()		
		print("status: " + str(self.current_status) + "\tcheck: " + str(self.current_status==3) + "\tcurrent position: " + str(self.pose._pose))
		if self.current_status == 3:
			self.send_goal()
			self.stuck_count = self.stuck_buffer
			self.current_status = 1
		rospy.sleep(1)	

	def is_at_goal(self):
		#checks if robot has arrived at its goal pose
		tol = 0.25
		print("Checking if arrived at goal")
		try:
			#print("X goal diff: " + str(abs(self.goal_point[0] - self.current_position[0])) + "\tY goal diff: " + str(abs(self.goal_point[1] - self.current_position[1])))
			if abs(self.goal_point[0] - self.pose._pose[0]) < tol and abs(self.goal_point[1] - self.pose._pose[1]) < tol:
				self.current_status = 3
		except TypeError:
			print("Goal pose does not yet exist!")
			self.current_status = 3	

	def is_stuck(self):
		#re-sends goal pose if robot is mentally or physically stuck for 10 iterations
		if self.stuck_count > 0: #check buffer
			self.stuck_count += -1
			return False #return not stuck
		self.stuck_count = self.stuck_buffer
		if self.stuck_distance < 0.5:
			print("Robot stuck; resending goal.")
			self.send_goal()
			return True
		else:
			return False	

	def get_new_goal(self,current_position):
		#get new goal pose from module
		point = current_position
		#if self.current_status == 3:
		point[0] = round(point[0])
		point[1] = round(point[1])
		print(self.dpt.getNextPose(point))
		return self.dpt.getNextPose(point)

	def send_goal(self):
		#get and send new goal pose
		self.goal_point = self.get_new_goal(self.pose._pose)
		print("sent goal: " + str(self.goal_point))

		new_goal = PoseStamped()
		new_goal.pose.position.x = self.goal_point[0]
		new_goal.pose.position.y = self.goal_point[1]
		new_goal.pose.position.z = self.goal_point[2]
		theta = self.goal_point[3]

		quat = tf.transformations.quaternion_from_euler(0,0,np.deg2rad(theta))
		new_goal.pose.orientation.x = quat[0]
		new_goal.pose.orientation.y = quat[1]
		new_goal.pose.orientation.z = quat[2]
		new_goal.pose.orientation.w = quat[3] #<>TODO change this to variable orientation so robot doesn't spin as it travels
		#<>NOTE: random spinning that occurred in the gazebo sim does not occur when run on the physical robot

		new_goal.header.stamp = rospy.Time.now()
		new_goal.header.frame_id = 'map'

		self.pub.publish(new_goal)