Example #1
def get_legs_pose2d_in_map(self):
    m_a_T = self.pose_2d_in_map_frame
    if self.type == "legs":
        leg_radius = self.leg_radius  # [m]
        leg_side_offset = 0.1  # [m]
        leg_side_amplitude = 0.1  # [m] half amplitude
        leg_front_amplitude = 0.3  # [m]
        # get position of each leg w.r.t agent (x is 'forward')
        # leg travel is a sinusoid of the distance the agent has traveled in x, y
        front_travel = leg_front_amplitude * np.cos(
            self.state[0] * 2. / leg_front_amplitude  # assuming dphi = 2 dx / A
            + self.state[2]  # adds a little movement when rotating
        )
        side_travel = leg_side_amplitude * np.cos(
            self.state[1] * 2. / leg_side_amplitude
            + self.state[2]
        )
        right_leg_pose2d_in_agent_frame = np.array([
            front_travel,
            side_travel + leg_side_offset,
            0])
        left_leg_pose2d_in_agent_frame = -right_leg_pose2d_in_agent_frame
        left_leg_pose2d_in_map_frame = pose2d.apply_tf_to_pose(
            left_leg_pose2d_in_agent_frame, m_a_T)
        right_leg_pose2d_in_map_frame = pose2d.apply_tf_to_pose(
            right_leg_pose2d_in_agent_frame, m_a_T)
        return left_leg_pose2d_in_map_frame, right_leg_pose2d_in_map_frame
    else:
        raise NotImplementedError
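
For reference, a minimal sketch of what a 2D transform helper like pose2d.apply_tf_to_pose plausibly computes, assuming poses and transforms are (x, y, theta) arrays; this is an assumption about the helper's semantics, not its verified source:

import numpy as np

def apply_tf_to_pose(pose, tf_frame):
    # assumed semantics: tf_frame is the (x, y, theta) pose of the source
    # frame in the target frame; returns pose expressed in the target frame
    x, y, th = tf_frame
    c, s = np.cos(th), np.sin(th)
    px, py, pth = pose
    return np.array([x + c * px - s * py,
                     y + s * px + c * py,
                     th + pth])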
Example #2
def _convert_obs(self):
    robot = self.soadrl_sim.robot
    # lidar obs
    lidar_pos = np.array([robot.px, robot.py, robot.theta], dtype=np.float32)
    ranges = np.ones((self.n_angles,), dtype=np.float32) * 25.
    angles = np.linspace(self.kLidarMergedMinAngle,
                         self.kLidarMergedMaxAngle - self.kLidarAngleIncrement,
                         self.n_angles) + lidar_pos[2]
    # renders static obstacles into ranges, in place
    render_contours_in_lidar(ranges, angles, self.flat_contours, lidar_pos[:2])
    # agents
    other_agents = []
    for i, human in enumerate(self.soadrl_sim.humans):
        pos = np.array([human.px, human.py, human.theta], dtype=np.float32)
        dist = self.distances_travelled_in_base_frame[i].astype(np.float32)
        vel = np.array([human.vx, human.vy], dtype=np.float32)
        if self.lidar_legs:
            agent = CSimAgent(pos, dist, vel)
        else:
            agent = CSimAgent(pos, dist, vel, type_="trunk", radius=human.radius)
        other_agents.append(agent)
    # apply through converter map (res 1., origin 0,0 -> i,j == x,y)
    self.converter_cmap2d.render_agents_in_lidar(ranges, angles, other_agents, lidar_pos[:2])
    self.lidar_scan = ranges
    self.lidar_angles = angles
    # robotstate obs
    # shape (5,) [grx, gry, vx, vy, vtheta] - all in base frame
    baselink_in_world = np.array([robot.px, robot.py, robot.theta])
    world_in_baselink = inverse_pose2d(baselink_in_world)
    robotvel_in_world = np.array([robot.vx, robot.vy, 0])  # TODO: actual robot rot vel?
    robotvel_in_baselink = apply_tf_to_vel(robotvel_in_world, world_in_baselink)
    goal_in_world = np.array([robot.gx, robot.gy, 0])
    goal_in_baselink = apply_tf_to_pose(goal_in_world, world_in_baselink)
    robotstate_obs = np.hstack([goal_in_baselink[:2], robotvel_in_baselink])
    obs = (self.lidar_scan, robotstate_obs)
    return obs
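
The frame bookkeeping above also relies on inverse_pose2d and apply_tf_to_vel, whose sources are not shown; a minimal sketch under the same (x, y, theta) convention, assumed rather than verified:

import numpy as np

def inverse_pose2d(pose):
    # inverse rigid transform: if pose maps frame A into frame B,
    # the result maps frame B back into frame A
    x, y, th = pose
    c, s = np.cos(th), np.sin(th)
    return np.array([-(c * x + s * y), s * x - c * y, -th])

def apply_tf_to_vel(vel, tf_frame):
    # velocities rotate with the frame but do not translate;
    # the angular component passes through unchanged
    th = tf_frame[2]
    c, s = np.cos(th), np.sin(th)
    vx, vy, w = vel
    return np.array([c * vx - s * vy, s * vx + c * vy, w])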
Example #3
    def visuals_loop(self):
        while True:
            xx, yy, clusters, is_legs, cogs, radii = self.clusters_data
            # past positions
            tf_past_in_rob = []
            if self.tf_pastrobs_in_fix:
                tf_rob_in_fix = self.tf_pastrobs_in_fix[-1]
                for tf_a in self.tf_pastrobs_in_fix[::-1]:
                    tf_b = apply_tf_to_pose(
                        Pose2D(tf_a), inverse_pose2d(Pose2D(tf_rob_in_fix)))
                    tf_past_in_rob.append(tf_b)
            # Plotting
            plt.figure("clusters")
            plt.cla()
            plt.scatter(xx,
                        yy,
                        zorder=1,
                        facecolor=(1, 1, 1, 1),
                        edgecolor=(0, 0, 0, 1),
                        color='k',
                        marker='.')
            #         for x, y in zip(xx, yy):
            #             plt.plot([0, x], [0, y], linewidth=0.01, color='red' , zorder=2)
            plt.scatter([0], [0], marker='+', color='k', s=1000)
            for c in clusters:
                plt.plot(xx[c], yy[c], zorder=2)
            for is_leg, cog, r in zip(is_legs, cogs, radii):
                if is_leg:
                    patch = patches.Circle(cog,
                                           r,
                                           facecolor=(0, 0, 0, 0),
                                           edgecolor=(0, 0, 0, 1),
                                           linewidth=3)
                else:
                    patch = patches.Circle(cog,
                                           r,
                                           facecolor=(0, 0, 0, 0),
                                           edgecolor=(0, 0, 0, 1),
                                           linestyle='--')
                plt.gca().add_artist(patch)
            if tf_past_in_rob:
                xy_past = np.array(tf_past_in_rob)
                plt.plot(xy_past[:, 0], xy_past[:, 1], color='red')

            plt.gca().set_aspect('equal', adjustable='box')
            LIM = 10
            plt.ylim([-LIM, LIM])
            plt.xlim([-LIM, LIM])
            # pause
            plt.pause(0.1)
            rospy.timer.sleep(0.01)
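
Pose2D(...) above appears to flatten a ROS tf transform into an (x, y, yaw) array. A minimal sketch, assuming the input is the ((x, y, z), (qx, qy, qz, qw)) tuple returned by tf.TransformListener.lookupTransform; the actual helper may differ:

import numpy as np
from tf.transformations import euler_from_quaternion

def Pose2D(tf_tuple):
    # assumed input: ((x, y, z), (qx, qy, qz, qw)) from lookupTransform
    (x, y, _), quaternion = tf_tuple
    yaw = euler_from_quaternion(quaternion)[2]
    return np.array([x, y, yaw])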
Example #4
    def scan_callback(self, msg):
        atic = timer()
        # edge case: first callback
        if self.msg_prev is None:
            self.msg_prev = msg
            return
        if self.odom is None:
            print("odom not received yet")
            return
        if self.tf_rob_in_fix is None:
            print("tf_rob_in_fix not found yet")
            return
        if self.tf_goal_in_fix is None:
            self.tf_goal_in_fix = self.tf_rob_in_fix
            print("goal set")
        # TODO check that odom and tf are not old

        # measure rotation TODO
        s = np.array(msg.ranges)

        # prediction
        dt = (msg.header.stamp - self.msg_prev.header.stamp).to_sec()
        s_prev = np.array(self.msg_prev.ranges)
        ds = (s - s_prev)
        max_ds = self.kMaxObstacleVel_ms * dt
        ds_capped = ds.copy()  # copy, so that ds itself is not modified below
        ds_capped[np.abs(ds) > max_ds] = 0
        s_next = np.maximum(0, s + ds_capped).astype(np.float32)

        # cluster
        EUCLIDEAN_CLUSTERING_THRESH_M = 0.05
        angles = np.linspace(0,
                             2 * np.pi,
                             s_next.shape[0] + 1,
                             dtype=np.float32)[:-1]
        clusters, x, y = clustering.euclidean_clustering(
            s_next, angles, EUCLIDEAN_CLUSTERING_THRESH_M)
        cluster_sizes = clustering.cluster_sizes(len(s_next), clusters)
        s_next[cluster_sizes <= 3] = 25  # drop tiny clusters (noise): push beams to max range

        # dwa
        # Get state
        # goal in robot frame
        goal_in_robot_frame = apply_tf_to_pose(
            Pose2D(self.tf_goal_in_fix),
            inverse_pose2d(Pose2D(self.tf_rob_in_fix)))
        gx = goal_in_robot_frame[0]
        gy = goal_in_robot_frame[1]

        # robot speed in robot frame
        u = self.odom.twist.twist.linear.x
        v = self.odom.twist.twist.linear.y
        w = self.odom.twist.twist.angular.z

        DWA_DT = 0.5
        COMFORT_RADIUS_M = self.kRobotComfortRadius_m
        MAX_XY_VEL = 0.5
        tic = timer()
        best_u, best_v, best_score = dynamic_window.linear_dwa(
            s_next,
            angles,
            u,
            v,
            gx,
            gy,
            DWA_DT,
            DV=0.05,
            UMIN=0 if self.args.forward_only else -MAX_XY_VEL,
            UMAX=MAX_XY_VEL,
            VMIN=-MAX_XY_VEL,
            VMAX=MAX_XY_VEL,
            AMAX=10.,
            COMFORT_RADIUS_M=COMFORT_RADIUS_M,
        )
        toc = timer()
        #         print(best_u * DWA_DT, best_v * DWA_DT, best_score)
        #         print("DWA: {:.2f} Hz".format(1/(toc-tic)))

        # Slow turn towards goal
        # TODO
        best_w = 0
        WMAX = 0.5
        angle_to_goal = np.arctan2(gy, gx)  # [-pi, pi]
        if np.sqrt(gx * gx + gy * gy) > 0.5:  # turn only if goal is far away
            if np.abs(angle_to_goal) > (np.pi / 4 / 10):  # deadzone
                best_w = np.clip(angle_to_goal, -WMAX, WMAX)  # linear ramp

        if not self.STOP:
            # publish cmd_vel
            cmd_vel_msg = Twist()
            SIDEWAYS_FACTOR = 0.3 if self.args.forward_only else 1.
            cmd_vel_msg.linear.x = np.clip(best_u * 0.5, -0.3, 0.3)
            cmd_vel_msg.linear.y = np.clip(best_v * 0.5 * SIDEWAYS_FACTOR,
                                           -0.3, 0.3)
            cmd_vel_msg.angular.z = best_w
            self.cmd_vel_pub.publish(cmd_vel_msg)

        # publish cmd_vel vis
        pub = rospy.Publisher("/dwa_cmd_vel", Marker, queue_size=1)
        mk = Marker()
        mk.header.frame_id = self.kRobotFrame
        mk.ns = "arrows"
        mk.id = 0
        mk.type = 0  # Marker.ARROW
        mk.action = 0  # Marker.ADD
        mk.scale.x = 0.02
        mk.scale.y = 0.02
        mk.color.b = 1
        mk.color.a = 1
        mk.frame_locked = True
        pt = Point()
        pt.x = 0
        pt.y = 0
        pt.z = 0.03
        mk.points.append(pt)
        pt = Point()
        pt.x = 0 + best_u * DWA_DT
        pt.y = 0 + best_v * DWA_DT
        pt.z = 0.03
        mk.points.append(pt)
        pub.publish(mk)
        pub = rospy.Publisher("/dwa_goal", Marker, queue_size=1)
        mk = Marker()
        mk.header.frame_id = self.kRobotFrame
        mk.ns = "arrows"
        mk.id = 0
        mk.type = 0  # Marker.ARROW
        mk.action = 0  # Marker.ADD
        mk.scale.x = 0.02
        mk.scale.y = 0.02
        mk.color.g = 1
        mk.color.a = 1
        mk.frame_locked = True
        pt = Point()
        pt.x = 0
        pt.y = 0
        pt.z = 0.03
        mk.points.append(pt)
        pt = Point()
        pt.x = 0 + gx
        pt.y = 0 + gy
        pt.z = 0.03
        mk.points.append(pt)
        pub.publish(mk)
        pub = rospy.Publisher("/dwa_radius", Marker, queue_size=1)
        mk = Marker()
        mk.header.frame_id = self.kRobotFrame
        mk.ns = "radius"
        mk.id = 0
        mk.type = 3  # Marker.CYLINDER
        mk.action = 0  # Marker.ADD
        mk.pose.position.z = -0.1
        mk.scale.x = COMFORT_RADIUS_M * 2
        mk.scale.y = COMFORT_RADIUS_M * 2
        mk.scale.z = 0.01
        mk.color.b = 1
        mk.color.g = 1
        mk.color.r = 1
        mk.color.a = 1
        mk.frame_locked = True
        pub.publish(mk)

        # publish scan prediction
        msg_next = deepcopy(msg)
        msg_next.ranges = s_next
        # for pretty colors
        cluster_ids = clustering.cluster_ids(len(x), clusters)
        random_map = np.arange(len(cluster_ids))
        np.random.shuffle(random_map)
        cluster_ids = random_map[cluster_ids]
        msg_next.intensities = cluster_ids
        self.pubs[0].publish(msg_next)

        # publish past
        msg_prev = deepcopy(msg)
        msg_prev.ranges = self.msg_prev.ranges
        self.pubs[1].publish(msg_prev)

        # ...

        # finally, set up for next callback
        self.msg_prev = msg

        atoc = timer()
        if self.args.hz:
            print("DWA callback: {:.2f} Hz".format(1 / (atoc - atic)))
Example #5
from tf.transformations import euler_from_quaternion

import pose2d


def grid_feature_estimate_from_sensor_data(tracked_persons, pose2d_tracksframe_in_refmap,
                                           robot_heading, state, fixed_state):
    # same for all features
    n_agents = len(tracked_persons.tracks)
    # visibility_map  # TODO: use lidar free space instead!
    map_ = fixed_state.map
    ij = indexing.as_idx_array(map_.occupancy(), axis='all')
    ii = ij[..., 0]
    jj = ij[..., 1]
    fov = [robot_heading - HALF_FOV, robot_heading + HALF_FOV]
    visibility = fixed_state.map.visibility_map_ij(state.get_pos_ij(), fov=fov)
    for feature_name in kStateFeatures:
        feature_index = kStateFeatures[feature_name]
        # start by assuming no humans detected, then add detections one by one.
        # TODO: increase nohuman uncertainty with distance?
        if feature_name == "crowdedness":
            # baseline crowdedness is zero everywhere (note: zeros_like makes
            # the NOHUMAN_CROWDEDNESS_SENSOR_PR factor a no-op)
            feature_map = NOHUMAN_CROWDEDNESS_SENSOR_PR * np.zeros_like(ii, dtype=np.float32)
            feature_uncertainty_map = NOHUMAN_CWD_UNCERTAINTY_SENSOR_PR * np.ones_like(ii, dtype=np.float32)
        elif feature_name == "permissivity":
            feature_map = NOHUMAN_PERMISSIVITY_SENSOR_PR * np.ones_like(ii, dtype=np.float32)
            feature_uncertainty_map = NOHUMAN_PRM_UNCERTAINTY_SENSOR_PR * np.ones_like(ii, dtype=np.float32)
        elif feature_name == "perceptivity":
            feature_map = NOHUMAN_PERCEPTIVITY_SENSOR_PR * np.ones_like(ii, dtype=np.float32)
            feature_uncertainty_map = NOHUMAN_PER_UNCERTAINTY_SENSOR_PR * np.ones_like(ii, dtype=np.float32)
        for k in range(n_agents):
            tp = tracked_persons.tracks[k]
            quaternion = (
                tp.pose.pose.orientation.x,
                tp.pose.pose.orientation.y,
                tp.pose.pose.orientation.z,
                tp.pose.pose.orientation.w,
            )
            tp_pos_in_tracksframe = np.array([
                tp.pose.pose.position.x,
                tp.pose.pose.position.y,
                euler_from_quaternion(quaternion)[2],
            ])
            tp_pos_in_refmap = pose2d.apply_tf_to_pose(
                tp_pos_in_tracksframe, pose2d_tracksframe_in_refmap)
            ai, aj = map_.xy_to_ij([tp_pos_in_refmap[:2]])[0]
            # compute P(Z|S)
            if feature_name == "crowdedness":
                density_radius = CROWDEDNESS_RADIUS
            elif feature_name == "permissivity":
                density_radius = PERMISSIVITY_RADIUS
            elif feature_name == "perceptivity":
                density_radius = PERCEPTIVITY_RADIUS
            else:
                raise NotImplementedError("state estimate for {} not implemented.".format(
                    feature_name))
            sqr_density_radius_ij = (density_radius / map_.resolution())**2
            sqr_distance = ((ii - ai)**2 + (jj - aj)**2)
            mask = sqr_distance < sqr_density_radius_ij
            if feature_name == "crowdedness":
#                 influence_radius = 1.  # bigger means "larger" people
#                 feature_map[mask] += np.clip(1. / (0.0001 + sqr_distance[mask] * influence_radius),
#                                              0., 1.)
                feature_map[mask] += HUMAN_CROWDEDNESS_SENSOR_PR  # additive
                feature_uncertainty_map[mask] = HUMAN_CWD_UNCERTAINTY_SENSOR_PR
            elif feature_name == "permissivity":
                feature_map[mask] = HUMAN_PERMISSIVITY_SENSOR_PR
                feature_uncertainty_map[mask] = HUMAN_PRM_UNCERTAINTY_SENSOR_PR
            elif feature_name == "perceptivity":
                feature_map[mask] = HUMAN_PERCEPTIVITY_SENSOR_PR
                feature_uncertainty_map[mask] = HUMAN_PER_UNCERTAINTY_SENSOR_PR
        # what you can't see...
        feature_uncertainty_map[visibility < 0] = np.inf  # no sensor update outside of sensor fov
        # apply observation normal pdf using bayesian posterior  P(S|Z) ~= P(S) * P(Z|S)
        # product of two gaussian pdfs
        mu1 = np.array(state.grid_features_values()[feature_index, :, :])
        mu2 = feature_map
        sigmasq1 = np.array(state.grid_features_uncertainties()[feature_index, :, :])
        sigmasq2 = feature_uncertainty_map
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            new_mu = (mu1 * sigmasq2 + mu2 * sigmasq1) / (sigmasq2 + sigmasq1)
            new_sigmasq = 1. / (1. / sigmasq1 + 1. / sigmasq2)
        # check numerics
        if CHECK_NUMERICS:
            is_both_small = np.logical_and(sigmasq1 < TINY_SIGMASQ, sigmasq2 < TINY_SIGMASQ)
            is_1_big = sigmasq1 > BIG_SIGMASQ
            is_2_big = sigmasq2 > BIG_SIGMASQ
            is_both_big = np.logical_and(sigmasq1 > BIG_SIGMASQ, sigmasq2 > BIG_SIGMASQ)
            new_mu[is_both_small] = ((mu1 + mu2) / 2.)[is_both_small]
            new_mu[is_1_big] = mu2[is_1_big]
            new_mu[is_2_big] = mu1[is_2_big]
            new_mu[is_both_big] = ((mu1 + mu2) / 2.)[is_both_big]
            new_sigmasq[is_both_small] = TINY_SIGMASQ
            new_sigmasq[is_1_big] = sigmasq2[is_1_big]
            new_sigmasq[is_2_big] = sigmasq1[is_2_big]
            new_sigmasq[is_both_big] = BIG_SIGMASQ
            new_sigmasq[np.isnan(new_sigmasq)] = np.inf  # this shouldn't happen... so it will
        # assign
        state.grid_features_values()[feature_index, :, :] = new_mu
        state.grid_features_uncertainties()[feature_index, :, :] = new_sigmasq
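
The posterior update above is the standard product of two Gaussian pdfs; a quick scalar sanity check with made-up numbers:

import numpy as np

mu1, sigmasq1 = 0.0, 1.0   # prior: "no human", uncertain
mu2, sigmasq2 = 1.0, 0.25  # measurement: confident detection
new_mu = (mu1 * sigmasq2 + mu2 * sigmasq1) / (sigmasq2 + sigmasq1)
new_sigmasq = 1. / (1. / sigmasq1 + 1. / sigmasq2)
print(new_mu, new_sigmasq)  # 0.8 0.2 -> mean pulled toward the confident measurement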