Example #1
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_srvs.srv import Empty
from bitbots_msgs.msg import Buttons
from rclpy.duration import Duration

rclpy.init(args=None)
node = Node('zero_on_button')

zero_l = node.create_client(Empty, "/foot_pressure_left/set_foot_zero")
zero_r = node.create_client(Empty, "/foot_pressure_right/set_foot_zero")
button_prev_state = False
press_time = node.get_clock().now() - Duration(seconds=1.0)


def cb(msg):
    global button_prev_state, press_time
    print("New msg")
    print(msg.button3)
    print(not button_prev_state)
    print(node.get_clock().now() - press_time > Duration(seconds=1.0))
    # debounce: react to a rising edge at most once per second
    if msg.button3 and not button_prev_state and node.get_clock().now(
    ) - press_time > Duration(seconds=1.0):
        zero_l.call_async(Empty.Request())
        zero_r.call_async(Empty.Request())
        press_time = node.get_clock().now()
    button_prev_state = msg.button3
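
# As written, nothing subscribes to the button topic and the node never
# spins, so cb() is never invoked. A minimal sketch of the missing wiring
# (the topic name "/buttons" is an assumption, not from the original):
node.create_subscription(Buttons, "/buttons", cb, 1)
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()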

Example #2
    def planning_callback(self):
        self.tau = self.tau + 1

        if not self.use_sim_time:
            try:
                self.z_meas = self.getPoseFromTF()
            except Exception as e:
                self.get_logger().info('Error: ' + str(e))

        counter = 0
        # Check if current goal is reached
        if torch.dist(self.z_meas[[0, 1]],
                      self.z_plan[self.i_plan].index_select(
                          0, torch.tensor([0, 1])),
                      p=2) < self.z_plan[self.i_plan][torch.tensor([3])]:
            if self.i_plan + 1 == self.z_plan.shape[0]:
                self.i_plan = 0
            else:
                self.i_plan = self.i_plan + 1
            self.get_logger().info('New goal: ' +
                                   str(self.z_plan[self.i_plan]))
            if self.rviz:
                goalMsg = PointStamped()
                if self.use_sim_time:
                    goalMsg.header.frame_id = "odom"
                else:
                    goalMsg.header.frame_id = "map"
                goalMsg.header.stamp = self.get_clock().now().to_msg()
                goalMsg.point.x = self.z_plan[self.i_plan][0].item()
                goalMsg.point.y = self.z_plan[self.i_plan][1].item()
                goalMsg.point.z = 0.0
                self.goalPublisher_.publish(goalMsg)

        a_alpha_tau = torch.tensor(
            [100., 1000.],
            dtype=torch.float)  # small preference for going forward initially
        a_beta_tau = torch.tensor([10., 1000.], dtype=torch.float)

        # Plan next action(s)
        while self.est_planning_epoch_time < Duration(
                nanoseconds=0.80 *
                self.planning_timer.time_until_next_call()
        ):  # check if there is sufficient time to run again
            tic_true = time.time()

            tic = self.get_clock().now()
            if not self.use_sim_time:
                try:
                    self.z_meas = self.getPoseFromTF()
                except Exception as e:
                    self.get_logger().info('Error: ' + str(e))

            z_meas_tau = self.z_meas
            z_L_tau = self.meas_L
            knownPlannedPaths_tau_i = self.knownPlannedPaths  # taking a local reference reduces, but does not eliminate, concurrent-access issues

            # calculate dt to next timestep:
            time_left = torch.tensor(
                [self.planning_timer.time_until_next_call() / (10**9)],
                dtype=torch.float)  # s
            planning_time = torch.tensor(
                [self.est_planning_epoch_time.nanoseconds / (10**9)],
                dtype=torch.float)  # s
            time_left_from_predicted_pose = time_left - planning_time
            z_current_dist = dist.MultivariateNormal(z_meas_tau,
                                                     scale_tril=z_L_tau)

            # Run SVI
            losses = []
            for svi_epoch in range(self.svi_epochs):
                losses.append(
                    self.svi.step(self,
                                  z_current_dist,
                                  self.a_current,
                                  self.deltaT,
                                  self.z_plan[self.i_plan],
                                  knownPlannedPaths_tau_i,
                                  planning_time,
                                  time_left_from_predicted_pose,
                                  T1=self.tau,
                                  T2=self.tau + self.Tau_span))

            # Send msg to other robots
            a_alpha = torch.zeros(self.Tau_span, 2)
            a_beta = torch.zeros(self.Tau_span, 2)
            for Tau in range(self.tau, self.tau + self.Tau_span):
                a_alpha[Tau - self.tau, :] = pyro.param(
                    "a_alpha_{}".format(Tau)).detach()
                a_beta[Tau - self.tau, :] = pyro.param(
                    "a_beta_{}".format(Tau)).detach()

            msgPlannedActions = self.packPlannedActionsMsg(
                z_meas_tau, z_L_tau, self.a_current, a_alpha, a_beta,
                planning_time, time_left_from_predicted_pose)
            self.plannedActionsPublisher_.publish(msgPlannedActions)

            if self.rviz:
                # Generate samples of predicted path:
                N = 100
                z_tmp = torch.zeros(N, self.Tau_span, 3)
                for i in range(N):
                    z_tmp[i, :, :] = motionModel(z_current_dist,
                                                 self.a_current,
                                                 a_alpha,
                                                 a_beta,
                                                 self.deltaT,
                                                 planning_time,
                                                 time_left_from_predicted_pose,
                                                 T1=self.tau,
                                                 T2=self.tau + self.Tau_span)

                self.publishPathPointCloud(z_tmp)

            if self.Log_DATA:
                # data to save
                logDict = {
                    "timestamp": tic_true,  # int
                    "tau": self.tau,  # int
                    "counter": counter,  # int
                    "losses": losses,  # [] list
                    "z_meas_tau":
                    z_meas_tau,  # torch.tensor([0,0,0],dtype=torch.float)
                    "z_L":
                    z_L_tau,  # z_cov = torch.diag(torch.tensor([1.,1.,1.],dtype=torch.float))
                    "a_current":
                    self.a_current,  # torch.tensor([0., 0.],dtype=torch.float)
                    "a_alpha": a_alpha,  # torch.zeros(self.Tau_span,2)
                    "a_beta": a_beta,  # torch.zeros(self.Tau_span,2)
                    "z_tmp": z_tmp,  # torch.zeros(N,self.Tau_span,3)
                    "currentGoal": self.z_plan[self.i_plan],
                    "tau_span": self.Tau_span,
                    "r_plan": self.r_plan,
                    "radius": self.radius
                }
                pickle.dump(logDict, self.logFileObject)

            # Draw actions
            # Get action distribution parameters
            a_alpha_tau = pyro.param("a_alpha_{}".format(self.tau)).detach()
            a_beta_tau = pyro.param("a_beta_{}".format(self.tau)).detach()

            # Sample random action from p(a_tau|O=1,C=0)
            a_tau = torch.distributions.Beta(a_alpha_tau,
                                             a_beta_tau).rsample().detach()

            # Alternatively, choose mean action...
            a_tau[0] = a_alpha_tau[0] / (a_alpha_tau[0] + a_beta_tau[0])
            a_tau[1] = a_alpha_tau[1] / (a_alpha_tau[1] + a_beta_tau[1])

            self.a_next = a_tau  # save current estimate of next action to take

            self.a_current = self.a_next
            a = action_transforme(
                self.a_current)  # scale actions appropriately

            msgVel = Twist()
            msgVel.linear.x = a[0].item()
            msgVel.angular.z = a[1].item()
            self.velPublisher_.publish(msgVel)

            # estimate execution time by Cumulative moving average
            toc = self.get_clock().now()
            toc_true = time.time() - tic_true
            dur = (toc - tic).nanoseconds
            if not (dur >= int(self.deltaT * (10**9))):
                dur_curr_est = self.est_planning_epoch_time.nanoseconds
                dur_new_est = (self.N_samples_epoch_time * dur_curr_est +
                               dur) / (self.N_samples_epoch_time + 1
                                       )  # Cumulative moving average
                self.est_planning_epoch_time = Duration(
                    nanoseconds=dur_new_est)
                self.N_samples_epoch_time = self.N_samples_epoch_time + 1

            counter = counter + 1

        self.get_logger().info('N iterations: ' + str(counter) +
                               '  est_planning_epoch_time: ' +
                               str(self.est_planning_epoch_time.nanoseconds /
                                   10**6) + "    goal: " +
                               str(self.z_plan[self.i_plan]) + " x: " +
                               str(self.z_meas))
        time_left = self.planning_timer.time_until_next_call()
        if time_left < 0:
            self.get_logger().warn("Planning did not finish in time - spend " +
                                   str(-1. * time_left / 10**6) +
                                   " ms to much")
def main():
    rclpy.init()

    navigator = BasicNavigator()

    # Set our demo's initial pose
    initial_pose = PoseStamped()
    initial_pose.header.frame_id = 'map'
    initial_pose.header.stamp = navigator.get_clock().now().to_msg()
    initial_pose.pose.position.x = 3.45
    initial_pose.pose.position.y = 2.15
    initial_pose.pose.orientation.z = 1.0
    initial_pose.pose.orientation.w = 0.0
    navigator.setInitialPose(initial_pose)

    # Activate navigation, if not autostarted. This should be called after setInitialPose()
    # or this will initialize at the origin of the map and update the costmap with bogus readings.
    # If autostart, you should `waitUntilNav2Active()` instead.
    # navigator.lifecycleStartup()

    # Wait for navigation to fully activate, since autostarting nav2
    navigator.waitUntilNav2Active()

    # If desired, you can change or load the map as well
    # navigator.changeMap('/path/to/map.yaml')

    # You may use the navigator to clear or obtain costmaps
    # navigator.clearAllCostmaps()  # also have clearLocalCostmap() and clearGlobalCostmap()
    # global_costmap = navigator.getGlobalCostmap()
    # local_costmap = navigator.getLocalCostmap()

    # set our demo's goal poses to follow
    goal_poses = []
    goal_pose1 = PoseStamped()
    goal_pose1.header.frame_id = 'map'
    goal_pose1.header.stamp = navigator.get_clock().now().to_msg()
    goal_pose1.pose.position.x = 1.5
    goal_pose1.pose.position.y = 0.55
    goal_pose1.pose.orientation.w = 0.707
    goal_pose1.pose.orientation.z = 0.707
    goal_poses.append(goal_pose1)

    # additional goals can be appended
    goal_pose2 = PoseStamped()
    goal_pose2.header.frame_id = 'map'
    goal_pose2.header.stamp = navigator.get_clock().now().to_msg()
    goal_pose2.pose.position.x = 1.5
    goal_pose2.pose.position.y = -3.75
    goal_pose2.pose.orientation.w = 0.707
    goal_pose2.pose.orientation.z = 0.707
    goal_poses.append(goal_pose2)
    goal_pose3 = PoseStamped()
    goal_pose3.header.frame_id = 'map'
    goal_pose3.header.stamp = navigator.get_clock().now().to_msg()
    goal_pose3.pose.position.x = -3.6
    goal_pose3.pose.position.y = -4.75
    goal_pose3.pose.orientation.w = 0.707
    goal_pose3.pose.orientation.z = 0.707
    goal_poses.append(goal_pose3)

    # sanity check a valid path exists
    # path = navigator.getPath(initial_pose, goal_pose1)

    nav_start = navigator.get_clock().now()
    navigator.followWaypoints(goal_poses)

    i = 0
    while not navigator.isTaskComplete():
        ################################################
        #
        # Implement some code here for your application!
        #
        ################################################

        # Do something with the feedback
        i = i + 1
        feedback = navigator.getFeedback()
        if feedback and i % 5 == 0:
            print('Executing current waypoint: ' +
                  str(feedback.current_waypoint + 1) + '/' +
                  str(len(goal_poses)))
            now = navigator.get_clock().now()

            # Some navigation timeout to demo cancellation
            if now - nav_start > Duration(seconds=600.0):
                navigator.cancelTask()

            # Some follow waypoints request change to demo preemption
            if now - nav_start > Duration(seconds=35.0):
                goal_pose4 = PoseStamped()
                goal_pose4.header.frame_id = 'map'
                goal_pose4.header.stamp = now.to_msg()
                goal_pose4.pose.position.x = -5.0
                goal_pose4.pose.position.y = -4.75
                goal_pose4.pose.orientation.w = 0.707
                goal_pose4.pose.orientation.z = 0.707
                goal_poses = [goal_pose4]
                nav_start = now
                navigator.followWaypoints(goal_poses)

    # Do something depending on the return code
    result = navigator.getResult()
    if result == TaskResult.SUCCEEDED:
        print('Goal succeeded!')
    elif result == TaskResult.CANCELED:
        print('Goal was canceled!')
    elif result == TaskResult.FAILED:
        print('Goal failed!')
    else:
        print('Goal has an invalid return status!')

    navigator.lifecycleShutdown()

    exit(0)
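
# The demo above defines main() but omits the usual entry-point guard;
# the standard addition would be:
if __name__ == '__main__':
    main()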
Example #4
class TrajectoryPlanner(Node):
    def __init__(self):
        super().__init__('velocity_publisher')

        self.get_logger().info(
            'Initializing Co-operative shuttlebus controller')
        # self.group = ReentrantCallbackGroup() # to allow callbacks to be executed in parallel
        self.group = MutuallyExclusiveCallbackGroup()

        self.use_sim_time = self.get_parameter(
            'use_sim_time').get_parameter_value().bool_value
        self.get_logger().info("use_sim_time: " + str(self.use_sim_time))

        self.ROBOTNAMESPACE = os.environ['ROBOTNAMESPACE']

        self.rviz = True
        self.Log_DATA = True

        # size of the robot given as the radius of its minimum spanning disk
        # https://emanual.robotis.com/docs/en/platform/turtlebot3/specifications/
        # The wheels are placed at the front. The robot is 138mm long and 178mm wide, so
        # assuming front-mounted wheels, 0.138 m is a conservative estimate;
        # per https://www.robotis.us/turtlebot-3-burger-us/ a more appropriate value would be 105 mm
        self.radius = 0.138

        self.z_meas = torch.tensor([0, 0, 0], dtype=torch.float)
        self.deltaT = torch.tensor([1.0], dtype=torch.float)  # in seconds
        self.Tau_span = 4  # prediction horizon
        self.svi_epochs = 10
        if self.use_sim_time:
            self.covariance_scale = torch.tensor([1.0], dtype=torch.float)
        else:
            self.covariance_scale = torch.tensor(
                [0.1], dtype=torch.float
            )  # the covariances from AMCL seem very conservative - probably due to the need for population error in the particle filter - so we scale them down

        self.r_plan = 0.2

        if self.ROBOTNAMESPACE == "turtlebot1":
            self.z_plan = torch.tensor([[2.0000, 1.0000, 0.0000, self.r_plan],
                                        [0.0000, 1.0000, 0.0000, self.r_plan]],
                                       dtype=torch.float)
        elif self.ROBOTNAMESPACE == "turtlebot2":
            self.z_plan = torch.tensor([[1.5000, 1.8660, 0.0000, self.r_plan],
                                        [0.5000, 0.1340, 0.0000, self.r_plan]],
                                       dtype=torch.float)
        elif self.ROBOTNAMESPACE == "turtlebot3":
            self.z_plan = torch.tensor([[0.5000, 1.8660, 0.0000, self.r_plan],
                                        [1.5000, 0.1340, 0.0000, self.r_plan]],
                                       dtype=torch.float)
        else:
            self.z_plan = torch.tensor(
                [[2.99, 2.57, 0, self.r_plan], [2.01, 1.93, 0, self.r_plan],
                 [1.65, 1.12, 0, self.r_plan], [1.25, 0.65, 0, self.r_plan],
                 [0.039, -0.18, 0, self.r_plan], [1.25, 0.65, 0, self.r_plan],
                 [1.65, 1.12, 0, self.r_plan], [2.01, 1.93, 0, self.r_plan]],
                dtype=torch.float)

        self.i_plan = 0

        self.meas_L = torch.diag(
            torch.tensor([1., 1., 1.], dtype=torch.float)
        )  # cholesky decomposition of measurement model covariance - this is just a temporary assignment it is reassigned in the meas callback

        # setup the inference algorithm
        optim_args = {"lr": 0.05}
        optimizer = pyro.optim.Adam(optim_args)
        self.svi = pyro.infer.SVI(model=decisionModel,
                                  guide=decisionGuide,
                                  optim=optimizer,
                                  loss=pyro.infer.Trace_ELBO(num_particles=1))

        self.get_logger().info('Finished SVI setup')

        self.est_planning_epoch_time = Duration(
            nanoseconds=(self.deltaT[0] / 3.) * 10**9
        )  # cumulative moving average of planning-epoch execution times
        self.N_samples_epoch_time = 1

        self.a_current = torch.tensor([0.0, 0.0], dtype=torch.float)
        self.a_next = torch.tensor(
            [0.0, 0.0], dtype=torch.float
        )  # variable to save the current estimate of next action

        if not self.use_sim_time:  # to get current pose from tf tree
            self.tfbuffer = tf2_ros.Buffer()
            self.tflistener = tf2_ros.TransformListener(self.tfbuffer, self)

        if self.use_sim_time:
            self.poseSubscription = self.create_subscription(
                Odometry,
                'odom',
                self.pose_callback,
                1,
                callback_group=self.group)
            self.poseSubscription  # prevent unused variable warning
        else:
            self.poseSubscription = self.create_subscription(
                PoseWithCovarianceStamped,
                'amcl_pose',
                self.pose_callback,
                1,
                callback_group=self.group)
            self.poseSubscription  # prevent unused variable warning

        self.subscriberManager_timer = self.create_timer(
            2, self.subscriberManager_callback, callback_group=self.group)
        self.plannedActions_subscriptions = {}
        self.knownPlannedPaths = {}

        self.get_logger().info('Created subscriptions')

        self.plannedActionsPublisher_ = self.create_publisher(
            PlannedActions, 'plannedActions', 1, callback_group=self.group)

        if self.rviz:
            self.plannedPathPublisher_rviz = {}
            for tau in range(self.Tau_span):
                #self.plannedPathPublisher_rviz[tau] = self.create_publisher(PoseWithCovarianceStamped, 'Path_rviz_'+str(tau), 1, callback_group=self.group)
                self.plannedPathPublisher_rviz[tau] = self.create_publisher(
                    PointCloud,
                    'Path_PointCloud_' + str(tau),
                    1,
                    callback_group=self.group)

        self.goalPublisher_ = self.create_publisher(PointStamped,
                                                    'currentGoal',
                                                    1,
                                                    callback_group=self.group)
        goalMsg = PointStamped()
        if self.use_sim_time:
            goalMsg.header.frame_id = "odom"
        else:
            goalMsg.header.frame_id = "map"
        goalMsg.header.stamp = self.get_clock().now().to_msg()
        goalMsg.point.x = self.z_plan[self.i_plan][0].item()
        goalMsg.point.y = self.z_plan[self.i_plan][1].item()
        goalMsg.point.z = 0.0
        self.goalPublisher_.publish(goalMsg)

        self.velPublisher_ = self.create_publisher(Twist, 'publishedVel', 1)

        self.get_logger().info('Created publishers')

        planning_timer_period = self.deltaT.item()  # seconds
        self.planning_timer = self.create_timer(planning_timer_period,
                                                self.planning_callback,
                                                callback_group=self.group)

        # align planning_timer for all robots
        self.t0 = alignTimer(self.get_clock(), self.planning_timer)

        self.tau, mod = divmod(self.t0.nanoseconds,
                               int(self.deltaT.item() * (10**9)))
        if mod > self.deltaT.item() * (10**9) / 2:  # divmod rounds down
            self.tau = self.tau + 1

        self.get_logger().info('Controller Initialized')
        self.get_logger().info('z_0: ' + str(self.z_meas))

        if self.Log_DATA:
            outdir = '/root/DATA/logs/' + self.ROBOTNAMESPACE
            if not os.path.exists(outdir):
                os.mkdir(outdir)

            filename = self.ROBOTNAMESPACE + "-" + datetime.now().strftime(
                "%d_%m_%Y-%H_%M_%S") + ".pkl"
            logFilePath = os.path.join(outdir, filename)
            self.logFileObject = open(logFilePath, 'ab')
            self.get_logger().info('Data Logging Initialized')

    def subscriberManager_callback(self):
        topic_to_find = "plannedActions"

        topics = self.get_topic_names_and_types()

        for topic in topics:
            if topic_to_find in topic[0] and topic[
                    0] != self.plannedActionsPublisher_.topic_name:  # [0] is the topic name as a string
                already_subscribed = False
                for ID in self.plannedActions_subscriptions:  # check if a subscription already exists
                    if topic[0] == self.plannedActions_subscriptions[
                            ID].topic_name:
                        already_subscribed = True
                        break  # a subscription already exists, no need to look further

                if not already_subscribed:  # if no subscription exists, create one
                    ID = topic[0]
                    ID = ID.replace("/controller/plannedActions", "")
                    ID = ID.replace("/", "")
                    self.plannedActions_subscriptions[
                        ID] = self.create_subscription(
                            PlannedActions,
                            topic[0],
                            partial(
                                self.plannedActionsSubscriber_callback, topic,
                                ID
                            ),  # partial supplies additional arguments to the callback
                            1)  # add a subscription...

                    self.get_logger().info('Created subscription for topic "' +
                                           topic[0] + '"' +
                                           ' and assigned it the ID "' + ID +
                                           '"')

        # TODO: add feature to delete subscriptions again

    def unpackPoseWithCovarianceMsg(self, poseWithCovariance):
        x = poseWithCovariance.pose.position.x
        y = poseWithCovariance.pose.position.y
        z = poseWithCovariance.pose.position.z

        orientation_q = poseWithCovariance.pose.orientation
        orientation_list = [
            orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w
        ]
        (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
        #roll, pitch, yaw = self.quaternionMsg2RPY(orientation_q)

        z_tmp = torch.tensor([x, y, z, roll, pitch, yaw], dtype=torch.float)
        cov_row_major = torch.tensor([poseWithCovariance.covariance],
                                     dtype=torch.float)  # in row major format

        indices = torch.tensor(
            [0, 1, 5])  # we want to extract the marginal for x, y, yaw

        mean_marginal, cov_marginal = getNormalMarginal(
            z_tmp, cov_row_major, indices)

        # zero diagonal vector is assumed to be an error and should only happen in the beginning. Thus, a small number is added!
        #cov_marginal = cov_marginal + torch.diag(0.001*torch.ones(cov_marginal.shape[0]))

        # the covariances from AMCL seem very conservative - probably due to the need for population error in the particle filter - so we scale them down
        cov_marginal = cov_marginal * self.covariance_scale
        mean = mean_marginal
        L = torch.cholesky(cov_marginal)

        return mean, L

    def plannedActionsSubscriber_callback(self, topic, ID, msg):

        if ID not in self.knownPlannedPaths:
            self.knownPlannedPaths[ID] = {}  # create empty dict

        self.knownPlannedPaths[ID]["radius"] = msg.radius
        self.knownPlannedPaths[ID]["planning_time"] = torch.tensor(
            [msg.planning_time], dtype=torch.float)
        self.knownPlannedPaths[ID][
            "time_left_from_predicted_pose"] = torch.tensor(
                [msg.time_left_from_predicted_pose], dtype=torch.float)
        poseWithCovariance = msg.pose.pose
        mean, L = self.unpackPoseWithCovarianceMsg(poseWithCovariance)
        poseDistribution = torch.distributions.MultivariateNormal(mean,
                                                                  scale_tril=L)
        self.knownPlannedPaths[ID]["z_meas_dist"] = poseDistribution
        self.knownPlannedPaths[ID]["a_current"] = torch.FloatTensor(
            msg.a_current)
        self.knownPlannedPaths[ID]["a_alpha"] = torch.reshape(
            torch.FloatTensor(msg.a_alpha), (self.Tau_span, 2))
        self.knownPlannedPaths[ID]["a_beta"] = torch.reshape(
            torch.FloatTensor(msg.a_beta), (self.Tau_span, 2))
        self.get_logger().info('Received planned actions from: ' + ID)

    def pose_callback(self, msg):
        if not self.use_sim_time:
            # on the real robot only the covariance is used; the pose itself comes from TF
            z_meas_tmp, self.meas_L = self.unpackPoseWithCovarianceMsg(
                msg.pose)
        else:
            self.z_meas, self.meas_L = self.unpackPoseWithCovarianceMsg(
                msg.pose)

    def packPlannedActionsMsg(self, z_meas_tau, z_L_tau, a_current, a_alpha,
                              a_beta, planning_time,
                              time_left_from_predicted_pose):
        msgPlannedActions = PlannedActions()
        time_stamp_msg = self.get_clock().now()
        current_time = time_stamp_msg.to_msg()
        msgPlannedActions.header.stamp = current_time
        if self.use_sim_time:
            msgPlannedActions.header.frame_id = "odom"
        else:
            msgPlannedActions.header.frame_id = "map"

        msgPlannedActions.radius = self.radius
        msgPlannedActions.planning_time = planning_time.item()
        msgPlannedActions.time_left_from_predicted_pose = time_left_from_predicted_pose.item()

        # Current pose estimate
        poseWithCovariance = PoseWithCovarianceStamped()
        if self.use_sim_time:
            poseWithCovariance.header.frame_id = "odom"
        else:
            poseWithCovariance.header.frame_id = "map"
        poseWithCovariance.header.stamp = current_time
        poseWithCovariance.pose.pose.position.x = z_meas_tau[0].item()
        poseWithCovariance.pose.pose.position.y = z_meas_tau[1].item()

        q = quaternion_from_euler(0.0, 0.0, z_meas_tau[2].item())
        poseWithCovariance.pose.pose.orientation.x = q[0]
        poseWithCovariance.pose.pose.orientation.y = q[1]
        poseWithCovariance.pose.pose.orientation.z = q[2]
        poseWithCovariance.pose.pose.orientation.w = q[3]

        newIndices = torch.tensor([0, 1, 5])
        newShape = torch.tensor([6, 6])
        z_cov_tau = torch.matmul(z_L_tau, torch.transpose(z_L_tau, 0,
                                                          1))  # cov = L*L^T
        poseWithCovariance.pose.covariance = expandCovarianceMatrix(
            z_cov_tau, newShape, newIndices).flatten().tolist()
        msgPlannedActions.pose = poseWithCovariance

        # planned actions
        msgPlannedActions.a_current = a_current.flatten().tolist()
        msgPlannedActions.a_alpha = a_alpha.flatten().tolist()
        msgPlannedActions.a_beta = a_beta.flatten().tolist()

        return msgPlannedActions

    def publishPathPointCloud(self, pathPoints):
        time_stamp_path = Time(nanoseconds=(self.tau * int(self.deltaT *
                                                           (10**9))))

        for tau in range(self.Tau_span):
            dur = Duration(nanoseconds=(tau + 1) * int(self.deltaT * (10**9)))
            time_stamp_future = time_stamp_path + dur
            pointCloud_tmp = PointCloud()
            pointCloud_tmp.header.stamp = time_stamp_future.to_msg()
            if self.use_sim_time:
                pointCloud_tmp.header.frame_id = "odom"
            else:
                pointCloud_tmp.header.frame_id = "map"

            for i in range(pathPoints.shape[0]):
                point_tmp = Point32()
                point_tmp.x = pathPoints[i, tau, 0].item()
                point_tmp.y = pathPoints[i, tau, 1].item()
                point_tmp.z = 0.0
                pointCloud_tmp.points.append(point_tmp)

            self.plannedPathPublisher_rviz[tau].publish(pointCloud_tmp)

    def getPoseFromTF(self):
        target_frame = 'map'
        source_frame = 'base_link'

        # lookup_transform(target, source, time) yields the pose of base_link in the map frame
        transformation_timestamped = self.tfbuffer.lookup_transform(
            target_frame, source_frame, rclpy.time.Time(seconds=0))
        x = transformation_timestamped.transform.translation.x
        y = transformation_timestamped.transform.translation.y

        orientation_q = transformation_timestamped.transform.rotation
        orientation_list = [
            orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w
        ]
        (roll, pitch, yaw) = euler_from_quaternion(orientation_list)

        z_meas = torch.tensor([x, y, yaw], dtype=torch.float)

        return z_meas

    def planning_callback(self):
        self.tau = self.tau + 1

        if not self.use_sim_time:
            try:
                self.z_meas = self.getPoseFromTF()
            except Exception as e:
                self.get_logger().info('Error: ' + str(e))

        counter = 0
        # Check if current goal is reached
        if torch.dist(self.z_meas[[0, 1]],
                      self.z_plan[self.i_plan].index_select(
                          0, torch.tensor([0, 1])),
                      p=2) < self.z_plan[self.i_plan][torch.tensor([3])]:
            if self.i_plan + 1 == self.z_plan.shape[0]:
                self.i_plan = 0
            else:
                self.i_plan = self.i_plan + 1
            self.get_logger().info('New goal: ' +
                                   str(self.z_plan[self.i_plan]))
            if self.rviz:
                goalMsg = PointStamped()
                if self.use_sim_time:
                    goalMsg.header.frame_id = "odom"
                else:
                    goalMsg.header.frame_id = "map"
                goalMsg.header.stamp = self.get_clock().now().to_msg()
                goalMsg.point.x = self.z_plan[self.i_plan][0].item()
                goalMsg.point.y = self.z_plan[self.i_plan][1].item()
                goalMsg.point.z = 0.0
                self.goalPublisher_.publish(goalMsg)

        a_alpha_tau = torch.tensor(
            [100., 1000.],
            dtype=torch.float)  # small preference for going forward initially
        a_beta_tau = torch.tensor([10., 1000.], dtype=torch.float)

        # Plan next action(s)
        while self.est_planning_epoch_time < Duration(
                nanoseconds=0.80 *
                self.planning_timer.time_until_next_call()
        ):  # check if there is sufficient time to run again
            tic_true = time.time()

            tic = self.get_clock().now()
            if not self.use_sim_time:
                try:
                    self.z_meas = self.getPoseFromTF()
                except Exception as e:
                    self.get_logger().info('Error: ' + str(e))

            z_meas_tau = self.z_meas
            z_L_tau = self.meas_L
            knownPlannedPaths_tau_i = self.knownPlannedPaths  # taking a local reference reduces, but does not eliminate, concurrent-access issues

            # calculate dt to next timestep:
            time_left = torch.tensor(
                [self.planning_timer.time_until_next_call() / (10**9)],
                dtype=torch.float)  # s
            planning_time = torch.tensor(
                [self.est_planning_epoch_time.nanoseconds / (10**9)],
                dtype=torch.float)  # s
            time_left_from_predicted_pose = time_left - planning_time
            z_current_dist = dist.MultivariateNormal(z_meas_tau,
                                                     scale_tril=z_L_tau)

            # Run SVI
            losses = []
            for svi_epoch in range(self.svi_epochs):
                losses.append(
                    self.svi.step(self,
                                  z_current_dist,
                                  self.a_current,
                                  self.deltaT,
                                  self.z_plan[self.i_plan],
                                  knownPlannedPaths_tau_i,
                                  planning_time,
                                  time_left_from_predicted_pose,
                                  T1=self.tau,
                                  T2=self.tau + self.Tau_span))

            # Send msg to other robots
            a_alpha = torch.zeros(self.Tau_span, 2)
            a_beta = torch.zeros(self.Tau_span, 2)
            for Tau in range(self.tau, self.tau + self.Tau_span):
                a_alpha[Tau - self.tau, :] = pyro.param(
                    "a_alpha_{}".format(Tau)).detach()
                a_beta[Tau - self.tau, :] = pyro.param(
                    "a_beta_{}".format(Tau)).detach()

            msgPlannedActions = self.packPlannedActionsMsg(
                z_meas_tau, z_L_tau, self.a_current, a_alpha, a_beta,
                planning_time, time_left_from_predicted_pose)
            self.plannedActionsPublisher_.publish(msgPlannedActions)

            if self.rviz:
                # Generate samples of predicted path:
                N = 100
                z_tmp = torch.zeros(N, self.Tau_span, 3)
                for i in range(N):
                    z_tmp[i, :, :] = motionModel(z_current_dist,
                                                 self.a_current,
                                                 a_alpha,
                                                 a_beta,
                                                 self.deltaT,
                                                 planning_time,
                                                 time_left_from_predicted_pose,
                                                 T1=self.tau,
                                                 T2=self.tau + self.Tau_span)

                self.publishPathPointCloud(z_tmp)

            if self.Log_DATA:
                # data to save
                logDict = {
                    "timestamp": tic_true,  # int
                    "tau": self.tau,  # int
                    "counter": counter,  # int
                    "losses": losses,  # [] list
                    "z_meas_tau":
                    z_meas_tau,  # torch.tensor([0,0,0],dtype=torch.float)
                    "z_L":
                    z_L_tau,  # z_cov = torch.diag(torch.tensor([1.,1.,1.],dtype=torch.float))
                    "a_current":
                    self.a_current,  # torch.tensor([0., 0.],dtype=torch.float)
                    "a_alpha": a_alpha,  # torch.zeros(self.Tau_span,2)
                    "a_beta": a_beta,  # torch.zeros(self.Tau_span,2)
                    "z_tmp": z_tmp,  # torch.zeros(N,self.Tau_span,3)
                    "currentGoal": self.z_plan[self.i_plan],
                    "tau_span": self.Tau_span,
                    "r_plan": self.r_plan,
                    "radius": self.radius
                }
                pickle.dump(logDict, self.logFileObject)

            # Draw actions
            # Get action distribution parameters
            a_alpha_tau = pyro.param("a_alpha_{}".format(self.tau)).detach()
            a_beta_tau = pyro.param("a_beta_{}".format(self.tau)).detach()

            # Sample random action from p(a_tau|O=1,C=0)
            a_tau = torch.distributions.Beta(a_alpha_tau,
                                             a_beta_tau).rsample().detach()

            # Alternatively, choose mean action...
            a_tau[0] = a_alpha_tau[0] / (a_alpha_tau[0] + a_beta_tau[0])
            a_tau[1] = a_alpha_tau[1] / (a_alpha_tau[1] + a_beta_tau[1])

            self.a_next = a_tau  # save current estimate of next action to take

            self.a_current = self.a_next
            a = action_transforme(
                self.a_current)  # scale actions appropriately

            msgVel = Twist()
            msgVel.linear.x = a[0].item()
            msgVel.angular.z = a[1].item()
            self.velPublisher_.publish(msgVel)

            # estimate execution time by Cumulative moving average
            toc = self.get_clock().now()
            toc_true = time.time() - tic_true
            dur = (toc - tic).nanoseconds
            if not (dur >= int(self.deltaT * (10**9))):
                dur_curr_est = self.est_planning_epoch_time.nanoseconds
                dur_new_est = (self.N_samples_epoch_time * dur_curr_est +
                               dur) / (self.N_samples_epoch_time + 1
                                       )  # Cumulative moving average
                self.est_planning_epoch_time = Duration(
                    nanoseconds=dur_new_est)
                self.N_samples_epoch_time = self.N_samples_epoch_time + 1

            counter = counter + 1

        self.get_logger().info('N iterations: ' + str(counter) +
                               '  est_planning_epoch_time: ' +
                               str(self.est_planning_epoch_time.nanoseconds /
                                   10**6) + "    goal: " +
                               str(self.z_plan[self.i_plan]) + " x: " +
                               str(self.z_meas))
        time_left = self.planning_timer.time_until_next_call()
        if time_left < 0:
            self.get_logger().warn("Planning did not finish in time - spend " +
                                   str(-1. * time_left / 10**6) +
                                   " ms to much")
Example #5
    def __init__(self):
        super().__init__('velocity_publisher')

        self.get_logger().info(
            'Initializing Co-operative shuttlebus controller')
        # self.group = ReentrantCallbackGroup() # to allow callbacks to be executed in parallel
        self.group = MutuallyExclusiveCallbackGroup()

        self.use_sim_time = self.get_parameter(
            'use_sim_time').get_parameter_value().bool_value
        self.get_logger().info("use_sim_time: " + str(self.use_sim_time))

        self.ROBOTNAMESPACE = os.environ['ROBOTNAMESPACE']

        self.rviz = True
        self.Log_DATA = True

        # size of the robot given as the radius of its minimum spanning disk
        # https://emanual.robotis.com/docs/en/platform/turtlebot3/specifications/
        # The wheels are placed at the front. The robot is 138mm long and 178mm wide, so
        # assuming front-mounted wheels, 0.138 m is a conservative estimate;
        # per https://www.robotis.us/turtlebot-3-burger-us/ a more appropriate value would be 105 mm
        self.radius = 0.138

        self.z_meas = torch.tensor([0, 0, 0], dtype=torch.float)
        self.deltaT = torch.tensor([1.0], dtype=torch.float)  # in seconds
        self.Tau_span = 4  # prediction horizon
        self.svi_epochs = 10
        if self.use_sim_time:
            self.covariance_scale = torch.tensor([1.0], dtype=torch.float)
        else:
            self.covariance_scale = torch.tensor(
                [0.1], dtype=torch.float
            )  # the covariances from AMCL seem very conservative - probably due to the need for population error in the particle filter - so we scale them down

        self.r_plan = 0.2

        if self.ROBOTNAMESPACE == "turtlebot1":
            self.z_plan = torch.tensor([[2.0000, 1.0000, 0.0000, self.r_plan],
                                        [0.0000, 1.0000, 0.0000, self.r_plan]],
                                       dtype=torch.float)
        elif self.ROBOTNAMESPACE == "turtlebot2":
            self.z_plan = torch.tensor([[1.5000, 1.8660, 0.0000, self.r_plan],
                                        [0.5000, 0.1340, 0.0000, self.r_plan]],
                                       dtype=torch.float)
        elif self.ROBOTNAMESPACE == "turtlebot3":
            self.z_plan = torch.tensor([[0.5000, 1.8660, 0.0000, self.r_plan],
                                        [1.5000, 0.1340, 0.0000, self.r_plan]],
                                       dtype=torch.float)
        else:
            self.z_plan = torch.tensor(
                [[2.99, 2.57, 0, self.r_plan], [2.01, 1.93, 0, self.r_plan],
                 [1.65, 1.12, 0, self.r_plan], [1.25, 0.65, 0, self.r_plan],
                 [0.039, -0.18, 0, self.r_plan], [1.25, 0.65, 0, self.r_plan],
                 [1.65, 1.12, 0, self.r_plan], [2.01, 1.93, 0, self.r_plan]],
                dtype=torch.float)

        self.i_plan = 0

        self.meas_L = torch.diag(
            torch.tensor([1., 1., 1.], dtype=torch.float)
        )  # cholesky decomposition of measurement model covariance - this is just a temporary assignment it is reassigned in the meas callback

        # setup the inference algorithm
        optim_args = {"lr": 0.05}
        optimizer = pyro.optim.Adam(optim_args)
        self.svi = pyro.infer.SVI(model=decisionModel,
                                  guide=decisionGuide,
                                  optim=optimizer,
                                  loss=pyro.infer.Trace_ELBO(num_particles=1))

        self.get_logger().info('Finished SVI setup')

        self.est_planning_epoch_time = Duration(
            nanoseconds=(self.deltaT[0] / 3.) * 10**9
        )  # cumulative moving average of planning-epoch execution times
        self.N_samples_epoch_time = 1

        self.a_current = torch.tensor([0.0, 0.0], dtype=torch.float)
        self.a_next = torch.tensor(
            [0.0, 0.0], dtype=torch.float
        )  # variable to save the current estimate of next action

        if not self.use_sim_time:  # to get current pose from tf tree
            self.tfbuffer = tf2_ros.Buffer()
            self.tflistener = tf2_ros.TransformListener(self.tfbuffer, self)

        if self.use_sim_time:
            self.poseSubscription = self.create_subscription(
                Odometry,
                'odom',
                self.pose_callback,
                1,
                callback_group=self.group)
            self.poseSubscription  # prevent unused variable warning
        else:
            self.poseSubscription = self.create_subscription(
                PoseWithCovarianceStamped,
                'amcl_pose',
                self.pose_callback,
                1,
                callback_group=self.group)
            self.poseSubscription  # prevent unused variable warning

        self.subscriberManager_timer = self.create_timer(
            2, self.subscriberManager_callback, callback_group=self.group)
        self.plannedActions_subscriptions = {}
        self.knownPlannedPaths = {}

        self.get_logger().info('Created subscriptions')

        self.plannedActionsPublisher_ = self.create_publisher(
            PlannedActions, 'plannedActions', 1, callback_group=self.group)

        if self.rviz:
            self.plannedPathPublisher_rviz = {}
            for tau in range(self.Tau_span):
                #self.plannedPathPublisher_rviz[tau] = self.create_publisher(PoseWithCovarianceStamped, 'Path_rviz_'+str(tau), 1, callback_group=self.group)
                self.plannedPathPublisher_rviz[tau] = self.create_publisher(
                    PointCloud,
                    'Path_PointCloud_' + str(tau),
                    1,
                    callback_group=self.group)

        self.goalPublisher_ = self.create_publisher(PointStamped,
                                                    'currentGoal',
                                                    1,
                                                    callback_group=self.group)
        goalMsg = PointStamped()
        if self.use_sim_time:
            goalMsg.header.frame_id = "odom"
        else:
            goalMsg.header.frame_id = "map"
        goalMsg.header.stamp = self.get_clock().now().to_msg()
        goalMsg.point.x = self.z_plan[self.i_plan][0].item()
        goalMsg.point.y = self.z_plan[self.i_plan][1].item()
        goalMsg.point.z = 0.0
        self.goalPublisher_.publish(goalMsg)

        self.velPublisher_ = self.create_publisher(Twist, 'publishedVel', 1)

        self.get_logger().info('Created publishers')

        planning_timer_period = self.deltaT.item()  # seconds
        self.planning_timer = self.create_timer(planning_timer_period,
                                                self.planning_callback,
                                                callback_group=self.group)

        # align planning_timer for all robots
        self.t0 = alignTimer(self.get_clock(), self.planning_timer)

        self.tau, mod = divmod(self.t0.nanoseconds,
                               int(self.deltaT.item() * (10**9)))
        if mod > self.deltaT.item() * (10**9) / 2:  # divmod rounds down
            self.tau = self.tau + 1

        self.get_logger().info('Controller Initialized')
        self.get_logger().info('z_0: ' + str(self.z_meas))

        if self.Log_DATA:
            outdir = '/root/DATA/logs/' + self.ROBOTNAMESPACE
            if not os.path.exists(outdir):
                os.mkdir(outdir)

            filename = self.ROBOTNAMESPACE + "-" + datetime.now().strftime(
                "%d_%m_%Y-%H_%M_%S") + ".pkl"
            logFilePath = os.path.join(outdir, filename)
            self.logFileObject = open(logFilePath, 'ab')
            self.get_logger().info('Data Logging Initialized')
Example #6
def to_s(duration):
    # convert a builtin_interfaces/Duration message to float seconds
    return Duration.from_msg(duration).nanoseconds / 1e9
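
# A short usage sketch: to_s() converts the builtin_interfaces/Duration
# messages found in headers and parameters back to float seconds.
from rclpy.duration import Duration

msg = Duration(seconds=1, nanoseconds=500000000).to_msg()
assert to_s(msg) == 1.5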
Example #7
import pytest
import rclpy
from rclpy.clock import Clock
from rclpy.clock import ClockType
from rclpy.clock import JumpHandle
from rclpy.clock import JumpThreshold
from rclpy.clock import ROSClock
from rclpy.context import Context
from rclpy.duration import Duration
from rclpy.exceptions import NotInitializedException
from rclpy.time import Time
from rclpy.utilities import get_default_context

from .mock_compat import __name__ as _  # noqa: ignore=F401

A_SMALL_AMOUNT_OF_TIME = Duration(seconds=0.5)


def test_invalid_jump_threshold():
    with pytest.raises(ValueError, match='.*min_forward.*'):
        JumpThreshold(min_forward=Duration(nanoseconds=0),
                      min_backward=Duration(nanoseconds=-1))

    with pytest.raises(ValueError, match='.*min_forward.*'):
        JumpThreshold(min_forward=Duration(nanoseconds=-1),
                      min_backward=Duration(nanoseconds=-1))

    with pytest.raises(ValueError, match='.*min_backward.*'):
        JumpThreshold(min_forward=Duration(nanoseconds=1),
                      min_backward=Duration(nanoseconds=0))
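
# For contrast, a sketch of a threshold that constructs without raising and
# is registered as a jump callback (a minimal sketch, not part of the
# original test file):
def test_valid_jump_threshold():
    threshold = JumpThreshold(min_forward=Duration(seconds=1),
                              min_backward=Duration(seconds=-1),
                              on_clock_change=True)
    clock = ROSClock()
    handle = clock.create_jump_callback(threshold,
                                        post_callback=lambda info: None)
    assert isinstance(handle, JumpHandle)
    handle.unregister()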
Example #8
def run():
    nonlocal retval
    retval = clock.sleep_until(clock.now() + Duration(seconds=10),
                               context=non_default_context)
Example #9
def run():
    nonlocal retval
    retval = clock.sleep_for(Duration(seconds=10),
                             context=non_default_context)
Example #10
def run():
    nonlocal retval
    retval = clock.sleep_until(clock.now() + Duration(seconds=10))
Example #11
def run():
    nonlocal retval
    retval = clock.sleep_for(Duration(seconds=10))
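
# The run() closures in examples #8-#11 appear to be excerpts of nested
# functions from clock tests: they run on a worker thread while the test
# drives the clock, and nonlocal retval captures the sleep result. A
# self-contained sketch of the underlying call on the default context:
import rclpy
from rclpy.clock import Clock
from rclpy.duration import Duration

rclpy.init()
clock = Clock()  # system time by default
assert clock.sleep_for(Duration(seconds=0.1))  # True if not interrupted
rclpy.shutdown()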
Example #12
def test_sleep_for_invalid_context():
    clock = Clock()
    with pytest.raises(NotInitializedException):
        clock.sleep_for(Duration(seconds=0.1), context=Context())
Example #13
def test_sleep_for_non_default_context(non_default_context):
    clock = Clock()
    assert clock.sleep_for(Duration(seconds=0.1), context=non_default_context)
Example #14
def test_sleep_until_non_default_context(non_default_context):
    clock = Clock()
    assert clock.sleep_until(clock.now() + Duration(seconds=0.1),
                             context=non_default_context)
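
# non_default_context in the two tests above is a pytest fixture whose
# definition is not shown; a plausible sketch (an assumption, not the test
# suite's actual code):
import pytest
import rclpy
from rclpy.context import Context

@pytest.fixture
def non_default_context():
    context = Context()
    rclpy.init(context=context)
    yield context
    rclpy.shutdown(context=context)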
Example #15
class BallManager(Node):

    DELETE_BALL_DURATION = Duration(seconds=5.0)

    SPAWN_BALL_DURATION = 10.0
    TOTAL_BALL_COUNT = 1

    GAME_STARTED_DURATION = 30.0
    GAME_STARTING_DURATION = 30.0
    GAME_PAUSED_DURATION = 120.0

    MODEL_NAMES = [
        "zenith_camera",
        "tennis_court",
        "player_1",
        "player_1_collision",
        "player_2",
        "player_2_collision"
    ]

    def __init__(self):
        super().__init__("ball_manager")
        self.score = 0.0
        self.ball_id = 0
        self.balls = dict()
        self.game_status = 42  # arbitrary placeholder status until a robot model is detected
        self.next_status_stamp = None
        self.robot_pose = None
        self.robot_model_name = None
        self.ball_description_file = os.path.join(get_package_share_directory("tennis_court"), "urdf", "ball.urdf.xacro")

        qos_profile = QoSProfile(
            history=HistoryPolicy.KEEP_LAST, depth=1,
            reliability=ReliabilityPolicy.BEST_EFFORT,
            durability=DurabilityPolicy.TRANSIENT_LOCAL
        )

        # Ball manager stats publisher
        self.stats_pub = self.create_publisher(BallManagerStats, "/ball_manager_stats", qos_profile)

        # Game status publisher
        self.game_status_pub = self.create_publisher(GameStatus, "/game_status", qos_profile)
        self.game_status_pub.publish(GameStatus(is_started=False))

        # Spawn entity client
        self.spawn_entity_client = self.create_client(SpawnEntity, "/spawn_entity")
        while not self.spawn_entity_client.wait_for_service(timeout_sec=2.0):
            self.get_logger().warn("Service '/spawn_entity' not available, waiting...")

        # Delete entity client
        self.delete_entity_client = self.create_client(DeleteEntity, "/delete_entity")
        while not self.delete_entity_client.wait_for_service(timeout_sec=2.0):
            self.get_logger().warn("Service '/delete_entity' not available, waiting...")

        # Set entity state client
        self.set_entity_state_client = self.create_client(SetEntityState, "/set_entity_state")
        while not self.set_entity_state_client.wait_for_service(timeout_sec=2.0):
            self.get_logger().warn("Service '/set_entity_state' not available, waiting...")

        # Model states subscriber
        self.model_states_sub = self.create_subscription(ModelStates, "/model_states", self.on_model_states, 10)

        # Ball spawner timer
        self.spawn_ball_timer = None

        # Publish stats timer
        self.publish_stats_timer = self.create_timer(0.1, self.publish_stats)

        # Game status timer
        self.game_status_timer = None

        self.get_logger().info("Ball manager initialized")

    def publish_stats(self):
        time_left = 0.0
        if self.next_status_stamp is not None:
            time_left = max(0.0, (self.next_status_stamp - self.get_clock().now()).nanoseconds * 1e-9)
        in_safe_region = True
        if self.robot_pose is not None:
            in_safe_region = self.is_robot_in_safe_region(self.robot_pose.position)
            if self.game_status == BallManagerStats.GAME_STARTED and not in_safe_region:
                self.score -= 1.0
            elif self.game_status in [BallManagerStats.GAME_STARTED, BallManagerStats.GAME_STARTING] and in_safe_region:
                self.score += 0.1
        self.stats_pub.publish(BallManagerStats(
            score=int(round(self.score)),
            current_ball_count=self.get_ball_count(),
            total_ball_count=len(self.balls),
            game_status=self.game_status,
            status_time_left=time_left,
            robot_is_safe=in_safe_region
        ))

    def get_ball_count(self):
        return len([ball for ball in self.balls.values() if ball.status == Ball.STATUS_SPAWNED])

    def spawn_ball(self):
        self.spawn_ball_timer.cancel()

        self.ball_id += 1
        ball = Ball(self.ball_id)
        self.balls[ball.id] = ball

        xacro_mappings = {
            "vel_x": str(ball.initial_velocity.linear.x),
            "vel_y": str(ball.initial_velocity.linear.y),
            "vel_z": str(ball.initial_velocity.linear.z)
        }
        urdf_description = xacro.process_file(self.ball_description_file, mappings=xacro_mappings).toxml()
        with open("/tmp/test.urdf", "w") as s:
            s.write(urdf_description)
        self.spawn_entity_client.call_async(SpawnEntity.Request(
            name=ball.name,
            xml=urdf_description,
            initial_pose=ball.initial_pose
        ))
        self.get_logger().info(f"Ball {ball.id} spawned")
        ball.set_spawned(self.get_clock().now())

    def set_robot_model(self, model_name):
        """
        :type model_name: str
        """
        self.get_logger().info(f"Tracking robot {model_name}")
        self.robot_model_name = model_name
        self.game_status = BallManagerStats.GAME_STARTING
        self.next_status_stamp = self.get_clock().now() + Duration(seconds=self.GAME_STARTING_DURATION)
        self.game_status_timer = self.create_timer(self.GAME_STARTING_DURATION, self.game_status_update)
        self.game_status_pub.publish(GameStatus(is_started=True))
        if len(self.balls) < self.TOTAL_BALL_COUNT:
            self.spawn_ball_timer = self.create_timer(self.GAME_STARTING_DURATION * 0.2, self.spawn_ball)

    def game_status_update(self):
        self.game_status_timer.cancel()
        duration = 0.0
        if self.game_status == BallManagerStats.GAME_STARTING: # GAME_STARTED
            self.game_status = BallManagerStats.GAME_STARTED
            duration = self.GAME_STARTED_DURATION
            if len(self.balls) < self.TOTAL_BALL_COUNT:
                self.spawn_ball_timer = self.create_timer(duration * 0.2, self.spawn_ball)
        elif self.game_status == BallManagerStats.GAME_STARTED: # GAME_PAUSED
            self.game_status_pub.publish(GameStatus(is_started=False))
            self.game_status = BallManagerStats.GAME_PAUSED
            duration = self.GAME_PAUSED_DURATION
        elif self.game_status == BallManagerStats.GAME_PAUSED: # GAME_STARTING
            self.game_status_pub.publish(GameStatus(is_started=True))
            self.game_status = BallManagerStats.GAME_STARTING
            duration = self.GAME_STARTING_DURATION
            if len(self.balls) < self.TOTAL_BALL_COUNT:
                self.spawn_ball_timer = self.create_timer(duration * 0.2, self.spawn_ball)
        self.next_status_stamp = self.get_clock().now() + Duration(seconds=duration)
        self.game_status_timer = self.create_timer(duration, self.game_status_update)

    def on_model_states(self, model_states):
        """
        :type model_states: ModelStates
        """
        timestamp = self.get_clock().now()
        for model_name, model_pose in zip(model_states.name, model_states.pose):
            if not model_name.startswith("ball"):
                if self.robot_model_name is None and model_name not in self.MODEL_NAMES:
                    self.set_robot_model(model_name)
                if model_name == self.robot_model_name:
                    self.robot_pose = model_pose
                continue
            ball_id = int(model_name[4:])
            ball = self.balls[ball_id]
            if ball.status != Ball.STATUS_SPAWNED:
                continue
            in_region = self.is_ball_in_gathering_region(model_pose.position)
            if in_region and not ball.in_region:
                self.get_logger().info(f"Ball {ball_id} entered region")
            if not in_region and ball.in_region:
                self.get_logger().info(f"Ball {ball_id} exited region")
            region_time = ball.get_region_time(in_region, timestamp)
            if region_time > self.DELETE_BALL_DURATION:
                self.delete_ball(ball_id)

    def delete_ball(self, ball_id):
        """
        :type ball_id: int
        """
        ball = self.balls[ball_id]
        self.delete_entity_client.call_async(DeleteEntity.Request(
            name=ball.name
        ))
        ball.set_destroyed(self.get_clock().now())
        self.score += self.compute_score(ball.get_lifespan())
        self.get_logger().info(f"Ball {ball_id} deleted")
        if self.get_ball_count() == 0 and len(self.balls) == self.TOTAL_BALL_COUNT:
            self.get_logger().info(f"End of simulation, congratulations :)")
            self.destroy_node()

    def compute_score(self, lifespan):
        """
        :type lifespan: Duration
        """
        now = self.get_clock().now().nanoseconds * 1e-9
        lifespan_sec = lifespan.nanoseconds * 1e-9
        global_score = int(10000.0 / now)
        ball_score = int(20000.0 / lifespan_sec)
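        # Worked example: a ball collected 50 s into the run (simulated clock) with a
        # 20 s lifespan scores int(10000 / 50) + int(20000 / 20) = 200 + 1000 = 1200.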
        self.get_logger().info(f"Took {round(now, 3)} seconds to collect ball with a lifespan of {round(lifespan_sec, 3)} seconds. " +
                               f"Got global time score of {global_score} and ball score of {ball_score}")
        return float(global_score + ball_score)

    @staticmethod
    def is_ball_in_gathering_region(point):
        """
        :type point: Point
        """
        region_size = Point(x=2.224250, y=2.631040, z=2.0)
        region_centers = (Point(x=-6.85, y=-13.65, z=1.0),
                          Point(x=6.85, y=13.65, z=1.0))
        # Axis-aligned box test against both gathering regions.
        return any(
            center.x - region_size.x / 2.0 <= point.x <= center.x + region_size.x / 2.0 and
            center.y - region_size.y / 2.0 <= point.y <= center.y + region_size.y / 2.0 and
            center.z - region_size.z / 2.0 <= point.z <= center.z + region_size.z / 2.0
            for center in region_centers)

    @staticmethod
    def is_robot_in_safe_region(point):
        """
        :type point: Point
        """
        region_size = Point(x=2.281020, y=21.2145, z=4.0)
        region_centers = (Point(x=-6.85, y=0.0, z=1.0),
                          Point(x=6.85, y=0.0, z=1.0))
        # Axis-aligned box test against both safe regions.
        return any(
            center.x - region_size.x / 2.0 <= point.x <= center.x + region_size.x / 2.0 and
            center.y - region_size.y / 2.0 <= point.y <= center.y + region_size.y / 2.0 and
            center.z - region_size.z / 2.0 <= point.z <= center.z + region_size.z / 2.0
            for center in region_centers)
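Both tests above are plain axis-aligned bounding-box checks. A minimal sanity-check sketch (the enclosing class name BallManager is an assumption, it is not shown in this excerpt):

from geometry_msgs.msg import Point

# The center of gathering region 1 lies inside; the world origin does not.
assert BallManager.is_ball_in_gathering_region(Point(x=-6.85, y=-13.65, z=1.0))
assert not BallManager.is_ball_in_gathering_region(Point(x=0.0, y=0.0, z=0.0))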
Example #16
def main(args=None):
    # Argument parsing and usage
    parser = get_parser()
    parsed_args = parser.parse_args()

    # Configuration variables
    qos_policy_name = parsed_args.incompatible_qos_policy_name
    qos_profile_publisher = QoSProfile(depth=10)
    qos_profile_subscription = QoSProfile(depth=10)

    if qos_policy_name == 'durability':
        print('Durability incompatibility selected.\n'
              'Incompatibility condition: publisher durability kind < '
              'subscription durability kind.\n'
              'Setting publisher durability to: VOLATILE\n'
              'Setting subscription durability to: TRANSIENT_LOCAL\n')
        qos_profile_publisher.durability = \
            QoSDurabilityPolicy.VOLATILE
        qos_profile_subscription.durability = \
            QoSDurabilityPolicy.TRANSIENT_LOCAL
    elif qos_policy_name == 'deadline':
        print(
            'Deadline incompatibility selected.\n'
            'Incompatibility condition: publisher deadline > subscription deadline.\n'
            'Setting publisher deadline to: 2 seconds\n'
            'Setting subscription deadline to: 1 second\n')
        qos_profile_publisher.deadline = Duration(seconds=2)
        qos_profile_subscription.deadline = Duration(seconds=1)
    elif qos_policy_name == 'liveliness_policy':
        print('Liveliness Policy incompatibility selected.\n'
              'Incompatibility condition: publisher liveliness policy < '
              'subscription liveliness policy.\n'
              'Setting publisher liveliness policy to: AUTOMATIC\n'
              'Setting subscription liveliness policy to: MANUAL_BY_TOPIC\n')
        qos_profile_publisher.liveliness = \
            QoSLivelinessPolicy.AUTOMATIC
        qos_profile_subscription.liveliness = \
            QoSLivelinessPolicy.MANUAL_BY_TOPIC
    elif qos_policy_name == 'liveliness_lease_duration':
        print(
            'Liveliness lease duration incompatibility selected.\n'
            'Incompatibility condition: publisher liveliness lease duration >'
            'subscription liveliness lease duration.\n'
            'Setting publisher liveliness lease duration to: 2 seconds\n'
            'Setting subscription liveliness lease duration to: 1 second\n')
        qos_profile_publisher.liveliness_lease_duration = Duration(seconds=2)
        qos_profile_subscription.liveliness_lease_duration = Duration(
            seconds=1)
    elif qos_policy_name == 'reliability':
        print(
            'Reliability incompatibility selected.\n'
            'Incompatibility condition: publisher reliability < subscription reliability.\n'
            'Setting publisher reliability to: BEST_EFFORT\n'
            'Setting subscription reliability to: RELIABLE\n')
        qos_profile_publisher.reliability = \
            QoSReliabilityPolicy.BEST_EFFORT
        qos_profile_subscription.reliability = \
            QoSReliabilityPolicy.RELIABLE
    else:
        print('{name} not recognised.'.format(name=qos_policy_name))
        parser.print_help()
        return 0

    # Initialization and configuration
    rclpy.init(args=args)
    topic = 'incompatible_qos_chatter'
    num_msgs = 5

    publisher_callbacks = PublisherEventCallbacks(
        incompatible_qos=lambda event: get_logger('Talker').info(str(event)))
    subscription_callbacks = SubscriptionEventCallbacks(
        incompatible_qos=lambda event: get_logger('Listener').info(str(event)))

    try:
        talker = Talker(topic,
                        qos_profile_publisher,
                        event_callbacks=publisher_callbacks,
                        publish_count=num_msgs)
        listener = Listener(topic,
                            qos_profile_subscription,
                            event_callbacks=subscription_callbacks)
    except UnsupportedEventTypeError as exc:
        print()
        print(exc, end='\n\n')
        print('Please try this demo using a different RMW implementation')
        return 1

    executor = SingleThreadedExecutor()
    executor.add_node(listener)
    executor.add_node(talker)

    try:
        while talker.publish_count < num_msgs:
            executor.spin_once()
    except KeyboardInterrupt:
        pass
    except ExternalShutdownException:
        return 1
    finally:
        rclpy.try_shutdown()

    return 0
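Example #16 calls a get_parser() helper that is not shown. Below is a minimal argparse sketch that satisfies the single attribute the code reads; the positional argument name mirrors parsed_args.incompatible_qos_policy_name, and the choices list is an assumption drawn from the branches above:

import argparse

def get_parser():
    # Hypothetical parser: only incompatible_qos_policy_name is required by the code above.
    parser = argparse.ArgumentParser(description='Demonstrate incompatible QoS events.')
    parser.add_argument(
        'incompatible_qos_policy_name',
        choices=['durability', 'deadline', 'liveliness_policy',
                 'liveliness_lease_duration', 'reliability'],
        help='Which QoS policy to make incompatible between publisher and subscription.')
    return parser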
Example #17
    def get_region_time(self, in_region, timestamp):
        """
        :type in_region: bool
        :type timestamp: Time
        """
        self.in_region = in_region
        if self.enter_region_time is None and in_region:
            self.enter_region_time = timestamp
        if not self.in_region and self.enter_region_time is not None:
            self.enter_region_time = None
        return timestamp - self.enter_region_time if self.enter_region_time is not None else Duration()
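A minimal usage sketch (ball and clock are assumed to be an instance of the enclosing class and an rclpy clock, respectively):

elapsed = ball.get_region_time(True, clock.now())   # Duration since the ball entered the region
reset = ball.get_region_time(False, clock.now())    # leaving resets the timer; returns Duration() (zero)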
Example #18
    def transform_lookup(target: str, source: str) -> ndarray:
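        # Assumed context (not shown in this snippet): tf_buffer is a tf2_ros.Buffer,
        # Time comes from rclpy.time, Duration from rclpy.duration, LookupException
        # from tf2_ros, and tf is the transformations module (quaternion_matrix,
        # translation_matrix, concatenate_matrices).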
        MAX_TRIES = 10
        attempt = 1
        while attempt <= MAX_TRIES:  # allow exactly MAX_TRIES lookups
            try:
                transform = tf_buffer.lookup_transform(target, source, Time(), timeout=Duration(seconds=0.5))
            except LookupException:
                attempt += 1
                if attempt > MAX_TRIES:
                    raise
                print("Couldn't transform, retrying...")
            else:
                break
        transform = transform.transform
        rot = transform.rotation
        trans = transform.translation

        rot_matrix = tf.quaternion_matrix((rot.w, rot.x, rot.y, rot.z))
        trans_matrix = tf.translation_matrix((trans.x, trans.y, trans.z))

        # Multiplication order matters: trans * rot applies the rotation first,
        # then the translation, when the matrix acts on a point.
        return tf.concatenate_matrices(trans_matrix, rot_matrix)
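The returned 4x4 homogeneous matrix maps homogeneous points from the source frame into the target frame. A minimal usage sketch (numpy is assumed; the frame names are placeholders):

import numpy as np

T = transform_lookup('base_link', 'camera_link')
p_source = np.array([1.0, 0.0, 0.0, 1.0])  # homogeneous point in the source frame
p_target = T @ p_source                    # the same point expressed in the target frame
print(p_target[:3])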
Example #19
    def __init__(self, fs, queue_size, slop, allow_headerless=False):
        TimeSynchronizer.__init__(self, fs, queue_size)
        self.slop = Duration(seconds=slop)
        self.allow_headerless = allow_headerless
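This constructor has the shape of message_filters' ApproximateTimeSynchronizer, which pairs messages whose header stamps differ by at most slop seconds. A minimal usage sketch (node is an assumed rclpy node; topics and message types are placeholders):

from message_filters import ApproximateTimeSynchronizer, Subscriber
from sensor_msgs.msg import CameraInfo, Image

image_sub = Subscriber(node, Image, 'camera/image_raw')
info_sub = Subscriber(node, CameraInfo, 'camera/camera_info')
sync = ApproximateTimeSynchronizer([image_sub, info_sub], queue_size=10, slop=0.1)
sync.registerCallback(lambda image, info: print(image.header.stamp, info.header.stamp))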
Example #20
    def send_text(self, text):
        self.speak_msg.text = text
        self.speak_pub.publish(self.speak_msg)
        # brief pause so the same text isn't sent multiple times in quick succession
        self.get_clock().sleep_for(Duration(seconds=0.1))
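Note that sleep_for blocks the executing thread, which can stall other callbacks on a single-threaded executor. A non-blocking alternative is to rate-limit by timestamp; a sketch, where last_speak_time is an assumed attribute initialized once in __init__ (e.g. to self.get_clock().now() - Duration(seconds=1.0)):

    def send_text(self, text):
        now = self.get_clock().now()
        if now - self.last_speak_time < Duration(seconds=0.1):
            return  # drop the message instead of blocking
        self.speak_msg.text = text
        self.speak_pub.publish(self.speak_msg)
        self.last_speak_time = now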