Example 1
    def execute(self):

        ##########################################
        #                 GOAL i                 #
        ##########################################

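        # GoalObject(<goal payload>, <timeout in seconds>): here an empty
        # payload and a 15 s timeout; the argument order is inferred from the
        # calls in the other examples.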
        goal = GoalObject({}, 15.0)

        print "request_goal"
        self.request_goal(goal)

        print "wait_goal_result"
        self.wait_goal_result()

        ##########################################
        #    CHECK RESULT i AND UPDATE SCORE     #
        ##########################################

        if goal.has_timed_out():
            print "GOAL TIMEOUT"
        elif goal.has_been_completed():
            print "GOAL COMPLETED:"
            result = goal.get_result()
            print "result:\n", result
        else:
            print "GOAL NOT COMPLETED"

        self.save_and_publish_score()

        if not self.is_benchmark_running():
            print self.get_end_description()

            if self.has_benchmark_timed_out():
                print "BENCHMARK TIMEOUT"
            elif self.has_benchmark_been_stopped():
                print "BENCHMARK STOPPED"
            else:
                print "BENCHMARK ABORTED"
Example 2
    def execute(self):

        start_benchmark_time = rospy.Time.now()

        print "params:\n", self.params

        # init tf and subscribers
        tf_listener = tf.TransformListener()
        tf_broadcaster = tf.TransformBroadcaster()

        # Get the parameters of the benchmark (starting pose, waypoints, goal timeout)
        starting_robot_pose = Pose2D(*self.params["starting_pose"])
        waypoints = self.params["waypoints"]
        goal_timeout = self.params["goal_timeout"]

        # Variables to compute score
        N = len(waypoints)
        i = 0
        sum_d = 0.0  # [m]
        sum_sin = 0.0
        sum_cos = 0.0
        execution_time = rospy.Duration(0.0)  # [s]
        timed_out_waypoints = 0
        reached_waypoints = 0

        # acquire markerset-robot transform from config
        team_name = self.get_benchmark_team()
        transform_path = path.join(
            path.join(rospy.get_param("~resources_directory"), "transforms"),
            "transform-%s.yaml" % team_name)

        with open(transform_path, 'r') as transform_file:
            markerset_to_robot_transform = yaml.load(
                transform_file)["markerset_to_robot_transform"]

        print "team_name: ", team_name, "\ttransform_path: ", transform_path, "\tmarkerset_to_robot_transform: ", markerset_to_robot_transform

        while self.is_benchmark_running() and i < N:

            self.score["segment_%i" % (i + 1)] = {}

            ##########################################
            #                 GOAL i                 #
            ##########################################

            goal = GoalObject(waypoints[i], goal_timeout)

            print "request_goal"
            self.request_goal(goal)

            start_segment_time = rospy.Time.now()
            self.score["segment_%i" % (i + 1)]["start_segment_time[s]"] = (
                start_segment_time - start_benchmark_time).to_sec()
            self.save_and_publish_score()

            print "wait_goal_result"
            self.wait_goal_result()

            end_segment_time = rospy.Time.now()
            self.score["segment_%i" % (i + 1)]["end_segment_time[s]"] = (
                end_segment_time - start_benchmark_time).to_sec()
            self.save_and_publish_score()

            segment_time = end_segment_time - start_segment_time
            execution_time += segment_time

            self.score["segment_%i" %
                       (i + 1)]["segment_time[s]"] = segment_time.to_sec()
            self.save_and_publish_score()
            rospy.loginfo("segment_time: %f" % segment_time.to_sec())

            ##########################################
            #    CHECK RESULT i AND UPDATE SCORE     #
            ##########################################

            self.score["segment_%i" %
                       (i + 1)]["timeout"] = goal.has_timed_out()
            self.score["segment_%i" %
                       (i + 1)]["completed"] = goal.has_been_completed()

            if goal.has_timed_out():
                print "GOAL TIMEOUT"
                timed_out_waypoints += 1
            elif goal.has_been_completed():
                print "GOAL COMPLETED:"
                result = goal.get_result()
                reached_waypoints += 1
                print "result:\n", result
                self.score["segment_%i" % (i + 1)]["result"] = result
            else:
                print "GOAL NOT COMPLETED"

            self.save_and_publish_score()

            # Collect the target and robot poses (using tf)
            target_pose = Pose2D(*waypoints[i])
            robot_pose = None

            robot_pose = Pose2D_from_tf_concatenation(
                self,
                "/robot",
                markerset_to_robot_transform[0],
                markerset_to_robot_transform[1],
                "/robot_markerset",
                "/testbed_origin",
                tf_broadcaster,
                tf_listener,
                max_failed_attempts=5)

            # If the pose could not be acquired, ask the RefOp if the waypoint should be skipped or try again
            while robot_pose == None:
                rospy.logwarn(
                    "The position of the robot markerset could not be acquired"
                )

                manual_operation_3 = ManualOperationObject(
                    "The position of the robot markerset could not be acquired!\nTry again or skip the waypoint? type 's' for skip, anything else to try again"
                )

                self.request_manual_operation(manual_operation_3)

                if manual_operation_3.has_been_completed():
                    print "First Manual Operation result: %s" % manual_operation_3.get_result(
                    )

                    if manual_operation_3.get_result() == 's':
                        rospy.logwarn("Waypoint %i SKIPPED" % (i + 1))
                        break

                    else:
                        rospy.logwarn("Checking tracking again")

                else:
                    print "First Manual Operation NOT EXECUTED"

                if not self.is_benchmark_running():
                    if self.has_benchmark_timed_out():
                        print "BENCHMARK TIMEOUT"
                        return
                    elif self.has_benchmark_been_stopped():
                        print "BENCHMARK STOPPED"
                        return
                    else:
                        print "BENCHMARK ABORTED"
                        return

                robot_pose = Pose2D_from_tf_concatenation(
                    self,
                    "/robot",
                    markerset_to_robot_transform[0],
                    markerset_to_robot_transform[1],
                    "/robot_markerset",
                    "/testbed_origin",
                    tf_broadcaster,
                    tf_listener,
                    max_failed_attempts=5)

            self.score["segment_%i" %
                       (i + 1)]["target_pose"] = yaml.dump(target_pose)
            self.score["segment_%i" %
                       (i + 1)]["robot_pose"] = yaml.dump(robot_pose)

            robot_pose_acquired = robot_pose is not None
            print "robot_pose:\n", robot_pose

            if robot_pose_acquired:
                # Evaluate position and orientation error between target pose (tp) and robot pose (rp)
                rp = robot_pose  # robot position
                tp = target_pose  # target position

                d_error = sqrt((rp.x - tp.x)**2 + (rp.y - tp.y)**2)
                sum_d = sum_d + d_error

                sin_error = sin(fabs(rp.theta - tp.theta))
                sum_sin += sin_error
                cos_error = cos(fabs(rp.theta - tp.theta))
                sum_cos += cos_error

                self.score["segment_%i" % (i + 1)]["distance_error"] = d_error
                self.score["segment_%i" %
                           (i + 1)]["orientation_error"] = atan2(
                               fabs(sin_error), cos_error)

                rospy.loginfo(
                    "\n\n - segment_time: %s\n - dinstance_error: %s\n - orientation_error: %s"
                    % (segment_time.to_sec(),
                       self.score["segment_%i" % (i + 1)]["distance_error"],
                       self.score["segment_%i" %
                                  (i + 1)]["orientation_error"]))

            self.save_and_publish_score()

            if not self.is_benchmark_running():
                print self.get_end_description()

                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                else:
                    print "BENCHMARK ABORTED"

            i += 1

        ##########################################
        #            UPDATE SCORE                #
        ##########################################

        # Evaluate final score
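        # position_accuracy is the mean Euclidean distance error over the
        # reached waypoints [m]; orientation_accuracy is the circular mean of
        # the absolute angular errors, atan2(|sum of sines|, sum of cosines),
        # which handles wrap-around at +/-pi [rad].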
        if reached_waypoints > 0:
            position_accuracy = sum_d / reached_waypoints
            orientation_accuracy = atan2(fabs(sum_sin), sum_cos)
        else:
            position_accuracy = None
            orientation_accuracy = None

        self.score["reached_waypoints"] = reached_waypoints
        self.score["timed_out_waypoints"] = timed_out_waypoints
        self.score["execution_time[s]"] = execution_time.to_sec()
        self.score["position_accuracy[m]"] = position_accuracy
        self.score["orientation_accuracy[rad]"] = orientation_accuracy
        self.save_and_publish_score()
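The helper Pose2D_from_tf_concatenation is not defined in any of these examples. Judging from how it is called, it broadcasts the given translation/rotation as a new frame attached to the parent markerset frame, looks that frame up in the reference frame, and reduces the result to a Pose2D, returning None once max_failed_attempts lookups have failed (the Example 6 variant also appears to return a height reference). A minimal sketch under those assumptions, reusing the rospy, tf and Pose2D imports the examples already rely on and covering only the plain Pose2D-or-None form:

def Pose2D_from_tf_concatenation(benchmark, new_frame, trans, rot, parent_frame,
                                 reference_frame, tf_broadcaster, tf_listener,
                                 max_failed_attempts=5, timeout=0.5):
    # Sketch only: the real helper used by these benchmarks is not shown here.
    # 'benchmark' (self in the callers) is accepted for signature compatibility
    # but unused in this sketch.
    failed_attempts = 0
    while failed_attempts < max_failed_attempts:
        try:
            # Publish new_frame as a child of parent_frame with the given offset ...
            tf_broadcaster.sendTransform(trans, rot, rospy.Time.now(),
                                         new_frame, parent_frame)
            # ... then resolve it in reference_frame and flatten it to (x, y, yaw).
            tf_listener.waitForTransform(reference_frame, new_frame,
                                         rospy.Time(0), rospy.Duration(timeout))
            t, q = tf_listener.lookupTransform(reference_frame, new_frame,
                                               rospy.Time(0))
            yaw = tf.transformations.euler_from_quaternion(q)[2]
            return Pose2D(t[0], t[1], yaw)
        except (tf.Exception, tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            failed_attempts += 1
    return None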
Example 3
    def execute(self):
        # Load the list of people's names from the config file
        people = self.params["people"]

        # Num. of requested goals: equal to the num. of people
        N = len(people)

        # init tf
        tf_listener = tf.TransformListener()
        tf_broadcaster = tf.TransformBroadcaster()

        # Variables to compute score
        i = 0
        detected_sum = 0
        recognized_sum = 0
        position_error_sum = 0
        execution_time = rospy.Duration(0)

        # Start the benchmark
        while self.is_benchmark_running() and (i < N):
            # Pick a random person and remove it from the list
            person = random.choice(people)
            people.remove(person)
            self.score["goal_%i" % i] = {}

            ##########################################
            #            MANUAL OPERATION            #
            ##########################################

            manual_operation_1 = ManualOperationObject(
                "%s should enter the target area wearing a cap with MoCap markers, and place him/herself in an arbitrary position."
                % person)

            self.request_manual_operation(manual_operation_1)

            ##########################################
            #     CHECK RESULT AND UPDATE SCORE      #
            ##########################################

            if manual_operation_1.has_been_completed():
                print "First Manual Operation result: %s" % manual_operation_1.get_result(
                )
            else:
                print "First Manual Operation NOT EXECUTED"

            if not self.is_benchmark_running():
                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                else:
                    print "BENCHMARK ABORTED"
                return

            ## First manual operation finished, the person should be trackable inside the area
            # Check if we can get the person's pose - although we don't use the pose now, only after the robot's end_execute signal
            person_pose_mocap = None
            person_pose_mocap = Pose2D_from_tf_concatenation(
                self,
                "/person", (0, 0, 0), (0, 0, 0, 1),
                "/actor_markerset",
                "/testbed_origin",
                tf_broadcaster,
                tf_listener,
                max_failed_attempts=3)
            while person_pose_mocap == None:
                rospy.logwarn(
                    "The position of the person markerset could not be acquired - asking RefOp to retry"
                )
                manual_operation_2 = ManualOperationObject(
                    "Could not get %s's pose. Make sure (s)he is inside the designated area and wearing the markerset cap."
                    % person)

                self.request_manual_operation(manual_operation_2)

                if manual_operation_2.has_been_completed():
                    print "Second Manual Operation result: %s" % manual_operation_2.get_result(
                    )
                else:
                    print "Second Manual Operation NOT EXECUTED"

                if not self.is_benchmark_running():
                    if self.has_benchmark_timed_out():
                        print "BENCHMARK TIMEOUT"
                    elif self.has_benchmark_been_stopped():
                        print "BENCHMARK STOPPED"
                    else:
                        print "BENCHMARK ABORTED"
                    return

                person_pose_mocap = Pose2D_from_tf_concatenation(
                    self,
                    "/person", (0, 0, 0), (0, 0, 0, 1),
                    "/actor_markerset",
                    "/testbed_origin",
                    tf_broadcaster,
                    tf_listener,
                    max_failed_attempts=3)

            #########################################################
            #   PERSON IS IN THE AREA, SEND THE GOAL TO THE ROBOT   #
            #########################################################

            ##########################################
            #                 GOAL i                 #
            ##########################################

            goal = GoalObject({}, self.params['goal_timeout'])

            self.request_goal(goal)
            start_time = rospy.Time.now()

            print "wait_goal_result"

            self.wait_goal_result()
            end_time = rospy.Time.now()

            segment_time = end_time - start_time

            execution_time += segment_time

            self.score["goal_%i" %
                       i]["segment_time [s]"] = segment_time.to_sec()
            self.save_and_publish_score()
            rospy.loginfo("segment_time: %f" % segment_time.to_sec())

            ##########################################
            #    CHECK RESULT i AND UPDATE SCORE     #
            ##########################################

            self.score["goal_%i" % i]["timeout"] = goal.has_timed_out()
            self.score["goal_%i" % i]["completed"] = goal.has_been_completed()

            if goal.has_timed_out():
                print "GOAL TIMEOUT"
            elif goal.has_been_completed():
                print "GOAL COMPLETED"
                print "RESULT: %s" % (goal.get_result())
            else:
                print "GOAL NOT COMPLETED"

            if goal.has_been_completed():

                # Acquire person position from actor_markerset, which is the ground truth
                person_pose_mocap = None
                person_pose_mocap = Pose2D_from_tf_concatenation(
                    self,
                    "/person", (0, 0, 0), (0, 0, 0, 1),
                    "/actor_markerset",
                    "/testbed_origin",
                    tf_broadcaster,
                    tf_listener,
                    max_failed_attempts=3)
                if person_pose_mocap == None:
                    # Robot has finished the goal, but the pose couldn't be acquired from MoCap: restart the goal
                    rospy.logwarn(
                        "The position of the person markerset could not be acquired - restarting current goal"
                    )

                    execution_time -= segment_time
                    people.append(person)

                    continue

                detected_sum += 1

                result = goal.get_result()

                rospy.loginfo(
                    "Received result - X: %.2f \t Y: %.2f \t Theta: %.2f \t Person name: %s"
                    % (result['x'], result['y'], result['theta'],
                       result['person_name']))

                # Compare person name (case-insensitive)
                recognized = (result['person_name'].lower() == person.lower())
                if recognized:
                    recognized_sum += 1

                # Calculate position error
                result_person_pose = Pose2D(result['x'], result['y'],
                                            result['theta'])

                x_err = result_person_pose.x - person_pose_mocap.x
                y_err = result_person_pose.y - person_pose_mocap.y
                position_error = sqrt(x_err**2 + y_err**2)

                position_error_sum += position_error

                # TODO no orientation error for now: should it be added?

                # Update the score with the result of the current goal
                self.score["goal_%i" % i]['detected'] = True
                self.score[
                    "goal_%i" %
                    i]['robot-estimated person name'] = result['person_name']
                self.score["goal_%i" % i]['ground-truth person name'] = person
                self.score["goal_%i" % i]['person_recognized'] = recognized
                self.score["goal_%i" %
                           i]['person_position_error'] = position_error
                self.score[
                    "goal_%i" %
                    i]['ground-truth person position'] = '(%.2f, %.2f)' % (
                        person_pose_mocap.x, person_pose_mocap.y)
                self.score[
                    "goal_%i" %
                    i]['robot-estimated person position'] = '(%.2f, %.2f)' % (
                        result_person_pose.x, result_person_pose.y)

                # Update overall score
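                # Note: the recognition percentage is normalised by the total
                # number of goals N, while the average distance error divides
                # only by the number of detected persons.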
                self.score['Recognized persons'] = recognized_sum
                self.score['% Recognized persons'] = str(
                    round(recognized_sum / float(N) * 100, 2)) + '%'
                self.score[
                    'Distance error'] = position_error_sum / detected_sum

            else:
                self.score["goal_%i" % i]['detected'] = False

            # Update execution time whether the goal was completed or not
            self.score['execution_time'] = execution_time.to_sec()

            self.save_and_publish_score()

            if not self.is_benchmark_running():
                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                else:
                    print "BENCHMARK ABORTED"
                return

            i += 1
Example 4
    def execute(self):

        # Benchmark runs
        N = 10

        # Ground truth accuracy
        TRANS_ACCURACY = 0.0  # meters
        ROT_ACCURACY = 0.0  # degrees (note: compared directly against a yaw error expressed in radians below)

        # init tf
        tf_listener = tf.TransformListener()
        tf_broadcaster = tf.TransformBroadcaster()

        # Get items
        items_transform_path = path.join(
            path.join(rospy.get_param("~resources_directory"),
                      "objects_informations"), "items_with_pose.yaml")
        try:
            with open(items_transform_path, 'r') as items_transform_file:
                items = yaml.load(items_transform_file)["items"]
        except (EnvironmentError):
            rospy.logerr("Could not open items pose file [%s]" %
                         (items_transform_path))
            return

        # Variables to compute score
        i = 0
        class_positives = 0.0
        instance_positives = 0.0
        pose_score_sum = 0.0
        execution_time = rospy.Duration(0)

        # Maximum pose error
        max_pose_error = sqrt(
            pow(float(self.params['table_size']['x']), 2) +
            pow(float(self.params['table_size']['y']), 2))

        # Start the benchmark
        while self.is_benchmark_running() and (i < N):
            i += 1

            # Pick a random item and remove it from the list
            item = random.choice(items)
            items.remove(item)
            self.score["goal_%i" % i] = {}

            ##########################################
            #            MANUAL OPERATION            #
            ##########################################

            manual_operation_1 = ManualOperationObject(
                "Place the positioner and the %s (id: %d, instance: %s) on the table"
                % (item['description'], item['id'], item['instance']))

            self.request_manual_operation(manual_operation_1)

            ##########################################
            #     CHECK RESULT AND UPDATE SCORE      #
            ##########################################

            if manual_operation_1.has_been_completed():
                print "First Manual Operation result: %s" % manual_operation_1.get_result(
                )
            else:
                print "First Manual Operation NOT EXECUTED"

            if not self.is_benchmark_running():
                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                    return
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                    return
                else:
                    print "BENCHMARK ABORTED"
                    return

            # now = rospy.Time.now()

            ##########################################
            #   ACQUIRE OBJECT POSITION WITH MOCAP   #
            ##########################################

            acquired_object_pose = None
            acquired_object_pose = Pose2D_from_tf_concatenation(
                self,
                "/item",
                item['trans'],
                item['rot'],
                "/positioner_origin",
                "/table_origin",
                tf_broadcaster,
                tf_listener,
                max_failed_attempts=3)

            # If the position could not be acquired, try again by requesting to move the positioner
            while acquired_object_pose == None:
                rospy.logwarn(
                    "The position of the positioner could not be acquired")

                manual_operation_3 = ManualOperationObject(
                    "The position of the positioner could not be acquired!\nPlace in a different position the positioner and the %s (id: %d, instance: %s) on the table"
                    % (item['description'], item['id'], item['instance']))

                self.request_manual_operation(manual_operation_3)

                ##########################################
                #     CHECK RESULT AND UPDATE SCORE      #
                ##########################################

                if manual_operation_3.has_been_completed():
                    print "First Manual Operation result: %s" % manual_operation_3.get_result(
                    )
                else:
                    print "First Manual Operation NOT EXECUTED"

                if not self.is_benchmark_running():
                    if self.has_benchmark_timed_out():
                        print "BENCHMARK TIMEOUT"
                        return
                    elif self.has_benchmark_been_stopped():
                        print "BENCHMARK STOPPED"
                        return
                    else:
                        print "BENCHMARK ABORTED"
                        return

                acquired_object_pose = Pose2D_from_tf_concatenation(
                    self,
                    "/item",
                    item['trans'],
                    item['rot'],
                    "/positioner_origin",
                    "/table_origin",
                    tf_broadcaster,
                    tf_listener,
                    max_failed_attempts=3)

            ##########################################
            #            MANUAL OPERATION            #
            ##########################################

            manual_operation_2 = ManualOperationObject("Remove the positioner")

            self.request_manual_operation(manual_operation_2)

            ##########################################
            #     CHECK RESULT AND UPDATE SCORE      #
            ##########################################

            if manual_operation_2.has_been_completed():
                print "First Manual Operation result: %s" % manual_operation_2.get_result(
                )
            else:
                print "First Manual Operation NOT EXECUTED"

            if not self.is_benchmark_running():
                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                    return
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                    return
                else:
                    print "BENCHMARK ABORTED"
                    return

            ##########################################
            #                 GOAL i                 #
            ##########################################

            goal = GoalObject({}, self.params['goal_timeout'])

            self.request_goal(goal)
            start_time = rospy.Time.now()

            print "wait_goal_result"

            self.wait_goal_result()
            end_time = rospy.Time.now()

            segment_time = end_time - start_time

            execution_time += segment_time
            rospy.loginfo("Execution time - %f" % execution_time.to_sec())

            ##########################################
            #    CHECK RESULT i AND UPDATE SCORE     #
            ##########################################

            self.score["goal_%i" % i]["timeout"] = goal.has_timed_out()
            self.score["goal_%i" % i]["completed"] = goal.has_been_completed()
            self.score["goal_%i" % i]["segment_time"] = segment_time.to_sec()

            if goal.has_timed_out():
                print "GOAL TIMEOUT"
            elif goal.has_been_completed():
                print "GOAL COMPLETED"
                print "RESULT:  %s" % (goal.get_result())
            else:
                print "GOAL NOT COMPLETED"

            self.save_and_publish_score()

            if not self.is_benchmark_running():
                print self.get_end_description()

                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                else:
                    print "BENCHMARK ABORTED"

            if goal.has_been_completed():

                result = goal.get_result()

                result_object_pose = Pose2D(result['x'], result['y'],
                                            result['theta'])

                rospy.loginfo(
                    "Received result - X: %.2f \t Y: %.2f \t W: %.2f rad \t Class: %s \t Instance: %s"
                    % (result_object_pose.x, result_object_pose.y,
                       result_object_pose.theta, result['class'],
                       result['instance']))

                # Evaluate position error
                tf_broadcaster.sendTransform(
                    [result_object_pose.x, result_object_pose.y, 0],
                    tf.transformations.quaternion_from_euler(
                        0, 0, result_object_pose.theta), rospy.Time.now(),
                    "result_%d" % i, "origin")

                # Evaluate class
                is_class_correct = (result['class'] == item['class'])
                if (is_class_correct):
                    class_positives += 1

                # Evaluate instance
                is_instance_correct = (result['instance'] == item['instance'])
                if (is_instance_correct):
                    instance_positives += 1

                x_err = result_object_pose.x - acquired_object_pose.x
                y_err = result_object_pose.y - acquired_object_pose.y
                w_err = (result_object_pose.theta - acquired_object_pose.theta)

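                # Wrap the yaw error into (-pi, pi], zero out errors below the
                # ground-truth accuracy thresholds, normalise the rotation error
                # to [0, 1] by dividing by pi, and weigh translation and
                # rotation errors equally in pose_score.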
                if w_err > pi: w_err = w_err - 2 * pi
                if w_err < -pi: w_err = w_err + 2 * pi
                if abs(x_err) < TRANS_ACCURACY: x_err = 0.0
                if abs(y_err) < TRANS_ACCURACY: y_err = 0.0
                if abs(w_err) < ROT_ACCURACY: w_err = 0.0

                pose_err_trans = sqrt(pow(x_err, 2) + pow(y_err, 2))
                #				if pose_err_trans > max_pose_error : pose_err_trans = 1.0 # corrected with version from Lisbon 2017 Aug event

                pose_err_rot = abs(w_err) / pi
                if pose_err_rot > 1: pose_err_rot = 1.0

                pose_score = 1 - (pose_err_trans / 2) - (pose_err_rot / 2)
                pose_score_sum += pose_score

                # Update the score with the result of the current goal
                self.score["goal_%i" % i]['item_acquired'] = {}
                self.score["goal_%i" % i]['item_acquired']['pose'] = [
                    acquired_object_pose.x, acquired_object_pose.y,
                    acquired_object_pose.theta
                ]
                self.score["goal_%i" % i]['item_acquired']['pose_units'] = [
                    'm', 'm', 'rad'
                ]
                self.score["goal_%i" %
                           i]['item_acquired']['class'] = item['class']
                self.score["goal_%i" %
                           i]['item_acquired']['instance'] = item['instance']

                self.score["goal_%i" % i]['item_received'] = {}
                self.score["goal_%i" % i]['item_received']['pose'] = [
                    result_object_pose.x, result_object_pose.y,
                    result_object_pose.theta
                ]
                self.score["goal_%i" % i]['item_received']['pose_units'] = [
                    'm', 'm', 'rad'
                ]
                self.score["goal_%i" %
                           i]['item_received']['class'] = result['class']
                self.score["goal_%i" %
                           i]['item_received']['instance'] = result['instance']

                self.score["goal_%i" % i]['partial_score'] = {}
                self.score["goal_%i" %
                           i]['partial_score']['pose_score'] = pose_score
                self.score["goal_%i" % i]['partial_score'][
                    'pose_error_translation_score'] = pose_err_trans
                self.score["goal_%i" % i]['partial_score'][
                    'pose_error_translation_score_unit'] = 'pure number'
                self.score["goal_%i" % i]['partial_score'][
                    'pose_error_rotation_score'] = pose_err_rot
                self.score["goal_%i" % i]['partial_score'][
                    'pose_error_rotation_score_unit'] = 'pure number'
                self.score["goal_%i" % i]['partial_score']['pose_error'] = [
                    x_err, y_err, w_err
                ]
                self.score["goal_%i" %
                           i]['partial_score']['pose_error_units'] = [
                               'm', 'm', 'rad'
                           ]
                self.score[
                    "goal_%i" %
                    i]['partial_score']['is_class_correct'] = is_class_correct
                self.score["goal_%i" % i]['partial_score'][
                    'is_instance_correct'] = is_instance_correct

                self.save_and_publish_score()

            # Update the global score with the result of the current goal (whether the goal was completed or not)
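            # class_positives, instance_positives and pose_score_sum are floats,
            # so these Python 2 divisions by the integer run counter i are true
            # (non-truncating) divisions.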
            self.score['class_accuracy'] = class_positives / i
            self.score['instance_accuracy'] = instance_positives / i
            self.score['pose_score'] = pose_score_sum / i
            self.score['execution_time'] = execution_time.to_sec()

            self.save_and_publish_score()
Example 5
	def execute(self):
		
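		# Request N goals in total and accumulate the per-goal execution time.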
		N = 2
		i = 1
		execution_time = rospy.Duration(0.0)
		
		
		print "params:\n", self.params
				
		print "referee_score:\n", self.referee_score
		
		##########################################
		#            MANUAL OPERATION            #
		##########################################
		
		manual_operation_first = ManualOperationObject("First Manual Operation")
		
		self.start_manual_operation(manual_operation_first)
		self.wait_manual_operation()
		
		##########################################
		#     CHECK RESULT AND UPDATE SCORE      #
		##########################################
		
		if manual_operation_first.has_been_completed():
			print "First Manual Operation result: %s" % manual_operation_first.get_result()
			self.score["first_manual_operation"] = manual_operation_first.get_result()
			self.save_and_publish_score()
		else:
			print "First Manual Operation NOT EXECUTED"
			self.score["first_manual_operation"] = "not executed"
			self.save_and_publish_score()
		
		if not self.is_benchmark_running():
			if self.has_benchmark_timed_out():
				print "BENCHMARK TIMEOUT"
				return
			elif self.has_benchmark_been_stopped():
				print "BENCHMARK STOPPED"
				return
			else:
				print "BENCHMARK ABORTED"
				return
		
		
		while self.is_benchmark_running() and i <= N:
			
			##########################################
			#                 GOAL i                 #
			##########################################
			
			goal = GoalObject({"goal": "GOAL %d"%i, "details": 4}, 15.0)
			
			self.request_goal(goal)
			start_time = rospy.Time.now()
			
			print "wait_goal_result"
			
			self.wait_goal_result()
			end_time = rospy.Time.now()
			
#T			while not goal.has_been_completed() and not goal.has_timed_out(): rate.sleep(); self.complete_goal()
			
			execution_time += end_time - start_time
			rospy.loginfo("Execution time - %f" % execution_time.to_sec())
			
			
			##########################################
			#    CHECK RESULT i AND UPDATE SCORE     #
			##########################################
			
			self.score["goal_%i"%i] = {}
			self.score["goal_%i"%i]["timeout"] = goal.has_timed_out()
			self.score["goal_%i"%i]["completed"] = goal.has_been_completed()
			
			if goal.has_timed_out():
				print "GOAL TIMEOUT"
			elif goal.has_been_completed():
				print "GOAL COMPLETED:"
				result = goal.get_result()
				print "result:\n", result
				self.score["goal_%i"%i]["result"] = result
			else:
				print "GOAL NOT COMPLETED"
			
			self.save_and_publish_score()
			
			if not self.is_benchmark_running():
				print self.get_end_description()
				
				if self.has_benchmark_timed_out():
					print "BENCHMARK TIMEOUT"
				elif self.has_benchmark_been_stopped():
					print "BENCHMARK STOPPED"
				else:
					print "BENCHMARK ABORTED"
			
			i += 1
		
		##########################################
		#            MANUAL OPERATION            #
		##########################################
		
		manual_operation_last = ManualOperationObject("Last Manual Operation")
		
		self.request_manual_operation(manual_operation_last)
		
		
		##########################################
		#     CHECK RESULT AND UPDATE SCORE      #
		##########################################
		
		if manual_operation_last.has_been_completed():
			print "Last Manual Operation result: %s" % manual_operation_last.get_result()
			self.score["last_manual_operation"] = manual_operation_last.get_result()
			self.save_and_publish_score()
		else:
			print "Last Manual Operation NOT EXECUTED"
			self.score["last_manual_operation"] = "not executed"
			self.save_and_publish_score()
		
		if not self.is_benchmark_running():
			if self.has_benchmark_timed_out():
				print "BENCHMARK TIMEOUT"
			elif self.has_benchmark_been_stopped():
				print "BENCHMARK STOPPED"
			else:
				print "BENCHMARK ABORTED"
		
		
		##########################################
		#            UPDATE SCORE                #
		##########################################
		
		self.score["execution_time"] = execution_time.to_sec()
		self.save_and_publish_score()
Example 6
    def execute(self):
        global tf_listener
        global object_height_reference
        global object_grasped
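        # These globals are presumably shared with the module-level
        # timer_callback started further down, which watches the object height
        # to decide whether the object has been grasped (lifted off the table).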

        # Load the list of objects from the config file
        objects = self.params["objects"]

        # Num. of requested goals: equal to the num. of objects
        N = len(objects)

        # Calculate range where objects can be placed: 15cm margin from the table edges
        table_size = self.params["table_size"]
        min_x = 0.15
        min_y = 0.15
        max_x = table_size['x'] - 0.15
        max_y = table_size['y'] - 0.15

        # init tf
        tf_broadcaster = tf.TransformBroadcaster()
        tf_listener = tf.TransformListener()

        # Variables to compute score
        i = 0
        objects_grasped_sum = 0
        goals_completed_sum = 0
        distance_error_sum = 0
        angle_error_sum = 0
        execution_time = rospy.Duration(0)

        # Start the benchmark
        while self.is_benchmark_running() and (i < N):
            # Pick a random object and remove it from the list
            obj = random.choice(objects)
            objects.remove(obj)
            self.score["goal_%i" % i] = {}
            object_grasped = False

            # Pick a random target position for the object
            target_x = random.uniform(min_x, max_x)
            target_y = random.uniform(min_y, max_y)

            ##########################################
            #            MANUAL OPERATION            #
            ##########################################

            manual_operation_1 = ManualOperationObject(
                "Place the %s on the table, with MoCap markers attached." %
                obj['description'])

            self.request_manual_operation(manual_operation_1)

            ##########################################
            #     CHECK RESULT AND UPDATE SCORE      #
            ##########################################

            if manual_operation_1.has_been_completed():
                print "First Manual Operation result: %s" % manual_operation_1.get_result(
                )
            else:
                print "First Manual Operation NOT EXECUTED"

            if not self.is_benchmark_running():
                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                else:
                    print "BENCHMARK ABORTED"
                return

            ## First manual operation finished, the object should be trackable on the table
            # Check if we can get the object's pose, and if it's not too close to the target position
            object_pose_mocap = None
            # We assume the markerset is perfectly aligned with the object - if it's not, add a markerset-acquiring step and use the results here
            (object_pose_mocap,
             object_height_reference) = Pose2D_from_tf_concatenation(
                 self,
                 "/object", (0, 0, 0), (0, 0, 0, 1),
                 "/actor_markerset",
                 "/table_origin",
                 tf_broadcaster,
                 tf_listener,
                 max_failed_attempts=3)

            while object_pose_mocap == None:
                rospy.logwarn(
                    "The position of the object markerset could not be acquired - asking RefOp to retry"
                )
                manual_operation_2 = ManualOperationObject(
                    "Could not get the %s's pose. Make sure it's inside the designated area with a markerset attached."
                    % obj['description'])

                self.request_manual_operation(manual_operation_2)

                if manual_operation_2.has_been_completed():
                    print "Second Manual Operation result: %s" % manual_operation_2.get_result(
                    )
                else:
                    print "Second Manual Operation NOT EXECUTED"

                if not self.is_benchmark_running():
                    if self.has_benchmark_timed_out():
                        print "BENCHMARK TIMEOUT"
                    elif self.has_benchmark_been_stopped():
                        print "BENCHMARK STOPPED"
                    else:
                        print "BENCHMARK ABORTED"
                    return

                (object_pose_mocap,
                 object_height_reference) = Pose2D_from_tf_concatenation(
                     self,
                     "/object", (0, 0, 0), (0, 0, 0, 1),
                     "/actor_markerset",
                     "/table_origin",
                     tf_broadcaster,
                     tf_listener,
                     max_failed_attempts=3)

            # If target location is too close to the object, pick a different one
            distance_from_target = sqrt((object_pose_mocap.x - target_x)**2 +
                                        (object_pose_mocap.y - target_y)**2)
            while distance_from_target < 0.05:
                target_x = random.uniform(min_x, max_x)
                target_y = random.uniform(min_y, max_y)
                distance_from_target = sqrt(
                    (object_pose_mocap.x - target_x)**2 +
                    (object_pose_mocap.y - target_y)**2)

            #########################################################
            #   OBJECT IS ON THE TABLE, SEND THE GOAL TO THE ROBOT  #
            #########################################################

            ##########################################
            #                 GOAL i                 #
            ##########################################

            goal = GoalObject([target_x, target_y, obj['id']],
                              self.params['goal_timeout'])

            self.request_goal(goal)
            start_time = rospy.Time.now()

            # Start a timer: 10x / second, acquire the object pose and check if it has lifted from the table.
            timer = rospy.Timer(rospy.Duration(0.1), timer_callback)

            print "wait_goal_result"

            self.wait_goal_result()
            end_time = rospy.Time.now()

            timer.shutdown()

            segment_time = end_time - start_time

            execution_time += segment_time

            self.score["goal_%i" %
                       i]["segment_time [s]"] = segment_time.to_sec()
            self.save_and_publish_score()
            rospy.loginfo("segment_time: %f" % segment_time.to_sec())

            ##########################################
            #    CHECK RESULT i AND UPDATE SCORE     #
            ##########################################

            self.score["goal_%i" % i]["timeout"] = goal.has_timed_out()
            self.score["goal_%i" % i]["completed"] = goal.has_been_completed()

            if goal.has_timed_out():
                print "GOAL TIMEOUT"
            elif goal.has_been_completed():
                print "GOAL COMPLETED"
                print "RESULT: %s" % (goal.get_result())
            else:
                print "GOAL NOT COMPLETED"

            if goal.has_been_completed():
                goals_completed_sum += 1

                if object_grasped:
                    objects_grasped_sum += 1

                # Acquire object position from actor_markerset, which is the ground truth
                object_pose_mocap = None
                object_pose_mocap = Pose2D_from_tf_concatenation(
                    self,
                    "/object", (0, 0, 0), (0, 0, 0, 1),
                    "/actor_markerset",
                    "/table_origin",
                    tf_broadcaster,
                    tf_listener,
                    max_failed_attempts=3)[0]
                if object_pose_mocap == None:
                    # Robot has finished the goal, but the pose couldn't be acquired from MoCap: restart the goal
                    rospy.logwarn(
                        "The position of the person markerset could not be acquired - restarting current goal"
                    )

                    execution_time -= segment_time
                    objects.append(obj)

                    continue

                x_err = target_x - object_pose_mocap.x
                y_err = target_y - object_pose_mocap.y
                distance_error = sqrt(x_err**2 + y_err**2)

                distance_error_sum += distance_error

                # TODO no orientation error for now: should it be added?

                # Update the score with the result of the current goal
                self.score["goal_%i" % i]['Object grasped'] = object_grasped
                self.score["goal_%i" % i]['Distance error'] = distance_error
                self.score["goal_%i" %
                           i]['Target object position'] = '(%.2f, %.2f)' % (
                               target_x, target_y)
                self.score["goal_%i" %
                           i]['Measured object position'] = '(%.2f, %.2f)' % (
                               object_pose_mocap.x, object_pose_mocap.y)
                self.score["goal_%i" % i]['Object type'] = '%s (id %d)' % (
                    obj['description'], obj['id'])

                # Update overall score
                self.score["Objects grasped"] = str(
                    objects_grasped_sum) + " out of " + str(N)
                self.score[
                    'Avg distance error'] = distance_error_sum / goals_completed_sum

            else:
                self.score["goal_%i" % i]['result'] = 'Not completed'

            # Update execution time whether the goal was completed or not
            self.score['execution_time'] = execution_time.to_sec()

            self.save_and_publish_score()

            if not self.is_benchmark_running():
                if self.has_benchmark_timed_out():
                    print "BENCHMARK TIMEOUT"
                elif self.has_benchmark_been_stopped():
                    print "BENCHMARK STOPPED"
                else:
                    print "BENCHMARK ABORTED"
                return

            i += 1
Example 7
	def execute(self):
		# Load acceptable distance from config
		MIN_FOLLOWING_DISTANCE = self.params['min_following_distance']
		MAX_FOLLOWING_DISTANCE = self.params['max_following_distance']
		DESIRED_FOLLOWING_DISTANCE = self.params['desired_following_distance']

		# Read markerset_to_robot transform from the transform file
		team_name = self.get_benchmark_team()
		transform_path = path.join( path.join(rospy.get_param("~resources_directory"), "transforms" ), "transform-%s.yaml" % team_name )
		
		with open(transform_path, 'r') as transform_file:
			markerset_to_robot_transform = yaml.load(transform_file)["markerset_to_robot_transform"]
		
		print "team_name: ", team_name, "\ttransform_path: ", transform_path, "\tmarkerset_to_robot_transform: ", markerset_to_robot_transform

		# init tf
		tf_listener = tf.TransformListener()
		tf_broadcaster = tf.TransformBroadcaster()
		
		# Variables to compute score
		mocap_success_sum = 0
		mocap_failure_sum = 0
		deviation_from_desired_distance_sum = 0
		distance_covered_while_following = 0
		last_absolute_robot_pose = None
		
		# Start the benchmark
		while self.is_benchmark_running():
			self.score = {}

			## Check that both the person's and the robot's poses can be acquired
			robot_to_human_pose = None
			robot_to_human_pose = Pose2D_from_tf_concatenation(self, "/robot", markerset_to_robot_transform[0], markerset_to_robot_transform[1], "/robot_markerset", "/actor_markerset", tf_broadcaster, tf_listener, max_failed_attempts=3, timeout=0.5)
			while robot_to_human_pose == None:
				manual_operation = ManualOperationObject("Failed to acquire pose of human or robot. Make sure they have detectable markersets.")
				self.request_manual_operation(manual_operation)
				robot_to_human_pose = Pose2D_from_tf_concatenation(self, "/robot", markerset_to_robot_transform[0], markerset_to_robot_transform[1], "/robot_markerset", "/actor_markerset", tf_broadcaster, tf_listener, max_failed_attempts=3, timeout=0.5)

			## Send request for robot to start following
			goal = GoalObject({}, self.params['goal_timeout'])
			self.request_goal(goal)

			## Start tracking pose of human and robot.
			benchmark_start_time = rospy.Time.now()

			# Sleep for 8 seconds so that the robot has time to position itself at the desired following distance.
			rospy.sleep(8.0)

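			# Sample until the goal times out or the robot reports completion.
			# Note: goal._executed is a private attribute of GoalObject, read
			# here directly rather than through a public accessor.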
			while (not goal.has_timed_out()) and (not goal._executed):
				sample_start_time = rospy.Time.now()
				
				## Acquire robot_markerset in relation to actor_markerset
				robot_to_human_pose = None
				robot_to_human_pose = Pose2D_from_tf_concatenation(self, "/robot", markerset_to_robot_transform[0], markerset_to_robot_transform[1], "/robot_markerset", "/actor_markerset", tf_broadcaster, tf_listener, max_failed_attempts=1, timeout=0.2)

				absolute_robot_pose = None
				absolute_robot_pose = Pose2D_from_tf_concatenation(self, "/robot", markerset_to_robot_transform[0], markerset_to_robot_transform[1], "/robot_markerset", "/testbed_origin", tf_broadcaster, tf_listener, max_failed_attempts=1, timeout=0.2)
				
				## Update score
				if robot_to_human_pose != None:
					mocap_success_sum += 1

					rospy.loginfo('robot_to_human_pose: (%f, %f)' % (robot_to_human_pose.x, robot_to_human_pose.y))

					# Calculate distance
					distance = sqrt( robot_to_human_pose.x**2 + robot_to_human_pose.y**2 )
					rospy.loginfo('Current following distance: %f' % distance)

					deviation_from_desired_distance = abs(distance - DESIRED_FOLLOWING_DISTANCE)
					deviation_from_desired_distance_sum += deviation_from_desired_distance

					if (distance >= MIN_FOLLOWING_DISTANCE and distance <= MAX_FOLLOWING_DISTANCE):
						# Update distance_covered_while_following
						if last_absolute_robot_pose != None and absolute_robot_pose != None:
							x_dist = absolute_robot_pose.x - last_absolute_robot_pose.x
							y_dist = absolute_robot_pose.y - last_absolute_robot_pose.y
							dist = sqrt( x_dist**2 + y_dist**2 )
							distance_covered_while_following += dist

				else:
					mocap_failure_sum += 1

				last_absolute_robot_pose = absolute_robot_pose

				## Sleep for the remaining amount of time to 0.5s (position should be sampled every 0.5s)
				current_time = rospy.Time.now()
				elapsed_time = current_time - sample_start_time
				remaining_time = 0.5 - elapsed_time.to_sec()
				if remaining_time > 0:
					rospy.sleep(remaining_time)

				if not self.is_benchmark_running():
					if self.has_benchmark_timed_out():
						print "BENCHMARK TIMEOUT"
					elif self.has_benchmark_been_stopped():
						print "BENCHMARK STOPPED"
					else:
						print "BENCHMARK ABORTED"
					return

			# Check if benchmark duration was within 15 seconds of the goal_timeout
			benchmark_end_time = rospy.Time.now()
			benchmark_duration = (benchmark_end_time - benchmark_start_time).to_sec()
			target_duration = self.params['goal_timeout']
			if benchmark_duration < (target_duration-15) or benchmark_duration > (target_duration+15):
				rospy.logerr('Benchmark failed: finished prematurely or took too long')
				self.score['Result'] = 'Benchmark failed: finished prematurely or took too long'
				self.save_and_publish_score()
				self.abort_benchmark(message='Benchmark failed! Finished prematurely or took too long')
				return

			# Calculate score statistics and print.
			if mocap_success_sum > 0:
				self.score['Average deviation from desired distance'] = deviation_from_desired_distance_sum / float(mocap_success_sum)				
			self.score['Distance covered while following'] = distance_covered_while_following

			percent_mocap_failures = mocap_failure_sum / float(mocap_success_sum + mocap_failure_sum) * 100
			self.score['Benchmark reliability'] = str(round(100 - percent_mocap_failures, 1)) + '%'

			if percent_mocap_failures > 50:
				self.score['WARNING'] = ('%.1f%% of the time, mocap pose acquiring failed. Consider repeating the benchmark.' % percent_mocap_failures)
			
			self.save_and_publish_score()

			# Since the goal hasn't finished, simulate clicking 'Stop' on the GUI
			self.abort_benchmark()
			return