def __init__(self, model_to_track_list):
        """Track the given Gazebo models and publish them on people topics.

        :param model_to_track_list: names of the Gazebo models to report on.
        """
        # These are the models that we will generate information about.
        self.gz_model_obj = GazeboModel(model_to_track_list)
        # Reusable People/Person messages, refilled on every publish cycle.
        self.people = People()
        self.person = Person()
        self.pub = rospy.Publisher("/people", People, queue_size=10)
        # NOTE(review): "/person_pose" also carries a People message; confirm
        # subscribers do not expect a single Person here.
        self.pub2 = rospy.Publisher("/person_pose", People, queue_size=10)
# Example #2
    def __init__(self, environment_name, weight_file_name, image_size, ALPHA,
                 grasp_activated, number_of_elements_to_be_output):
        """Set up the DCNN tester: RViz markers, RGB camera, Gazebo model
        tracking, the Keras model (with pre-trained weights) and the
        random-world-reset service.

        :param environment_name: environment identifier (unused in this body).
        :param weight_file_name: weights file inside <pkg>/bk to load.
        :param image_size: square input size expected by the network.
        :param ALPHA: width multiplier forwarded to create_model.
        :param grasp_activated: when True, also set up the Fetch move client.
        :param number_of_elements_to_be_output: size of the network output.
        """
        self._image_size = image_size
        self._number_of_elements_to_be_output = number_of_elements_to_be_output

        self.init_rviz_markers()

        # Init service to move fetch; needed because moveit doesn't work in python 3
        self._grasp_activated = grasp_activated
        if self._grasp_activated == True:
            self.fetch_move_client = MoveFetchClient()
            self.fetch_move_client.go_to_safe_arm_pose()

        # Init camera RGB object
        self.rgb_camera_object = RGBCamera("/dynamic_objects/camera/raw_image")

        # These are the models that we will generate information about.
        self.model_to_track_name = "demo_spam1"
        self.table_to_track_name = "demo_table1"

        model_to_track_list = [
            self.model_to_track_name, self.table_to_track_name
        ]
        self.gz_model_obj = GazeboModel(model_to_track_list)

        # We start the model in Keras
        self.model = create_model(self._image_size, ALPHA,
                                  self._number_of_elements_to_be_output)

        rospack = rospkg.RosPack()
        # get the file path for the training package's weight checkpoints
        path_to_package = rospack.get_path('my_dcnn_training_pkg')
        models_weight_checkpoints_folder = os.path.join(path_to_package, "bk")
        model_file_path = os.path.join(models_weight_checkpoints_folder,
                                       weight_file_name)

        print(model_file_path)

        self.model.load_weights(model_file_path)

        # Test-set locations used by start_prediction_test().
        self.testing_unscaled_img_folder = os.path.join(
            path_to_package, "testing/dataset_gen/images")
        self.testing_unscaled_anotations_folder = os.path.join(
            path_to_package, "testing/dataset_gen_annotations")

        # We reset the environment to a random state
        print("Starting Service to Reset World Randomly....")
        self.dynamic_world_service_call = rospy.ServiceProxy(
            '/dynamic_world_service', Empty)
        self.change_env_request = EmptyRequest()
        self.dynamic_world_service_call(self.change_env_request)
        print("Starting Service to Reset World Randomly....DONE")
# Example #3
class ModelPoseStore():
    """Tracks a set of Gazebo models and can persist one model's pose,
    sampled over time, into a YAML file.
    """

    def __init__(self, model_to_track_list):
        """
        :param model_to_track_list: Gazebo model names to generate info about.
        """
        # Wrap the Gazebo model interface for the models we report on.
        self.gz_model_obj = GazeboModel(model_to_track_list)

    def get_pose_of_model(self, model_name):
        """Return the current world pose of *model_name*."""
        return self.gz_model_obj.get_model_pose(model_name)

    def store_model_poses_for_duration(self,
                                       model_name,
                                       duration=1.0,
                                       frequency_save_pose=1.0,
                                       file_to_store="poses.yaml"):
        """Sample the pose of *model_name* at *frequency_save_pose* Hz for
        *duration* seconds, appending each timestamped sample as YAML to
        *file_to_store*. The model must be in the tracked list given at init.
        """
        rate = rospy.Rate(frequency_save_pose)

        start_stamp = rospy.get_time()
        elapsed = rospy.get_time() - start_stamp

        with open(file_to_store, 'w') as outfile:

            while elapsed <= duration:
                sample = self.reformat_pose_to_dict(
                    self.get_pose_of_model(model_name))

                rospy.logdebug(str(sample))

                # Each dump appends one {timestamp: pose} mapping entry.
                yaml.dump(sample, outfile, default_flow_style=False)

                rate.sleep()

                elapsed = rospy.get_time() - start_stamp

                rospy.logdebug("TIME SAVING==>" + str(elapsed))

    def reformat_pose_to_dict(self, pose):
        """Convert a Pose message into a {timestamp: pose_dict} mapping."""
        as_dict = message_converter.convert_ros_message_to_dictionary(pose)
        return {str(rospy.get_time()): as_dict}
class ModelPoseStore():
    """Tracks Gazebo models and republishes one model's pose on the people
    topics ("/people" and "/person_pose") for the social navigation layer.
    """

    def __init__(self, model_to_track_list):
        """
        :param model_to_track_list: Gazebo model names to generate info about.
        """
        # These are the models that we will generate information about.
        self.gz_model_obj = GazeboModel(model_to_track_list)
        # Reusable messages, refilled on every publish cycle.
        self.people = People()
        self.person = Person()
        self.pub = rospy.Publisher("/people", People, queue_size=10)
        # NOTE(review): "/person_pose" also carries a People message; confirm
        # subscribers do not expect a single Person here.
        self.pub2 = rospy.Publisher("/person_pose", People, queue_size=10)

    def get_pose_of_model(self, model_name):
        """
        Retrieves the position of an object from the world
        """
        pose_now = self.gz_model_obj.get_model_pose(model_name)

        return pose_now

    def store_model_poses_for_duration(self, model_name, duration=1.0, frequency_save_pose=1.0, file_to_store="poses.yaml"):
        """
        Continuously publish the pose of *model_name* on the people topics.

        NOTE(review): despite its name and signature, this implementation
        publishes forever at a fixed 10 Hz and never writes a file; the
        *duration*, *frequency_save_pose* and *file_to_store* parameters are
        currently unused (kept for interface compatibility) -- confirm intent.
        """

        rate = rospy.Rate(10)

        # Fixed: loop until the node is shut down instead of an unconditional
        # `while True`, so Ctrl-C / node shutdown exits this loop cleanly.
        while not rospy.is_shutdown():
            now_pose = self.get_pose_of_model(model_name)

            self.person.position.x = now_pose.position.x
            self.person.position.y = now_pose.position.y
            self.people.header.frame_id = "robot_map"
            self.people.people.append(self.person)
            rospy.logdebug(str(self.people.people[0]))
            #rospy.logdebug(str(self.person))
            self.pub.publish(self.people)
            self.pub2.publish(self.people)
            # Pop so the list holds exactly one entry on the next cycle.
            self.people.people.pop()

            rate.sleep()

    def reformat_pose_to_dict(self, pose):
        """
        Converts Pose to a dict keyed by the current ROS time stamp.
        """

        dictionary = message_converter.convert_ros_message_to_dictionary(pose)

        now_time_stamp = rospy.get_time()

        stamp_dict = {str(now_time_stamp): dictionary}

        return stamp_dict
# Example #5
    def __init__(self, model_name, init_coordinates):
        """Set up Gazebo services and cmd_vel subscription for moving a model.

        :param model_name: Gazebo model to control; also prefixes the cmd_vel
            topic this object subscribes to.
        :param init_coordinates: initial coordinates of the model.
        """
        # Becomes True the first time a pose is successfully retrieved.
        self.found_coordinates = False
        self._model_name = model_name
        self._model_coordinates = init_coordinates
        self.current_cmd_speed = Twist()
        # Gazebo services used to pause physics and teleport the model.
        self.g_pause = rospy.ServiceProxy("/gazebo/pause_physics", EmptySrv)
        self.g_unpause = rospy.ServiceProxy("/gazebo/unpause_physics",
                                            EmptySrv)
        self.g_set_state = rospy.ServiceProxy("/gazebo/set_model_state",
                                              SetModelState)
        rospy.Subscriber(model_name + "/cmd_vel", Twist, self.move_callback)

        self.gz_model = GazeboModel(robots_name_list=[model_name])

        # Predefined circular path used for trajectory stepping.
        self._trajectory = Trajectory(num_points=100,
                                      type="circle",
                                      radius=3.0,
                                      height=1.0)
def publisher_of_tf():
    """Record the rounded (x, y) positions of the listed robots at 5 Hz.

    NOTE(review): `x` and `y` are appended to but never defined in this
    function -- presumably module-level lists; confirm they exist at import.
    """
    rospy.init_node('publisher_of_tf_node', anonymous=True)
    robot_name_list = ["dahei"]
    gazebo_model_object = GazeboModel(robot_name_list)

    # Warm-up read: the result is discarded; it just requests the model data
    # once before the recording loop starts.
    for robot_name in robot_name_list:
        pose_now = gazebo_model_object.get_model_pose(robot_name)

    # Leave time enough to be sure the Gazebo Model data is initalised
    time.sleep(1)
    rospy.loginfo("Ready..Starting to record now...")

    rate = rospy.Rate(5) # 5hz
    while not rospy.is_shutdown():
        for robot_name in robot_name_list:
            pose_now = gazebo_model_object.get_model_pose(robot_name)
            if not pose_now:
                print("The Pose is not yet"+str(robot_name)+" available...Please try again later")
            else:
                # Round to 3 decimals (~mm precision) before storing.
                x.append(round(pose_now.position.x,3))
                y.append(round(pose_now.position.y,3))
                print("recording")
        rate.sleep()
# Example #7
    def __init__(self, model_to_track_list):
        """Store a GazeboModel helper for the models we report on.

        :param model_to_track_list: Gazebo model names to track.
        """
        # These are the models that we will generate information about.
        self.gz_model_obj = GazeboModel(model_to_track_list)
# Example #8
def dataset_generator(models_of_interest_list,
                      models_to_be_list,
                      full_path_to_dataset_gen='./dataset_gen/',
                      number_of_dataset_elements=10,
                      frequency=1,
                      show_images=False,
                      env_change_settle_time=2.0,
                      move_arm=False,
                      move_time=2.0,
                      index_img=0):
    """Generate an image + XML-annotation dataset from the simulation.

    For each element: randomise the world, optionally move the arm to a
    random TCP pose, capture one RGB image, and write the 3D pose of every
    model of interest into an XML annotation file next to the image.

    :param models_of_interest_list: models whose poses are annotated.
    :param models_to_be_list: all models the GazeboModel helper must track.
    :param full_path_to_dataset_gen: output folder (deleted and recreated!).
    :param number_of_dataset_elements: number of image/annotation pairs.
    :param frequency: display rate [Hz]; only affects the image show period.
    :param show_images: when True, each capture is shown with cv2.imshow.
    :param env_change_settle_time: seconds to wait after a world reset.
    :param move_arm: when True, move the TCP to a random pose per element.
    :param move_time: seconds to wait after an arm move.
    :param index_img: starting index used in generated file names.
    """
    # We first wait for the service for RandomEnvironment change to be ready
    rospy.loginfo("Waiting for service /dynamic_world_service to be ready...")
    rospy.wait_for_service('/dynamic_world_service')
    rospy.loginfo("Service /dynamic_world_service READY")
    dynamic_world_service_call = rospy.ServiceProxy('/dynamic_world_service',
                                                    Empty)
    change_env_request = EmptyRequest()

    dynamic_world_service_call(change_env_request)

    # Init to move the robot arm
    move_arm_client_object = MoveArmClient()

    # Sampling ranges [m] for random TCP poses (x and z are fixed values).
    x_range = [0.35, 0.35]
    y_range = [0.1, 0.5]
    z_range = [0.64 + 0.3, 0.64 + 0.3]

    # Fixed end-effector orientation quaternion (XYZW).
    orientation_XYZW = [-0.707, 0.000, 0.707, 0.001]

    #Init Pose
    move_arm_client_object.go_to_safe_arm_pose()

    # Period to show the images, in milliseconds
    period = int((1 / frequency) * 1000)

    # We first check if the path given exists; if so it is wiped and recreated
    if os.path.exists(full_path_to_dataset_gen):
        shutil.rmtree(full_path_to_dataset_gen)

    os.makedirs(full_path_to_dataset_gen)
    # NOTE(review): FATAL level seems used only for visibility in the logs.
    rospy.logfatal("Created folder=" + str(full_path_to_dataset_gen))

    # Init camera RGB object
    camera_topic = "/robot1_camera/rgb/image_raw"
    camera_info_topic = "/robot1_camera/rgb/camera_info"
    camera_obj = RGBCamera(camera_topic, camera_info_topic)

    # These are the models that we will generate information about.
    gz_model_obj = GazeboModel(models_to_be_list)

    # XML generator :
    xml_generator_obj = XMLGenerator(path_out=full_path_to_dataset_gen)

    # Date prefix used to name the pictures :
    now = datetime.datetime.now()
    str_date = now.strftime("%Y_%m_%d_%H_%M_")

    # create the images folder :
    path_img_dir = os.path.join(full_path_to_dataset_gen, 'images/')

    if not os.path.exists(path_img_dir):
        os.makedirs(path_img_dir)

    # We generate as many dataset elements as it was indicated.
    for i in range(number_of_dataset_elements):

        # We Randomise Environment
        dynamic_world_service_call(change_env_request)
        rospy.logwarn("Waiting for new env to settle...")
        rospy.sleep(env_change_settle_time)
        rospy.logwarn("Waiting for new env to settle")

        # We move Arm to random pose
        if move_arm:
            rospy.logwarn("Moving to Random Pose TCP")
            move_arm_client_object.move_tcp_to_random_pose(
                x_range, y_range, z_range, orientation_XYZW)
            rospy.sleep(move_time)
            rospy.logwarn("Moving to Random Pose TCP DONE")
        else:
            # No arm motion: exclude the move wait from the time estimate.
            move_time = 0.0

        # Remaining elements times the per-element wall time, in minutes.
        estimated_time_of_completion = ((number_of_dataset_elements - i) *
                                        (env_change_settle_time +
                                         (period / 1000.0) + move_time)) / 60.0
        rospy.logwarn("Time Estimated of completion [min]==>" +
                      str(estimated_time_of_completion))

        # We take picture
        rospy.logerr("Takeing PICTURE")
        #image = camera_obj.getImage()
        image = camera_obj.get_latest_image()
        rospy.logerr("Takeing PICTURE...DONE")
        #cam_info = camera_obj.get_camera_info()
        cam_info = camera_obj.get_camera_info()

        if image is None:
            # Fall back to a black frame so the dataset element is not lost.
            rospy.logwarn("Image value was none")
            image = numpy.zeros((cam_info.height, cam_info.width))

        filename = str_date + str(index_img)

        # We retrieve objects position
        pose_models_oi_dict = {}
        for model_name in models_of_interest_list:
            rospy.logdebug("model_name==>" + str(model_name))
            pose_now = gz_model_obj.get_model_pose(model_name)
            pose_models_oi_dict[model_name] = pose_now

        rospy.logdebug("ObjectsOfInterest poses==>" + str(pose_models_oi_dict))
        #raw_input("Press After Validate Position in simulation....")
        # We create the XML list tags for each Model of Interest captured before
        listobj = []
        for model_name, model_3dpose in pose_models_oi_dict.items():

            x_com = model_3dpose.position.x
            y_com = model_3dpose.position.y
            z_com = model_3dpose.position.z
            quat_x = model_3dpose.orientation.x
            quat_y = model_3dpose.orientation.y
            quat_z = model_3dpose.orientation.z
            quat_w = model_3dpose.orientation.w

            pose3d = [x_com, y_com, z_com, quat_x, quat_y, quat_z, quat_w]
            listobj.append(XMLObjectTags(name=model_name, pose3d=pose3d))

        image_format_extension = ".png"
        xml_generator_obj.generate(object_tags=listobj,
                                   filename=filename,
                                   extension_file=image_format_extension,
                                   camera_width=cam_info.width,
                                   camera_height=cam_info.height)

        index_img += 1

        file_name_ext = filename + image_format_extension
        path_img_file = os.path.join(path_img_dir, file_name_ext)

        cv2.imwrite(path_img_file, image)

        if show_images:
            cv2.imshow(filename, image)
            cv2.waitKey(period)
            cv2.destroyAllWindows()
# Example #9
class MoveModel(object):
    """Moves a named Gazebo model: either by stepping a predefined circular
    trajectory, or by integrating velocity commands received on
    <model>/cmd_vel and teleporting the model via /gazebo/set_model_state.
    """

    def __init__(self, model_name, init_coordinates):
        """
        :param model_name: Gazebo model to control; also prefixes the cmd_vel
            topic this object subscribes to.
        :param init_coordinates: initial coordinates of the model.
        """
        # Becomes True the first time a pose is successfully retrieved.
        self.found_coordinates = False
        self._model_name = model_name
        self._model_coordinates = init_coordinates
        self.current_cmd_speed = Twist()
        # Gazebo services used to pause physics and teleport the model.
        self.g_pause = rospy.ServiceProxy("/gazebo/pause_physics", EmptySrv)
        self.g_unpause = rospy.ServiceProxy("/gazebo/unpause_physics",
                                            EmptySrv)
        self.g_set_state = rospy.ServiceProxy("/gazebo/set_model_state",
                                              SetModelState)
        rospy.Subscriber(model_name + "/cmd_vel", Twist, self.move_callback)

        self.gz_model = GazeboModel(robots_name_list=[model_name])

        # Predefined circular path used by move_step_trajectory().
        self._trajectory = Trajectory(num_points=100,
                                      type="circle",
                                      radius=3.0,
                                      height=1.0)

    def move_callback(self, msg):
        """Cache the latest commanded velocity from <model>/cmd_vel."""
        self.current_cmd_speed = msg

    def move_model(self, coordinates_to_move_to, pause_physics=False):
        """Teleport the model to the given coordinates via
        /gazebo/set_model_state, zeroing its twist.

        :param coordinates_to_move_to: target with x/y/z and roll/pitch/yaw.
        :param pause_physics: when True, pause physics around the move.
        """
        if pause_physics:
            rospy.loginfo("Start Pausing Physics")
            rospy.wait_for_service("/gazebo/pause_physics")
            rospy.loginfo("Pausing physics")
            try:
                self.g_pause()
            except Exception as e:
                rospy.logerr('Error on calling service: %s', str(e))

        #rospy.loginfo("Filling Pose Data")
        pose = Pose()

        pose.position.x = coordinates_to_move_to.x
        pose.position.y = coordinates_to_move_to.y
        pose.position.z = coordinates_to_move_to.z

        # Convert target roll/pitch/yaw to the quaternion Gazebo expects.
        quaternion = tf.transformations.quaternion_from_euler(
            coordinates_to_move_to.roll, coordinates_to_move_to.pitch,
            coordinates_to_move_to.yaw)
        pose.orientation.x = quaternion[0]
        pose.orientation.y = quaternion[1]
        pose.orientation.z = quaternion[2]
        pose.orientation.w = quaternion[3]

        # We set twist to Null to remove any prior movements
        twist = Twist()
        linear = Vector3()
        angular = Vector3()

        linear.x = 0.0
        linear.y = 0.0
        linear.z = 0.0

        angular.x = 0.0
        angular.y = 0.0
        angular.z = 0.0

        twist.linear = linear
        twist.angular = angular

        state = ModelState()

        state.model_name = self._model_name
        state.pose = pose
        state.twist = twist

        #print state
        #rospy.loginfo("Moving model = " + str(self._model_name))
        try:
            ret = self.g_set_state(state)
            #print ret.status_message
        except Exception as e:
            rospy.logerr('Error on calling service: %s', str(e))

        if pause_physics:
            rospy.loginfo("Unpausing physics")
            try:
                self.g_unpause()
            except Exception as e:
                rospy.logerr('Error on calling service: %s', str(e))

    def move_step_trajectory(self):
        """Advance one step along the predefined trajectory (looping)."""
        coordinates_to_move_to = self._trajectory.step_trajectory(loop=True)
        self.move_model(coordinates_to_move_to)

    def calculate_coord_for_speed(self, publish_rate, ball=False):
        """
        Gets the current position of the model and adds the increment based on the Publish rate
        """
        pose_now = self.gz_model.get_model_pose(robot_name=self._model_name)
        if pose_now:
            # space_increment = speed * Frequency

            # Yaw increment integrated from the commanded angular z speed.
            z_increment = self.current_cmd_speed.angular.z * (1.0 /
                                                              publish_rate)

            explicit_quat = [
                pose_now.orientation.x, pose_now.orientation.y,
                pose_now.orientation.z, pose_now.orientation.w
            ]
            pose_now_euler = tf.transformations.euler_from_quaternion(
                explicit_quat)

            if ball:

                # "Ball" mode uses a fixed orientation (upright, yaw 1.6 rad).
                roll = 0.0
                pitch = 0.0
                yaw = 1.6

                planar_increment = self.current_cmd_speed.linear.x * \
                    (1.0/publish_rate)
                x_yaw_comp = math.cos(yaw)
                y_yaw_comp = math.sin(yaw)

                elevation_increment = self.current_cmd_speed.linear.z * \
                    (1.0/publish_rate)
                #quaternion = tf.transformations.quaternion_from_euler(roll,pitch,yaw)

                # NOTE(review): in this branch x ignores the yaw components
                # computed above and y adds the angular-z increment -- confirm
                # this asymmetry is intended for the ball behaviour.
                coordinates_to_move_to = Coordinates(
                    x=(pose_now.position.x + planar_increment),
                    y=(pose_now.position.y + z_increment),
                    z=(pose_now.position.z + elevation_increment),
                    roll=roll,
                    pitch=pitch,
                    yaw=yaw)
            else:

                # Keep current roll/pitch, integrate yaw from angular z.
                roll = pose_now_euler[0]
                pitch = pose_now_euler[1]
                yaw = pose_now_euler[2] + z_increment

                planar_increment = self.current_cmd_speed.linear.x * \
                    (1.0/publish_rate)
                x_yaw_comp = math.cos(yaw)
                y_yaw_comp = math.sin(yaw)

                elevation_increment = self.current_cmd_speed.linear.z * \
                    (1.0/publish_rate)
                #quaternion = tf.transformations.quaternion_from_euler(roll,pitch,yaw)

                # Project the forward increment onto the world frame by yaw.
                coordinates_to_move_to = Coordinates(
                    x=(pose_now.position.x + planar_increment * x_yaw_comp),
                    y=(pose_now.position.y + planar_increment * y_yaw_comp),
                    z=(pose_now.position.z + elevation_increment),
                    roll=roll,
                    pitch=pitch,
                    yaw=yaw)
        else:
            # Pose not yet available from Gazebo.
            coordinates_to_move_to = None

        return coordinates_to_move_to

    def move_step_speed(self, publish_rate):
        """Integrate cmd_vel for one cycle and teleport the model there."""
        coordinates_to_move_to = self.calculate_coord_for_speed(publish_rate)
        if coordinates_to_move_to:
            self.move_model(coordinates_to_move_to)
            if not self.found_coordinates:
                rospy.loginfo("Coordinates Available...")
                self.found_coordinates = True
        else:
            rospy.logwarn("No Coordinates available yet...")

    def move_ball_step_speed(self, publish_rate):
        """
        We move based on the cmd vel making the object move without orientation change
        """
        coordinates_to_move_to = self.calculate_coord_for_speed(
            publish_rate, True)
        if coordinates_to_move_to:
            self.move_model(coordinates_to_move_to)
            if not self.found_coordinates:
                rospy.loginfo("Coordinates Available...")
                self.found_coordinates = True
        else:
            rospy.logwarn("No Coordinates available yet...")
# Example #10
class FetchDCNN():
    def __init__(self, environment_name, weight_file_name, image_size, ALPHA,
                 grasp_activated, number_of_elements_to_be_output):
        """Set up the DCNN tester: RViz markers, RGB camera, Gazebo model
        tracking, the Keras model (with pre-trained weights) and the
        random-world-reset service.

        :param environment_name: environment identifier (unused in this body).
        :param weight_file_name: weights file inside <pkg>/bk to load.
        :param image_size: square input size expected by the network.
        :param ALPHA: width multiplier forwarded to create_model.
        :param grasp_activated: when True, also set up the Fetch move client.
        :param number_of_elements_to_be_output: size of the network output.
        """
        self._image_size = image_size
        self._number_of_elements_to_be_output = number_of_elements_to_be_output

        self.init_rviz_markers()

        # Init service to move fetch; needed because moveit doesn't work in python 3
        self._grasp_activated = grasp_activated
        if self._grasp_activated == True:
            self.fetch_move_client = MoveFetchClient()
            self.fetch_move_client.go_to_safe_arm_pose()

        # Init camera RGB object
        self.rgb_camera_object = RGBCamera("/dynamic_objects/camera/raw_image")

        # These are the models that we will generate information about.
        self.model_to_track_name = "demo_spam1"
        self.table_to_track_name = "demo_table1"

        model_to_track_list = [
            self.model_to_track_name, self.table_to_track_name
        ]
        self.gz_model_obj = GazeboModel(model_to_track_list)

        # We start the model in Keras
        self.model = create_model(self._image_size, ALPHA,
                                  self._number_of_elements_to_be_output)

        rospack = rospkg.RosPack()
        # get the file path for the training package's weight checkpoints
        path_to_package = rospack.get_path('my_dcnn_training_pkg')
        models_weight_checkpoints_folder = os.path.join(path_to_package, "bk")
        model_file_path = os.path.join(models_weight_checkpoints_folder,
                                       weight_file_name)

        print(model_file_path)

        self.model.load_weights(model_file_path)

        # Test-set locations used by start_prediction_test().
        self.testing_unscaled_img_folder = os.path.join(
            path_to_package, "testing/dataset_gen/images")
        self.testing_unscaled_anotations_folder = os.path.join(
            path_to_package, "testing/dataset_gen_annotations")

        # We reset the environment to a random state
        print("Starting Service to Reset World Randomly....")
        self.dynamic_world_service_call = rospy.ServiceProxy(
            '/dynamic_world_service', Empty)
        self.change_env_request = EmptyRequest()
        self.dynamic_world_service_call(self.change_env_request)
        print("Starting Service to Reset World Randomly....DONE")

    def init_rviz_markers(self):
        """Create the RViz markers used to visualise the predictions vs the
        real data: real table (blue cube), real spam can (red mesh) and
        predicted centre of mass (green sphere).
        """
        def make_marker(marker_type, namespace, rgb, scale, mesh_path=""):
            # All markers share index 0 and full opacity.
            return MarkerBasics(type=marker_type,
                                namespace=namespace,
                                index=0,
                                red=rgb[0],
                                green=rgb[1],
                                blue=rgb[2],
                                alfa=1.0,
                                scale=scale,
                                mesh_package_path=mesh_path)

        # Blue cube marking the real table position.
        self.demo_table_position = make_marker("cube", "demo_table",
                                               (0.0, 0.0, 1.0),
                                               [0.6, 0.6, 0.6])

        # Red mesh at the real spam-can position.
        self.spam_real_position = make_marker(
            "mesh", "spam_real_position", (1.0, 0.0, 0.0), [1.0, 1.0, 1.0],
            "dynamic_objects/models/demo_spam/meshes/nontextured_fixed.stl")

        # Green sphere at the DCNN-predicted centre of mass.
        self.com_prediction_marker = make_marker("sphere", "com_prediction",
                                                 (0.0, 1.0, 0.0),
                                                 [0.1, 0.1, 0.1])

        #self.pick_object_menu = PickObjectMenu()

    def publish_markers_new_data(self, pred, reality, reality_table):
        """Publish the predicted COM and the ground-truth spam/table
        positions to their respective RViz markers.

        :param pred: predicted [x, y, z] of the spam centre of mass.
        :param reality: real [x, y, z] of the spam can.
        :param reality_table: real [x, y, z] of the table.
        """
        def xyz_to_pose(xyz):
            # Only position is filled; orientation stays at identity.
            pose = Pose()
            pose.position.x = xyz[0]
            pose.position.y = xyz[1]
            pose.position.z = xyz[2]
            return pose

        self.spam_real_position.publish_marker(xyz_to_pose(reality))
        self.com_prediction_marker.publish_marker(xyz_to_pose(pred))
        self.demo_table_position.publish_marker(xyz_to_pose(reality_table))

    """ 
    def publish_new_grasp_state(self, grasp_state):
        
        if (grasp_state == "grasp_success"):
            print(">>>>grasp_success....")
            self.pick_object_menu.update_menu(index=1)
        elif (grasp_state == "grasp_fail"):
            print(">>>>grasp_fail....")
            self.pick_object_menu.update_menu(index=2)
        else:
            print(">>>>nothing....")
            self.pick_object_menu.update_menu(index=0)
    """

    def predict_image(self, model, cv2_img=None, path=None):
        """Run *model* on one image and return the raw prediction vector.

        The image may be given directly as a cv2/numpy array (*cv2_img*) or
        loaded from *path*; it is resized to the network input size,
        preprocessed, and displayed for debugging before predicting.

        :param model: Keras model whose predict() is invoked.
        :param cv2_img: image array; takes precedence over *path*.
        :param path: image file to read when no array is supplied.
        :return: first row of model.predict, or None when no image was given.
        """
        # Fixed: the original tested `cv2_img.all() != None`, which raises
        # AttributeError when cv2_img is None (its documented default) and is
        # ambiguous for arrays; identity checks are the correct test.
        if cv2_img is not None:
            im = cv2_img
        elif path is not None:
            im = cv2.imread(path)
        else:
            print("Predict Image had no path image or image CV2")
            return None

        # Resize to the square network input when needed.
        if im.shape[0] != self._image_size:
            im = cv2.resize(im, (self._image_size, self._image_size))
            """
            self.rgb_camera_object.display_image(   image_display=im,
                                                    life_time_ms=50,
                                                    name="ResizedCAM"
                                                )
            """

        image = np.array(im, dtype='f')
        image = preprocess_input(image)

        self.rgb_camera_object.display_image(image_display=image,
                                             life_time_ms=50,
                                             name="ImagePredict")

        prediction = model.predict(x=np.array([image]))[0]

        return prediction

    def show_image(self, path, wait_time=500):
        """Display the image at *path* in an OpenCV window for *wait_time* ms."""
        frame = cv2.imread(path)
        cv2.imshow("image", frame)
        wait_msg = "Waiting " + str(wait_time) + "ms..."
        print(wait_msg)
        cv2.waitKey(wait_time)
        print(wait_msg + "END")
        cv2.destroyAllWindows()

    def get_xyz_from_xml(self, path_xml_file):

        tree = ET.parse(path_xml_file)

        x_com = float(tree.findtext("./object/pose3d/x_com"))
        y_com = float(tree.findtext("./object/pose3d/y_com"))
        z_com = float(tree.findtext("./object/pose3d/z_com"))

        return [x_com, y_com, z_com]

    def get_xyz_from_world(self, model_name):
        """
        Retrieves the position of an object from the world
        """
        pose_now = self.gz_model_obj.get_model_pose(model_name)

        XYZ = [pose_now.position.x, pose_now.position.y, pose_now.position.z]

        return XYZ

    def start_prediction_test(self):
        """Run the network over every stored test image and compare each
        prediction against the XML-annotated ground truth, publishing
        RViz markers and optionally triggering the grasp sequence.
        """
        print("\nTrying out unscaled image")
        for k in os.listdir(self.testing_unscaled_img_folder):
            print("Name File==>" + str(k))

            rospy.logwarn("We Reset Simulation")

            # Send the arm back to an all-zero joint configuration.
            init_joints_config = [0.0] * 7
            self.move_joints(init_joints_config)

            if "png" in k:

                img_path = os.path.join(self.testing_unscaled_img_folder, k)
                # Fixed: arguments were swapped (predict_image(img_path,
                # self.model)); the signature is (model, cv2_img=None, path=None).
                pred = self.predict_image(self.model, path=img_path)

                base_name_file = os.path.splitext(k)[0]
                annotation_file = base_name_file + ".xml"
                img_anotation_path = os.path.join(
                    self.testing_unscaled_anotations_folder, annotation_file)
                reality = self.get_xyz_from_xml(img_anotation_path)

                print("Class Prediction=>" + str(pred))
                print("Class Reality File Image=>" + str(reality))

                # Fixed: publish_markers_new_data requires the table pose as a
                # third argument; fetch it from the simulated world as the
                # other prediction methods do.
                reality_table = self.get_xyz_from_world(
                    self.table_to_track_name)
                self.publish_markers_new_data(pred, reality, reality_table)

                self.show_image(img_path, wait_time=5000)

                if self._grasp_activated == True:
                    self.start_grasp_sequence(pred)

        rospy.loginfo("Start Prediction DONE...")

    def start_camera_rgb_prediction(self,
                                    number_of_tests=1,
                                    camera_period=5.0,
                                    wait_reset_period=3.0,
                                    go_dustbin=False):
        """Run *number_of_tests* live predictions from the RGB camera.

        Each iteration randomises the world, grabs the latest camera image,
        predicts the spam COM, publishes comparison markers and, when
        grasping is enabled, triggers the grasp sequence.

        :param number_of_tests: how many predict/compare cycles to run.
        :param camera_period: display period [s]; currently unused because
            the display call below is commented out.
        :param wait_reset_period: settle time after each world reset [s].
        :param go_dustbin: when grasping, drop the object in the dustbin.
        """
        for i in range(number_of_tests):
            print("Number of Image=>" + str(i))

            self.dynamic_world_service_call(self.change_env_request)
            print("Waiting for Reset Env to settle=>")
            rospy.sleep(wait_reset_period)
            print("Waiting for Reset Env to settle...DONE")

            cv2_img = self.rgb_camera_object.get_latest_image()
            pred = self.predict_image(self.model, cv2_img)

            # Add Z Axis value: the network output is used for (x, y) only;
            # z is fixed to 0.6 + 0.042 (presumably table height plus an
            # object offset -- confirm against the world model).
            z_value = 0.6 + 0.042
            pred_mod = [pred[0], pred[1], z_value]

            reality = self.get_xyz_from_world(self.model_to_track_name)
            reality_table = self.get_xyz_from_world(self.table_to_track_name)

            print("Class Prediction=>" + str(pred))
            print("Class Prediction Corrected=>" + str(pred_mod))
            print("Class Reality SIMULATION=>" + str(reality))
            print("Class RealityTabel SIMULATION=>" + str(reality_table))
            self.publish_markers_new_data(pred_mod, reality, reality_table)

            #self.rgb_camera_object.display_latest_image(int(camera_period*1000))

            if self._grasp_activated == True:
                if go_dustbin:
                    self.start_grasp_sequence_leave_dustbin(pred_mod)
                else:
                    self.start_grasp_sequence(pred_mod)

        rospy.loginfo("Start Prediction DONE...")

    def start_camera_rgb_prediction_continuous(self, image_freq=20.0):
        """
        It continuously makes predictions from the RGB camera at
        *image_freq* Hz until the node is shut down, publishing comparison
        markers for each prediction (no grasping in this mode).
        """
        # We reset the world Once
        self.dynamic_world_service_call(self.change_env_request)
        print("Waiting for Reset Env to settle=>")
        wait_reset_period = 3.0
        rospy.sleep(wait_reset_period)
        print("Waiting for Reset Env to settle...DONE")

        rate = rospy.Rate(image_freq)
        # Show each frame for one publishing period.
        life_time_ms = int((1.0 / image_freq) * 1000)
        while not rospy.is_shutdown():

            cv2_img = self.rgb_camera_object.get_latest_image()

            self.rgb_camera_object.display_image(image_display=cv2_img,
                                                 life_time_ms=life_time_ms)

            pred = self.predict_image(self.model, cv2_img)
            # Add Z Axis value: z is fixed to 0.6 + 0.042 (presumably table
            # height plus an object offset -- confirm).
            z_value = 0.6 + 0.042
            pred_mod = [pred[0], pred[1], z_value]

            reality = self.get_xyz_from_world(self.model_to_track_name)
            reality_table = self.get_xyz_from_world(self.table_to_track_name)

            print("Class Prediction=>" + str(pred))
            print("Class PredictionMod=>" + str(pred_mod))
            print("Class Reality SIMULATION=>" + str(reality))
            self.publish_markers_new_data(pred_mod, reality, reality_table)
            rate.sleep()

        rospy.loginfo("Start Prediction DONE...")

    def move_endeffector(self, position_XYZ):
        """
        Move the end effector to the given cartesian position using a fixed
        top-down orientation.

        :param position_XYZ: target [x, y, z] position for the end effector.
        :return: result reported by the MoveFetch client.
        """
        # Fixed: log message said "ENd Effector"; now matches the END log
        # line and the sibling move_joints wrapper.
        rospy.logwarn("START Move End Effector")

        # Fixed quaternion used throughout this class: gripper pointing
        # down towards the table.
        orientation_XYZW = [-0.707, 0.000, 0.707, 0.001]

        result = self.fetch_move_client.move_endeffector(
            position_XYZ, orientation_XYZW)

        rospy.logwarn("END Move End Effector, result=" + str(result))

        return result

    def move_joints(self, joints_config):
        """
        Send the arm to the given joint configuration.

        :param joints_config: sequence of target joint values.
        :return: result reported by the MoveFetch client.
        """
        rospy.logwarn("START Move Joints")
        outcome = self.fetch_move_client.move_joints(joints_config)
        rospy.logwarn("END Move Joints, result=" + str(outcome))
        return outcome

    def open_close_gripper(self, open_or_close):
        """
        Drive the gripper fully open or closed.

        :param open_or_close: "open" releases; any other value closes.
        """
        if open_or_close == "open":
            # Wide aperture, gentle effort for releasing.
            gripper_x, max_effort = 0.4, 10.0
        else:
            # Narrow aperture, stronger effort to hold the object.
            gripper_x, max_effort = 0.05, 20.0

        self.fetch_move_client.move_gripper(gripper_x, max_effort)

    def check_if_object_grasped(self, tcp_xyz, wait_time=2.0, max_delta=0.2):
        """
        Decide whether the tracked object was lifted off the table.

        Compares the gripper (TCP) height against the object's simulated
        height: if the object followed the gripper upwards the vertical gap
        stays small and the grasp is considered successful.

        :param tcp_xyz: [x, y, z] of the tool centre point after lifting.
        :param wait_time: seconds to pause before returning.
        :param max_delta: maximum allowed z gap between TCP and object.
        :return: True when the grasp succeeded, False otherwise.
        """
        object_xyz = self.get_xyz_from_world(self.model_to_track_name)

        print("model_to_track_reality=>" + str(object_xyz))
        print("tcp_xyz=>" + str(tcp_xyz))

        delta = tcp_xyz[2] - object_xyz[2]

        rospy.logwarn("delta==" + str(delta) + " <= " + str(max_delta))

        grasp_state = "grasp_success" if delta <= max_delta else "grasp_fail"

        #self.publish_new_grasp_state(grasp_state)

        rospy.sleep(wait_time)

        return grasp_state == "grasp_success"

    def start_grasp_sequence(self, predicted_position_XYZ):
        """
        Full pick-and-place test at the predicted object position.

        Approaches from above, grasps, lifts, verifies the grasp, then puts
        the object back down and returns the arm to the safe pose.

        Fixes: the position/orientation/move triple was duplicated five
        times (now a single nested helper) and the unused ``result`` of
        ``move_torso`` was dropped.

        :param predicted_position_XYZ: [x, y, z] predicted object position.
        """
        # Fixed quaternion for every cartesian move: gripper facing down.
        orientation_XYZW = [-0.707, 0.000, 0.707, 0.001]

        def move_to_height(height_delta):
            """Move the EE height_delta metres above the predicted XY and
            return the commanded position."""
            position_XYZ = [
                predicted_position_XYZ[0], predicted_position_XYZ[1],
                predicted_position_XYZ[2] + height_delta
            ]
            self.fetch_move_client.move_endeffector(position_XYZ,
                                                    orientation_XYZW)
            return position_XYZ

        # Start from a known safe arm pose.
        self.fetch_move_client.go_to_safe_arm_pose()

        # Raise the torso so the table top is comfortably reachable.
        self.fetch_move_client.move_torso(0.2)

        # Hover above the object.
        move_to_height(0.3)

        # Exercise the gripper, ending open and ready to grasp.
        self.open_close_gripper(open_or_close="open")
        self.open_close_gripper(open_or_close="close")
        self.open_close_gripper(open_or_close="open")

        # Lower onto the object and close the gripper around it.
        move_to_height(0.19)
        self.open_close_gripper(open_or_close="close")

        # Lift the object and verify it came up with the gripper.
        lift_position = move_to_height(0.3)
        self.check_if_object_grasped(tcp_xyz=lift_position)

        # Put the object back down and release it.
        move_to_height(0.19)
        self.open_close_gripper(open_or_close="open")

        # We reset the Graps Marker
        #self.publish_new_grasp_state(grasp_state="nothing")

        # Retreat upwards and finish in the safe pose.
        move_to_height(0.3)
        self.fetch_move_client.go_to_safe_arm_pose()

    def start_grasp_sequence_leave_dustbin(self, predicted_position_XYZ):
        """
        Grasp the object at the predicted position and, when the grasp
        check succeeds, drop it into the dustbin; always finishes back at
        the safe arm pose.

        :param predicted_position_XYZ: [x, y, z] predicted object position.
        """
        # Gripper faces down for every cartesian move.
        top_down_orientation = [-0.707, 0.000, 0.707, 0.001]

        def hover_at(offset_z):
            """Place the EE offset_z metres above the predicted XY and
            return the commanded position."""
            target = [
                predicted_position_XYZ[0], predicted_position_XYZ[1],
                predicted_position_XYZ[2] + offset_z
            ]
            self.fetch_move_client.move_endeffector(target,
                                                    top_down_orientation)
            return target

        # Begin from the safe arm configuration.
        self.fetch_move_client.go_to_safe_arm_pose()

        # Torso height that keeps the table within reach.
        self.fetch_move_client.move_torso(0.2)

        # Approach pose above the object.
        hover_at(0.3)

        # Cycle the gripper so it ends open, ready to grasp.
        self.open_close_gripper(open_or_close="open")
        self.open_close_gripper(open_or_close="close")
        self.open_close_gripper(open_or_close="open")

        # Descend and close around the object.
        hover_at(0.19)
        self.open_close_gripper(open_or_close="close")

        # Lift and check whether the object followed the gripper.
        lift_target = hover_at(0.3)
        grasp_success = self.check_if_object_grasped(tcp_xyz=lift_target)

        if grasp_success:
            # Carry the grasped object over to the dustbin.
            self.fetch_move_client.go_to_dustbin_arm_pose()

        # Release the object (into the dustbin on success).
        self.open_close_gripper(open_or_close="open")

        # We reset the Graps Marker
        #self.publish_new_grasp_state(grasp_state="nothing")

        # Finish at the safe pose.
        self.fetch_move_client.go_to_safe_arm_pose()