Example #1
0
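# The snippets in this example are shown without their imports. A minimal,
# assumed header for the function below; Stop() and Drive() are
# project-specific motion helpers defined elsewhere in the original code.
import time

import cv2  # only required for the commented-out imshow/waitKey debug lines
import numpy as np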
def Safe_To_Cross(Image):
    global time_1
    global time_2
    Im = Image.copy()
    Stop()  # external helper: stop the robot while the check runs
    Is_safe = False
    # Keep only bright pixels (values >= 200) and crop a fixed region of interest
    Im[Im < 200] = 0
    Im = Im[300:400, 750:900]
    # cv2.imshow('im', Image)
    # cv2.waitKey(25)
    print(np.sum(Im))
    # If any bright pixel remains in the ROI, crossing is considered safe
    if np.sum(Im) > 0:
        Is_safe = True

    if Is_safe:
        Drive(0.2, 0)  # external helper: start the robot driving
        time.sleep(0.5)
        time_1 = time.time()
        time_2 = time.time()

    return Is_safe
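
# The transport_process class below also lacks its imports. An assumed
# reconstruction of what it appears to use; ur5_control and gripper_control
# are project-local modules referenced in the original code.
import time
from math import pi

import cv2
import rospy
import tf
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Pose
from sensor_msgs.msg import Image
from std_msgs.msg import Bool

import gripper_control
import ur5_control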
class transport_process():
    def __init__(self):
        # Instantiate CvBridge
        self.bridge = CvBridge()

        # Init Listener for tf-transformation
        self.tfListener = tf.TransformListener()
        self.tfBroadcaster = tf.TransformBroadcaster()

        self.ur5 = ur5_control.ur5Controler("gripper", "/base_link", False)
        self.gripper = gripper_control.gripper()

        self.reset_poses()
        self.rgb_img = Image()
        self.poseIsUpdated_carrier = False
        self.poseIsUpdated_holder = False

        self.showObjPose = Pose()
        self.showObjPose.position.z = -0.03
        self.showObjPose.position.y = -0.045
        self.showObjPose.orientation.y = -0.707
        self.showObjPose.orientation.w = 0.707

        self.publishResized = True

        self.imgOutputSize = rospy.get_param("outputImage_size")
        self.imgWidth = rospy.get_param("camera_width")
        self.imgHeight = rospy.get_param("camera_height")
        self.img_scaleFac = float(self.imgHeight) / self.imgOutputSize
        self.resized_imgWidth = int(
            round(float(self.imgWidth) / self.img_scaleFac, 0))
        self.resized_img_horizontalStart = int(
            round(self.resized_imgWidth / 2., 0)) - self.imgOutputSize // 2  # floor division keeps the slice index an int

        # Subscriber to pose update
        rospy.Subscriber("/dope/pose_update_carrier", Bool,
                         self.pose_update_carrier_callback)
        rospy.Subscriber("/dope/pose_update_holder", Bool,
                         self.pose_update_holder_callback)
        rospy.Subscriber("/camera/color/image_raw", Image,
                         self.rgb_image_callback)  # RGB-Image
        self.rawImgPub = rospy.Publisher("/dope/camera_images",
                                         Image,
                                         queue_size=10)
        self.hasGraspedPub = rospy.Publisher(
            "/dope/has_grasped", Bool, queue_size=10
        )  # Publish a signal so the pose is no longer published to tf by the other program
        self.hasPutPub = rospy.Publisher(
            "/dope/has_put", Bool, queue_size=10
        )  # Publish a signal so the pose is no longer published to tf by the other program

        rospy.sleep(1)  # Wait to register at tf

    def reset_poses(self):
        self.graspPose = Pose()
        self.preGraspPose = Pose()
        self.putPose = Pose()
        self.prePutPose = Pose()
        self.postPutPose = Pose()

    def rgb_image_callback(self, data):
        try:
            self.img_raw = data
            cv_rgb_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            # Store image locally and resize it
            self.rgb_img = cv_rgb_image.copy()
            self.rgb_resize()
        except CvBridgeError as e:
            print(e)

    # Resize the RGB-image to be a square image of output-size
    def rgb_resize(self):
        # Copy image
        rgb_img_resized = self.rgb_img.copy()
        # Change scale so 720px become 400px
        rgb_img_resized = cv2.resize(
            rgb_img_resized, (self.resized_imgWidth, self.imgOutputSize),
            interpolation=cv2.INTER_AREA)
        # Cut off pixels at left and right to make image squared
        self.rgb_img_resized = rgb_img_resized[
            0:self.imgOutputSize,
            self.resized_img_horizontalStart:self.resized_img_horizontalStart +
            self.imgOutputSize]
        #print len(img), len(img[0])
        #output = self.rgb_img_resized
        #cv2.imshow("Image-Stream", self.rgb_img_resized)
        #cv2.waitKey(1)

    # Get the last grasp-pose, if a new pose has been sent to tf
    def pose_update_carrier_callback(self, data):
        self.update_grasp_pose()

    # Get the last put-pose, if a new pose has been sent to tf
    def pose_update_holder_callback(self, data):
        self.update_put_pose()

    def get_pose(self, fromF, toF):
        try:
            (trans,
             rot) = self.tfListener.lookupTransform(fromF, toF, rospy.Time(0))
            return self.listToPose(trans, rot)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException) as e:
            rospy.logerr(e)

    # Get transformation between base and grasp-point
    def update_grasp_pose(self):
        rospy.sleep(0.1)
        self.graspPose = self.get_pose('/base_link', 'dope_grasp_pose_carrier')
        self.preGraspPose = self.get_pose('/base_link',
                                          'dope_grasp_pose_carrier_pre')
        self.poseIsUpdated_carrier = True

    def update_put_pose(self):
        rospy.sleep(0.1)
        self.putPose = self.get_pose('/base_link', 'dope_put_pose_holder')
        self.prePutPose = self.get_pose('/base_link',
                                        'dope_put_pose_holder_pre')
        self.postPutPose = self.get_pose('/base_link',
                                         'dope_put_pose_holder_post')
        self.poseIsUpdated_holder = True

    def publish_image(self):
        self.poseIsUpdated_carrier = False
        self.poseIsUpdated_holder = False
        if not self.publishResized:
            self.rawImgPub.publish(self.img_raw)
        else:
            self.rawImgPub.publish(
                self.bridge.cv2_to_imgmsg(self.rgb_img_resized, "bgr8"))

    def prepare_grasp(self):
        actJointValues = self.ur5.group.get_current_joint_values()
        # Pre-position the last joints so the robot does not endanger its environment
        self.ur5.execute_move([
            actJointValues[0], actJointValues[1], actJointValues[2],
            -240 * pi / 180, -85 * pi / 180, 0 * pi / 180
        ])
        actJointValues = self.ur5.group.get_current_joint_values()
        self.ur5.execute_move([
            actJointValues[0], -90 * pi / 180, 150 * pi / 180,
            actJointValues[3], actJointValues[4], actJointValues[5]
        ])

    def prepare_put(self, side):
        print("Moving joints in put-configuration...")
        if side == "front":
            r1_angle = 0
        elif side == "left":
            r1_angle = 90
        elif side == "right":
            r1_angle = -90
        else:
            r1_angle = 0  # assumed fallback: unknown side values default to "front"
        actJointValues = self.ur5.group.get_current_joint_values()
        self.ur5.execute_move([
            r1_angle * pi / 180, actJointValues[1], actJointValues[2],
            actJointValues[3], actJointValues[4], actJointValues[5]
        ])

    def make_grasp(self):
        print "Moving joints in grasp-configuration..."
        self.prepare_grasp()
        print "Driving to object"
        self.ur5.move_to_pose(self.preGraspPose)
        self.ur5.move_to_pose(self.graspPose)

        ##### Close the gripper to grasp object
        self.gripper.close()
        self.ur5.attachObjectToEEF()
        rospy.sleep(5)
        if self.gripper.hasGripped():
            print("Successfully grasped object!")
        else:
            print("Error grasping object!")
            return False
        self.hasGraspedPub.publish(Bool(True))
        #rospy.sleep(0.5)
        return True

    def put_down(self, side):
        self.prepare_put(side)
        print "Driving to put-down-position"
        self.ur5.move_to_pose(self.prePutPose)
        self.ur5.move_to_pose(self.putPose)

        ##### Open the gripper
        self.gripper.open()
        self.ur5.removeAttachedObject()
        rospy.sleep(5)
        self.hasPutPub.publish(Bool(True))
        #self.ur5.removeAttachedObject()
        return True

    def store(self):
        actJointValues = self.ur5.group.get_current_joint_values()
        # Retract
        self.ur5.execute_move([
            actJointValues[0], -71 * pi / 180, 153 * pi / 180, -263 * pi / 180,
            -90 * pi / 180, 0 * pi / 180
        ])
        # Rotate
        actJointValues = self.ur5.group.get_current_joint_values()
        self.ur5.execute_move([
            90 * pi / 180, actJointValues[1], actJointValues[2],
            actJointValues[3], actJointValues[4], actJointValues[5]
        ])
        # Extend
        self.ur5.execute_move([
            90 * pi / 180, -53 * pi / 180, 117 * pi / 180, -244 * pi / 180,
            -90 * pi / 180, 0 * pi / 180
        ])
        # Rotate the second-to-last axis
        actJointValues = self.ur5.group.get_current_joint_values()
        self.ur5.execute_move([
            actJointValues[0], actJointValues[1], actJointValues[2],
            actJointValues[3], 0 * pi / 180, actJointValues[5]
        ])
        # Drive over position
        self.ur5.execute_move([
            121 * pi / 180, -49 * pi / 180, 106 * pi / 180, -240 * pi / 180,
            57 * pi / 180, 0 * pi / 180
        ])
        # Drive to put down position
        self.ur5.execute_move([
            121 * pi / 180, -41 * pi / 180, 106 * pi / 180, -248 * pi / 180,
            57 * pi / 180, 0 * pi / 180
        ])

        ##### Open the gripper
        self.gripper.open()
        self.ur5.removeAttachedObject()
        rospy.sleep(5)
        self.hasPutPub.publish(Bool(True))
        #self.ur5.removeAttachedObject()

        # Rotate the second-to-last axis
        self.ur5.execute_move([
            121 * pi / 180, -41 * pi / 180, 106 * pi / 180, -248 * pi / 180,
            35 * pi / 180, 0 * pi / 180
        ])

    def unstore(self):
        # Move to the adjacent position
        self.ur5.execute_move([
            121 * pi / 180, -41 * pi / 180, 106 * pi / 180, -248 * pi / 180,
            35 * pi / 180, 0 * pi / 180
        ])
        # Swing in
        self.ur5.execute_move([
            121 * pi / 180, -41 * pi / 180, 106 * pi / 180, -248 * pi / 180,
            57 * pi / 180, 0 * pi / 180
        ])

        self.gripper.close()
        self.ur5.attachObjectToEEF()
        rospy.sleep(5)
        if self.gripper.hasGripped():
            print("Successfully grasped object!")
        else:
            print("Error grasping object!")
            return False
        self.hasGraspedPub.publish(Bool(True))

        # lift
        self.ur5.move_xyz(0, 0, 0.05)
        # turn
        self.ur5.execute_move([
            90 * pi / 180, -53 * pi / 180, 117 * pi / 180, -244 * pi / 180,
            0 * pi / 180, 0 * pi / 180
        ])
        # Rotate the second-to-last axis
        actJointValues = self.ur5.group.get_current_joint_values()
        self.ur5.execute_move([
            actJointValues[0], actJointValues[1], actJointValues[2],
            actJointValues[3], -90 * pi / 180, actJointValues[5]
        ])

        return True

    def move_and_publish(self, jointID, angle):
        self.ur5.move_joint(jointID, angle)
        rospy.sleep(0.1)
        self.publish_image()
        timeout = time.time() + 1.5
        # Wait until an updated pose arrives or the timeout expires (pose not visible).
        # publish_image sets both poseIsUpdated flags to False; if an object is
        # detected, the corresponding callback sets its flag back to True and
        # this function returns True.
        while not self.poseIsUpdated_carrier and not self.poseIsUpdated_holder:
            rospy.sleep(0.05)
            if time.time() >= timeout:
                print "Object not detectable"
                return False
        return True

    def refine_pose(self):
        print "Refining pose..."
        self.move_and_publish(5, 20)
        self.move_and_publish(5, -40)
        self.ur5.move_joint(5, 20)  # move back to base position
        self.move_and_publish(4, 10)
        self.move_and_publish(4, -20)

    # Convert translation and rotation lists to a Pose object so a standard pose message can be published
    def listToPose(self, trans, rot):
        pose = Pose()
        pose.position.x = trans[0]
        pose.position.y = trans[1]
        pose.position.z = trans[2]
        pose.orientation.x = rot[0]
        pose.orientation.y = rot[1]
        pose.orientation.z = rot[2]
        pose.orientation.w = rot[3]
        return pose

    def copyPose(self, pose):
        newPose = Pose()
        newPose.position.x = pose.position.x
        newPose.position.y = pose.position.y
        newPose.position.z = pose.position.z
        newPose.orientation.x = pose.orientation.x
        newPose.orientation.y = pose.orientation.y
        newPose.orientation.z = pose.orientation.z
        newPose.orientation.w = pose.orientation.w
        return newPose
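
# A minimal usage sketch, not part of the original file: node name, wait times
# and the grasp-then-store sequence are assumptions based on the methods above.
if __name__ == '__main__':
    rospy.init_node('transport_process_node')
    proc = transport_process()
    # Make sure at least one camera image has arrived before publishing it
    rospy.wait_for_message("/camera/color/image_raw", Image)
    rospy.sleep(0.5)
    proc.publish_image()  # hand the current image to the pose detector
    rospy.sleep(2.0)      # give the detector time to publish a pose update
    if proc.poseIsUpdated_carrier and proc.make_grasp():
        proc.store()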
Example #3
0
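# As above, the callback is shown without its imports. An assumed minimal
# header; side_sat(), find_sides() and extractmath() are project-specific
# helpers defined elsewhere in the original code base.
import cv2
from cv_bridge import CvBridge, CvBridgeError

bridge = CvBridge()  # the module-level bridge the callback uses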
def gate_finder(image):

    try:
        img = bridge.imgmsg_to_cv2(image, "bgr8")
    except CvBridgeError as e:
        print(e)
        return

    ## Convert the image to HSV and split the channels; only the saturation channel is used
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    sides = side_sat(s)

    # bounding_boxes contains the detected boxes, each as an array of four
    # corner points (indexed below as x/y pairs) in the order:
    #   [0] - bottom left
    #   [1] - top right
    #   [2] - top left
    #   [3] - bottom right
    bounding_boxes = find_sides(sides)

    if len(bounding_boxes) > 1:

        # Clamp negative corner coordinates to zero
        for i in range(len(bounding_boxes)):
            for j in range(len(bounding_boxes[i])):
                for k in range(len(bounding_boxes[i][j])):
                    if bounding_boxes[i][j][k] < 0:
                        bounding_boxes[i][j][k] = 0

        #subtract bottom left from top left for each bounding box:
        diff1 = abs(bounding_boxes[0][2] - bounding_boxes[0][0])
        diff2 = abs(bounding_boxes[1][2] - bounding_boxes[1][0])

        # If the difference in x exceeds the difference in y, the box is a
        # horizontal rectangle and is discarded. Pop the higher index first so
        # the lower index still refers to the intended box.
        if diff2[0] > diff2[1]:
            bounding_boxes.pop(1)
        if diff1[0] > diff1[1]:
            bounding_boxes.pop(0)

        # Find the halfway point between the two box midpoints

        if len(bounding_boxes) > 1:

            # Find the x midpoint of each box by averaging its top and bottom edge midpoints
            top_mid = (bounding_boxes[0][1][0] + bounding_boxes[0][2][0]) / 2
            bot_mid = (bounding_boxes[0][0][0] + bounding_boxes[0][3][0]) / 2
            mid0 = (top_mid + bot_mid) / 2

            top_mid = (bounding_boxes[1][1][0] + bounding_boxes[1][2][0]) / 2
            bot_mid = (bounding_boxes[1][0][0] + bounding_boxes[1][3][0]) / 2
            mid1 = (top_mid + bot_mid) / 2

            image_mid = (mid0 + mid1) / 2
            mid0 = int(mid0)
            mid1 = int(mid1)
            image_mid = int(image_mid)

            output = img.copy()
            #Draw on image:
            if bounding_boxes is not None:
                for box in bounding_boxes:
                    cv2.drawContours(output, [box], 0, (0, 0, 255), 2)

            cv2.line(output, (mid1, 0), (mid1, 800), (0, 255, 0))
            cv2.line(output, (mid0, 0), (mid0, 800), (0, 255, 0))
            cv2.line(output, (image_mid, 0), (image_mid, 800), (0, 255, 0))

            print("boutta show")
            #cv2.imshow("Output", output)

            #cv2.waitKey(0)

            mid_box = [[image_mid + 10, bounding_boxes[0][0][1]],
                       [image_mid - 10, bounding_boxes[0][1][1]],
                       [image_mid + 10, bounding_boxes[0][2][1]],
                       [image_mid - 10, bounding_boxes[0][3][1]]]
            boxes = [bounding_boxes[1], mid_box]

            # boxes is only defined in this branch, so call extractmath here
            # (external helper from the original code base)
            extractmath(boxes)
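
# A hedged usage sketch (the topic name is an assumption): gate_finder is
# written as an image callback, so it can be wired directly to a camera topic.
if __name__ == '__main__':
    import rospy
    from sensor_msgs.msg import Image

    rospy.init_node('gate_finder_node')
    rospy.Subscriber("/camera/image_raw", Image, gate_finder)
    rospy.spin()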