Code example #1
    def callback(self, data):
        print('\n', 'Predicting')
        # try:
        #   cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        # except CvBridgeError as e:
        #   print(e)
        # cv_image = cv2.copyMakeBorder(cv_image, 100, 100, 100, 100, cv2.BORDER_CONSTANT, value=[85, 120, 68])
        # cv_image = cv2.imread("pic_buffer/R.png")
        # cv2.imwrite("pic_buffer/R.png", cv_image)

        # im = array_to_image(cv_image)
        # dn.rgbgr_image(im)
        # # print(dn.detect(net, meta, im))
        # print("===")
        r = []
        img = cv2.imread("pic_buffer/1_R.png")
        sp = img.shape
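        # run the detector over ten overlapping crops of the frame (halves,
        # thirds, quarters), then merge the duplicate detections below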
        r += self.subpredict(0, int(sp[0] / 2), 0, sp[1])
        r += self.subpredict(int(sp[0] / 2), sp[0], 0, sp[1])
        r += self.subpredict(int(sp[0] / 4), int(sp[0] * 3 / 4), 0, sp[1])
        r += self.subpredict(0, sp[0], 0, int(sp[1] / 2))
        r += self.subpredict(0, sp[0], int(sp[1] / 2), sp[1])
        r += self.subpredict(0, sp[0], int(sp[1] / 3), int(sp[1] * 2 / 3))
        r += self.subpredict(0, sp[0], 0, int(sp[1] * 2 / 3))
        r += self.subpredict(0, sp[0], int(sp[1] / 3), sp[1])
        r += self.subpredict(0, int(sp[0] * 2 / 3), 0, sp[1])
        r += self.subpredict(int(sp[0] / 3), sp[0], 0, sp[1])
        r = self.removeDuplicate(r)
        print()
        img = self.drawCrossings(img, r)
        cv2.imwrite("pic_buffer/2_D.png", img)

        boxes = BoundingBoxes()
        print('\n', 'Predict result:')
        for i in range(len(r)):
            box = BoundingBox()
            print('    ', r[i][0], r[i][1] * 100, '%')
            box.Class = r[i][0].decode('utf-8')
            box.probability = r[i][1]
            box.id = i
            box.xmin = int(r[i][2][0])
            box.ymin = int(r[i][2][1])
            box.xmax = int(r[i][2][2])
            box.ymax = int(r[i][2][3])
            boxes.bounding_boxes.append(box)
            # if b'endpoint' == r[i][0]:
            #     print('\t', r[i][0], r[i][1]*100, '%')
            print('    ', int(r[i][2][0]), int(r[i][2][1]), int(r[i][2][2]),
                  int(r[i][2][3]))
            # if b'cross' == r[i][0]:
            #     print('\t', r[i][0], r[i][1]*100, '%')
            #     # print('\t', r[i][2])
        print('\n', 'Darknet waiting for rgb_img')
        time.sleep(0.5)
        try:
            self.boxes_pub.publish(boxes)
        except rospy.ROSException as e:  # publish() raises ROSException, not CvBridgeError
            print(e)
Code example #2
def process(msg):
    t_begin = rospy.get_time()
    cv_img = bridge.imgmsg_to_cv2(msg, "bgr8")
    cv2.imwrite(img_path, cv_img)

    with open(img_path, "rb") as img:
        img_bytes = img.read()
        server.send(darksocket.pack(darksocket.Packet.IMAGE, img_bytes))

    res = server.recv()
    t_end = rospy.get_time()

    if telemetry:
        cls()
        rospy.loginfo("%s FPS" % (1 / (t_end - t_begin)))

    if res == darksocket.StreamEvent.DETECTIONS:
        dets = Detections()
        dets.image = msg

        for det in server.stream.detections:
            bbox = BoundingBox()
            bbox.Class = det[0]
            bbox.probability = det[1]
            bbox.xmin = det[2]
            bbox.ymin = det[3]
            bbox.xmax = det[2] + det[4]
            bbox.ymax = det[3] + det[5]

            dets.bboxes.append(bbox)

            if telemetry:
                rospy.loginfo("%s %s" % (bbox.Class, bbox.probability))

        pub_detections.publish(dets)
Code example #3
    def msgDN(self, image, boxes, scores, classes):
        """
        Create the Object Detector message to publish with ROS

        This uses the Darknet BoundingBox[es] messages
        """
        msg = BoundingBoxes()
        msg.header = image.header
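        # scores is shaped [1, num_detections]; np.where(...)[1] picks the
        # indices of detections above the threshold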
        scores_above_threshold = np.where(scores > self.threshold)[1]

        for s in scores_above_threshold:
            # Get the properties
            bb = boxes[0, s, :]
            sc = scores[0, s]
            cl = classes[0, s]

            # Create the bounding box message
            detection = BoundingBox()
            detection.Class = self.category_index[int(cl)]['name']
            detection.probability = sc
            detection.xmin = int((image.width - 1) * bb[1])
            detection.ymin = int((image.height - 1) * bb[0])
            detection.xmax = int((image.width - 1) * bb[3])
            detection.ymax = int((image.height - 1) * bb[2])

            msg.boundingBoxes.append(detection)

        return msg
Code example #4
    def __init__(self):

        # cv_bridge handles
        self.cv_bridge = CvBridge()
        
        # outdoor object
        self.person_bbox        = BoundingBox()
        self.bicycle_bbox       = BoundingBox()
        self.car_bbox           = BoundingBox()
        self.motorcycle_bbox    = BoundingBox()
        self.bus_bbox           = BoundingBox()
        self.truck_bbox         = BoundingBox()
        self.traffic_light_bbox = BoundingBox()
        self.stop_sign_bbox     = BoundingBox()


        # ROS PARAM
        self.m_pub_threshold = rospy.get_param('~pub_threshold', 0.70)

        # detect width height
        self.WIDTH  = 50
        self.HEIGHT = 50

        # Subscribe
        sub_camera_rgb    =  rospy.Subscriber('/camera/color/image_raw', Image, self.CamRgbImageCallback)
        sub_camera_depth  =  rospy.Subscriber('/camera/aligned_depth_to_color/image_raw', Image, self.CamDepthImageCallback)
        sub_darknet_bbox  =  rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, self.DarknetBboxCallback)

        return
Code example #5
    def create_d_msgs_box(self, track):
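        # the numeric box id is built from the first three hex digits of the track's string id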
        one_box = BoundingBox()

        one_box.id = int(track.id[:3], 16)
        one_box.class_id = "face"
        one_box.probability = float(track.score)
        one_box.xmin = int(track.box[0])
        one_box.ymin = int(track.box[1])
        one_box.xmax = int(track.box[2])
        one_box.ymax = int(track.box[3])

        return one_box
Code example #6
def handle_get_obj_loc(req):

    # Subscribe to the ROS topic that contains the grasps.
    sub = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes,
                           callback)
    # print req

    # wait until the subscriber gets location objects from topic
    while not obj_loc:
        print('.')
        rospy.sleep(0.1)  # avoid a hot busy-wait while detections arrive

    if req.obj_name == "teddy_bear":
        req.obj_name = "teddy bear"
    if req.obj_name == "sports`_ball":
        req.obj_name = "sports ball"

    #  get the location of the requested object
    # - if multiple objects, return one with highest probability!
    top_item = BoundingBox(probability=0.0)
    for item in obj_loc:
        if item.Class == req.obj_name:
            if item.probability > top_item.probability:
                top_item = item

                # TODO: move this logic into main script for now!!!
                # calculate the center of the bounding box
                # top_item.x = (item.xmax - item.xmin)/2 + item.xmin
                # top_item.y = (item.ymax - item.ymin)/2 + item.ymin

    print "Returning %s with highest probability [%s]" % (req.obj_name,
                                                          top_item)
    return [
        top_item.Class, top_item.probability, top_item.xmin, top_item.ymin,
        top_item.xmax, top_item.ymax
    ]
Code example #7
    def __init__(self):

        # cv_bridge handles
        self.cv_bridge = CvBridge()

        self.person_bbox = BoundingBox()

        # ROS PARAM
        self.m_pub_threshold = rospy.get_param('~pub_threshold', 0.70)

        # detect width height
        self.WIDTH  = 50
        self.HEIGHT = 50

        # Publish
        self.pub_cmd = rospy.Publisher('cmd_vel', Twist, queue_size=10)

        # Twist message
        self.t = Twist()

        # Subscribe
        sub_camera_rgb    =  rospy.Subscriber('/camera/color/image_raw', Image, self.CamRgbImageCallback)
        sub_camera_depth  =  rospy.Subscriber('/camera/aligned_depth_to_color/image_raw', Image, self.CamDepthImageCallback)
        sub_darknet_bbox  =  rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, self.DarknetBboxCallback)

        return
Code example #8
    def compute_predicted_bounding_box(self):
        times = []
        x_samples = []
        y_samples = []
        last_box = None
        # figure out first and last "real" bounding_box samples
        for sample in self.bounding_boxes:
            if sample[1] is not None:
                last_box = sample[1]
                times.append(sample[0])
                ctr = center_of_bounding_box(sample[1])
                x_samples.append(ctr[0])
                y_samples.append(ctr[1])
        if len(times) < 3:
            return None
        x_m, x_b = np.polyfit(times, x_samples, 1)
        y_m, y_b = np.polyfit(times, y_samples, 1)
        # figure out average bounding_box change per unit time
        # compute the expected offset
        t = rospy.get_time()
        pred_ctr = np.array([x_m * t + x_b, y_m * t + y_b])
        offset_ctr = pred_ctr - center_of_bounding_box(last_box)
        # add this to the most recent sample to get a prediction
        time_since_last_sample = t - times[-1]
        prediction_confidence = max(
            0, last_box.probability -
            time_since_last_sample / CONFIDENCE_DECAY_TIME)
        return BoundingBox(Class="person",
                           probability=prediction_confidence,
                           xmin=last_box.xmin + offset_ctr[0],
                           xmax=last_box.xmax + offset_ctr[0],
                           ymin=last_box.ymin + offset_ctr[1],
                           ymax=last_box.ymax + offset_ctr[1])
Code example #9
    def camera_image_callback(self, image, camera):
        """Gets images from camera to generate detections on

        Computes where the bounding boxes should be in the image, and
        fakes YOLO bounding boxes output as well as publishing a debug
        image showing where the detections are if turned on

        Parameters
        ----------
        image : sensor_msgs.Image
            The image from the camera to create detections for
        camera : Camera
            Holds camera parameters for projecting points

        """

        cv_image = self.bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")

        if camera.is_ready():

            # Fake darknet yolo detections message
            bounding_boxes = BoundingBoxes()

            bounding_boxes.header = image.header
            bounding_boxes.image_header = image.header
            bounding_boxes.image = image
            bounding_boxes.bounding_boxes = []

            for _, obj in self.objects.items():

                detections = self.get_detections(obj, camera, image.header.stamp)

                if detections is not None:

                    if camera.debug_image_pub:

                        self.render_debug_context(cv_image, detections, camera)

                    for det in detections:

                        bounding_box = BoundingBox()

                        bounding_box.Class = det[2]
                        bounding_box.probability = 1.0
                        bounding_box.xmin = int(det[0][0])
                        bounding_box.xmax = int(det[0][1])
                        bounding_box.ymin = int(det[0][2])
                        bounding_box.ymax = int(det[0][3])
                        bounding_boxes.bounding_boxes.append(bounding_box)

            # Only publish detection if there are boxes in it
            if bounding_boxes.bounding_boxes:
                self.darknet_detection_pub.publish(bounding_boxes)

        if camera.debug_image_pub:
            image_message = self.bridge.cv2_to_imgmsg(cv_image, encoding="rgb8")
            camera.debug_image_pub.publish(image_message)
Code example #10
    def DarknetBboxCallback(self, darknet_bboxs):
        bboxs = darknet_bboxs.bounding_boxes
        person_bbox = BoundingBox()
        if len(bboxs) != 0:
            for bb in bboxs:
                if bb.Class == 'person' and bb.probability >= self.m_pub_threshold:
                    person_bbox = bb
        self.person_bbox = person_bbox
Code example #11
File: 0728.py Project: mywnajsldkf/UR-ROS-Control
    def __init__(self):
        roscpp_initialize(sys.argv)
        rospy.init_node('ur3_move', anonymous=True)
      
        self.detected = {}
        self.detected_names = rospy.get_param('/darknet_ros/yolo_model/detection_classes/names')
        self.object_pose_sub = rospy.Subscriber('/cluster_decomposer/centroid_pose_array',PoseArray,self.collectJsk)
        self.listener = tf.TransformListener()

        # self.object_name_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',BoundingBoxes,self.collect)

        self.tomatoboundingBox = BoundingBox()

        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory,queue_size=20)        

        global goal_x
        global goal_y
        global goal_z

        global goal_roll
        global goal_pitch
        global goal_yaw

        self.distance = 0

        self.trans = [0.0, 0.0, 0.0]

#        self.marker = []
        self.tomato = []
        self.position_list = []
        self.orientation_list = []

        self.tomato_pose = Pose()
        self.goalPoseFromTomato = Pose()
        self.br = tf.TransformBroadcaster()

        self.calculated_tomato = Pose()

        self.scene = PlanningSceneInterface()
        self.robot_cmd = RobotCommander()

        self.robot_arm = MoveGroupCommander(GROUP_NAME_ARM)
        self.robot_arm.set_goal_orientation_tolerance(0.005)
        self.robot_arm.set_planning_time(5)
        self.robot_arm.set_num_planning_attempts(5)

        self.display_trajectory_publisher = display_trajectory_publisher

        rospy.sleep(2)
        
        # Allow replanning to increase the odds of a solution
        self.robot_arm.allow_replanning(True)
Code example #12
    def image_callback(self, msg):
        # print("Received an image!")
        try:
            # Convert your ROS Image message to numpy image data type
            cv2_img = np.frombuffer(msg.data, dtype=np.uint8).reshape(
                msg.height, msg.width, -1)
            image_ori = cv2.resize(cv2_img, (512, 512))
            #remove alpha channel if it exists
            if image_ori.shape[-1] == 4:
                image_ori = image_ori[..., :3]
            #normalize the image
            image = self.normalize(image_ori)

            with torch.no_grad():
                image = torch.Tensor(image)
                if torch.cuda.is_available():
                    image = image.cuda()
                boxes = self.model_.get_boxes(
                    image.permute(2, 0, 1).unsqueeze(0))

            Boxes_msg = BoundingBoxes()
            Boxes_msg.image_header = msg.header
            Boxes_msg.header.stamp = rospy.Time.now()
            for box in boxes[0]:
                # print(box.confidence)
                confidence = float(box.confidence)
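                # box.box holds normalized corners; scaling by the 512x512
                # network input size converts them to pixel coordinates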
                box = (box.box * torch.Tensor([512] * 4)).int().tolist()
                if confidence > 0.35:
                    cv2.rectangle(image_ori, (box[0], box[1]),
                                  (box[2], box[3]), (255, 0, 0), 2)
                    cv2.putText(image_ori,
                                str(confidence)[:4], (box[0] - 2, box[1] - 2),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                    # msg_frame = self.bridge.cv2_to_imgmsg(image_ori)
                    detection_box = BoundingBox()
                    # detection_box.Class=str(box.class_id)
                    detection_box.xmin = box[0]
                    detection_box.ymin = box[1]
                    detection_box.xmax = box[2]
                    detection_box.ymax = box[3]
                    detection_box.probability = confidence
                    Boxes_msg.bounding_boxes.append(detection_box)

            msg_frame = self.bridge.cv2_to_imgmsg(image_ori)
            self.img_pub.publish(msg_frame)
            self.ret_pub.publish(Boxes_msg)

        except CvBridgeError as e:
            print(e)
        if self.savefigure:
            cv2.imwrite('new_image.jpeg', cv2_img)
            print('save picture')
            # self.detected_msg.data="Take a photo"
            self.savefigure = False
Code example #13
    def boxcallback(self, msg):
        dets = []
        for i in range(len(msg.bounding_boxes)):
            bbox = msg.bounding_boxes[i]
            dets.append(
                np.array([
                    bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax,
                    bbox.probability
                ]))
        dets = np.array(dets)
        start_time = time.time()
        trackers = self.update(dets)
        cycle_time = time.time() - start_time
        print(cycle_time)

        r = BoundingBoxes()
        for d in range(len(trackers)):
            # create a fresh BoundingBox per track; reusing a single instance
            # would make every appended entry alias the last track's values
            rb = BoundingBox()
            rb.probability = 1
            rb.xmin = int(trackers[d, 0])
            rb.ymin = int(trackers[d, 1])
            rb.xmax = int(trackers[d, 2])
            rb.ymax = int(trackers[d, 3])
            rb.id = int(trackers[d, 4])
            rb.Class = 'tracked'
            r.bounding_boxes.append(rb)
            if self.img_in == 1 and self.display:
                res = trackers[d].astype(np.int32)
                rgb = self.colours[res[4] % 32, :] * 255
                cv2.rectangle(self.img, (res[0], res[1]), (res[2], res[3]),
                              (rgb[0], rgb[1], rgb[2]), 2)
                cv2.putText(self.img, "ID : %d" % (res[4]), (res[0], res[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 85, 200), 2)
        if self.img_in == 1 and self.display:
            try:
                self.image = self.bridge.cv2_to_imgmsg(self.img, "bgr8")
                self.image.header.stamp = rospy.Time.now()
                self.pubimage.publish(self.image)
            except CvBridgeError as e:
                pass
        r.header.stamp = rospy.Time.now()
        self.pubb.publish(r)
        return
Code example #14
    def __init__(self):
        self.detected = {}
        self.detected_names = rospy.get_param(
            '/darknet_ros/yolo_model/detection_classes/names')
        self.object_pose_sub = rospy.Subscriber(
            '/cluster_decomposer/centroid_pose_array', PoseArray,
            self.collectJsk)
        self.listener = tf.TransformListener()
        self.trans = [0.0, 0.0, 0.0]

        self.object_name_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',
                                                BoundingBoxes, self.collect)

        self.tomatoboundingBox = BoundingBox()
Code example #15
    def _write_message(self, detection_results, boxes, scores, classes):
        """ populate output message with input header and bounding boxes information """
        if boxes is None:
            return None
        for box, score, category in zip(boxes, scores, classes):
            # Populate darknet message
            left, bottom, right, top = box
            detection_msg = BoundingBox()
            detection_msg.xmin = left
            detection_msg.xmax = right
            detection_msg.ymin = top
            detection_msg.ymax = bottom
            detection_msg.probability = score
            detection_msg.Class = category
            detection_results.bounding_boxes.append(detection_msg)
        return detection_results
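
A sketch of a call site for this helper (names and values hypothetical, not part of the original): the caller supplies a pre-stamped BoundingBoxes message plus parallel box, score, and class arrays.

results = BoundingBoxes()
results.header = image_msg.header
boxes = [(12, 300, 80, 120)]  # (left, bottom, right, top), matching the unpacking above
scores = [0.87]
classes = ['person']
results = node._write_message(results, boxes, scores, classes)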
Code example #16
    def DarknetBboxCallback(self, darknet_bboxs):
        bboxs = darknet_bboxs.bounding_boxes

        person_bbox = BoundingBox()
        bicycle_bbox = BoundingBox()
        car_bbox = BoundingBox()
        motorcycle_bbox = BoundingBox()
        bus_bbox = BoundingBox()
        truck_bbox = BoundingBox()
        traffic_light_bbox = BoundingBox()
        stop_sign_bbox = BoundingBox()

        if len(bboxs) != 0:
            for bb in bboxs:
                if bb.probability < self.m_pub_threshold:
                    continue
                if bb.Class == 'person':
                    person_bbox = bb
                elif bb.Class == 'bicycle':
                    bicycle_bbox = bb
                elif bb.Class == 'car':
                    car_bbox = bb
                elif bb.Class == 'motorcycle':
                    motorcycle_bbox = bb
                elif bb.Class == 'bus':
                    bus_bbox = bb
                elif bb.Class == 'truck':
                    truck_bbox = bb
                elif bb.Class == "traffic light":
                    traffic_light_bbox = bb
                elif bb.Class == "stop sign":
                    stop_sign_bbox = bb

        self.person_bbox = person_bbox
        self.bicycle_bbox = bicycle_bbox
        self.car_bbox = car_bbox
        self.motorcycle_bbox = motorcycle_bbox
        self.bus_bbox = bus_bbox
        self.truck_bbox = truck_bbox
        self.traffic_light_bbox = traffic_light_bbox
        self.stop_sign_bbox = stop_sign_bbox
Code example #17
File: store_features.py Project: Ruthrash/JRMOT_ROS
    def GetBBoxesMsg(self, label_file: str):
        output_bboxes_msg = BoundingBoxes()
        with open(label_file, 'r') as file:
            id_ = 0
            for line in file:
                gt = line.split()
                bbox_msg = BoundingBox()
                #print(gt)
                bbox_msg.xmin = int(gt[5])
                bbox_msg.ymin = int(gt[6])
                bbox_msg.xmax = int(gt[7])
                bbox_msg.ymax = int(gt[8])
                bbox_msg.id = id_
                bbox_msg.Class = gt[0]
                id_ += 1
                output_bboxes_msg.bounding_boxes.append(bbox_msg)
        return output_bboxes_msg
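
A label line this parser would accept, inferred from the column indices used above (values hypothetical):

person 0 0 0 0 12 34 56 78

which yields Class='person', the box (xmin=12, ymin=34, xmax=56, ymax=78), and a sequential id.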
Code example #18
File: test.py Project: shingakitakurou/adp
    def __init__(self):

        # cv_bridge handles
        self.cv_bridge = CvBridge()

        self.person_bbox = BoundingBox()

        # ROS PARAM
        self.m_pub_threshold = rospy.get_param('~pub_threshold', 0.40)

        # Subscribe
        sub_camera_rgb     =  rospy.Subscriber('/Sensor/RealSense435i/color/image_raw', Image, self.CamRgbImageCallback)
        sub_camera_depth   =  rospy.Subscriber('/camera/aligned_depth_to_color/image_raw', Image, self.CamDepthImageCallback)
        sub_darknet_bbox   =  rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, self.DarknetBboxCallback)

        return
Code example #19
def msg(image, boxes):
    """
    Create the Darknet BoundingBox[es] messages
    """
    msg = BoundingBoxes()
    msg.header = image.header

    for (x, y, w, h) in boxes:
        detection = BoundingBox()
        detection.Class = "human"
        detection.probability = 1  # Not really...
        detection.xmin = x
        detection.ymin = y
        detection.xmax = x + w
        detection.ymax = y + h
        msg.boundingBoxes.append(detection)

    return msg
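
Since the boxes here are (x, y, w, h) tuples, OpenCV's HOG people detector is a natural source for them. A minimal sketch of a caller (the publisher and image conversion are assumed, not part of the original):

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

def image_callback(image, frame):
    # detectMultiScale returns (rects, weights); each rect is (x, y, w, h)
    rects, _ = hog.detectMultiScale(frame)
    boxes_pub.publish(msg(image, rects))  # boxes_pub is a hypothetical Publisher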
Code example #20
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded', 'aborted','option1','option2','option3'],
                                    input_keys=['input_planning_group'])


        self.detected = {}
        self.detection_names = rospy.get_param('/darknet_ros/yolo_model/detection_classes/names')
        self.object_pose_sub = rospy.Subscriber('/cluster_decomposer/centroid_pose_array', PoseArray, self.collectJsk)
        self.listener = tf.TransformListener()


        self.object_name_sub = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, self.collect)  # separate handle from the pose subscriber above


        self.cupboundingBox = BoundingBox()
        self.set_joint_position = rospy.ServiceProxy('/open_manipulator/goal_joint_space_path', SetJointPosition)
        self.set_kinematics_position = rospy.ServiceProxy('/open_manipulator/goal_task_space_path_position_only', SetKinematicsPose)
        self.set_joint_position_from_present = rospy.ServiceProxy('/open_manipulator/goal_joint_space_path_from_present', SetJointPosition)
        self.open_manipulator_joint_states_sub_ = rospy.Subscriber('/open_manipulator/joint_states', JointState, self.jointStatesCallback)
        self.open_manipulator_kinematics_pose_sub_ = rospy.Subscriber('/open_manipulator/gripper/kinematics_pose', KinematicsPose, self.kinematicsPoseCallback)
        self.open_manipulator_states_ = rospy.Subscriber('/open_manipulator/states', OpenManipulatorState, self.StatesCallback)
        self.last_detect_time = rospy.get_rostime()
        self.jointStates = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.kinematicsStates = [0.0, 0.0, 0.0]
        rospy.logwarn(' cheersActionByYoloJsk ')
        self.trans = [0.0 , 0.0 , 0.0]
        self.command_Key_sub = rospy.Subscriber('command_Key', String, self.commandKeyCallback)
        self.mode = 9
        self.last_detect_time = rospy.get_rostime()
        self.last_yolodetect_time = rospy.get_rostime()
        self.open_manipulator_moving_state = "STOPPED"

        self.StepCupTracking = Enum('StepCupTracking',
                                       'waiting_signal \
                                        go_backward_location \
                                        wait_detecting_tf_of_cup \
                                        go_base_location \
                                        go_tf_location_of_cup \
                                        wait_finish_movement \
                                        close_cup \
                                        kick_cup \
                                        exit')

        self.pre_step = self.StepCupTracking.exit.value
        #self.step = self.StepCupTracking.waiting_signal.value
        self.step = self.StepCupTracking.go_backward_location.value
Code example #21
    def __init__(self):
        rospy.init_node("calibration", anonymous=False)
        rospy.Subscriber("lidar_pub", PointCloud, self.get_lidar)
        rospy.Subscriber("obstacles", Obstacles, self.get_obstacles)
        rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes,
                         self.get_bbox)
        self.lidar_msg = PointCloud()
        self.obstacle_msg = Obstacles()
        self.new_obstacle = Obstacles()
        self.bbox_msg = BoundingBox()
        self.flag = False
        # pinhole intrinsics: fx, fy on the diagonal, principal point (cx, cy) in the last column
        self.camera_matrix = np.array([[1166.853156, 0.000000, 958.517180],
                                       [0.000000, 1172.932471, 556.692563],
                                       [0.000000, 0.000000, 1.000000]])
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.circle_z()
            rate.sleep()
Code example #22
def timercb(event):
    global seq_prev
    global detectedBoxes
    global automode
    global state_prev
    global state_pp

    if automode == 0:
        state_prev = 0  #reset
        state_pp = 0  #reset
        return None
    if detectedBoxes.header.seq == seq_prev:
        #if no new objects detected at all
        pub_pidEn.publish(0)
        rospy.loginfo('Timercb: No detection since prev')
        # and publish zero velocity
        twist = Twist()
        pub.publish(twist)
        return None
    seq_prev = detectedBoxes.header.seq
    corridorBox = BoundingBox()
    firstCorridor = 1
    for box in detectedBoxes.bounding_boxes:
        if box.Class == 'corridor':
            if firstCorridor == 1:
                corridorBox = box
                firstCorridor += 1
            if ((box.xmax - box.xmin) <
                (corridorBox.xmax - corridorBox.xmin)):  #Finding smallest box
                corridorBox = box

    if corridorBox.Class == "":
        pub_pidEn.publish(0)
        twist = Twist()
        pub.publish(twist)
        rospy.loginfo('Timercb: Detected but no corridor')
    else:
        pub_pidEn.publish(1)
        corridorCent = (corridorBox.xmin + corridorBox.xmax) / 2
        pub_state.publish(0.6 * corridorCent + 0.3 * state_prev +
                          0.1 * state_pp)  #smoothing
        state_pp = state_prev
        state_prev = corridorCent
Code example #23
    def __init__(self):

        # cv_bridge handles
        self.cv_bridge = CvBridge()

        self.person_bbox = BoundingBox()

        # ROS PARAM
        self.m_pub_threshold = rospy.get_param('~pub_threshold', 0.40)

        # Subscribe
        sub_camera_rgb = rospy.Subscriber('/camera/rgb/image_raw', Image,
                                          self.CamRgbImageCallback)
        sub_camera_depth = rospy.Subscriber('/camera/depth/image_raw', Image,
                                            self.CamDepthImageCallback)
        sub_darknet_bbox = rospy.Subscriber('/darknet_ros/bounding_boxes',
                                            BoundingBoxes,
                                            self.DarknetBboxCallback)
        self._pub = rospy.Publisher('depth', some_position, queue_size=10)
        self._msg = some_position()

        return
Code example #24
    def bbCallback(self, data):
        rospy.loginfo("BBox callback")
        buoybbs = []
        bbs = data.bounding_boxes
        for bb in bbs:
            # darknet boxes expose Class/probability and corner coordinates;
            # derive center and size from the corners (class id 0 is assumed
            # to be the buoy class)
            if bb.id == 0 and bb.probability > 0.5:
                buoybb = {'class': bb.id,
                          'probability': bb.probability,
                          'x': (bb.xmin + bb.xmax) / 2, 'y': (bb.ymin + bb.ymax) / 2,
                          'w': bb.xmax - bb.xmin, 'h': bb.ymax - bb.ymin}
                buoybbs.append(buoybb)

        if buoybbs:
            nearestBuoy = self.nearestBuoy(buoybbs)
	    rospy.loginfo("nearest buoy")
	    rospy.loginfo(nearestBuoy)
            goalX = (nearestBuoy['x'] - 640) * self.Kx
            goalY = (nearestBuoy['y'] - 320) * self.Ky + self.pressure 
            resultX = self.send_goal('depthServer', depthAction, depthGoal(depth_setpoint=goalY))
            resultY = self.send_goal('swayServer', timeAction, timeGoal(time_setpoint=goalX))
            if resultX == "depthServerReached" and resultY == "swayServerReached":
                self.buoySuccess = True
Code example #25
    def test_tracker_callback(self):
        args = None
        rclpy.init(args=args)
        node = ObjectTrackingNode()

        bboxes_msg = BoundingBoxes()

        for track in self.tracks:
            bbox_msg = BoundingBox()

            bbox_msg.xmin = track["x1"]
            bbox_msg.ymin = track["y1"]
            bbox_msg.xmax = track["x2"]
            bbox_msg.ymax = track["y2"]

            bboxes_msg.bounding_boxes.append(bbox_msg)

        node.tracker_callback(bboxes_msg)

        # assumed teardown: shut rclpy down so repeated test runs start clean
        node.destroy_node()
        rclpy.shutdown()
Code example #26
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from geometry_msgs.msg import *
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, CollisionObject
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random

from std_srvs.srv import Empty
from sensor_msgs.msg import PointCloud2
from darknet_ros_msgs.msg import BoundingBox

GROUP_NAME_ARM = "manipulator"
FIXED_FRAME = 'world'

#############################################
# Global Variables                          #
#############################################
bbox = BoundingBox()
pre_bbox = BoundingBox()

pc = PointCloud2()
center = Vector3()
point = Vector3()
points = []
depth = 0.0
'''
goal_x = 0.0
goal_y = 0.0
goal_z = 0.0
'''


def bbox_callback(msg):
    # assumed minimal body: cache the most recent bounding box message
    global bbox
    bbox = msg
Code example #27
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from darknet_ros_msgs.msg import BoundingBox
from darknet_ros_msgs.msg import BoundingBoxes
from geometry_msgs.msg import Twist

detectedBoxes = BoundingBoxes()
detectedBoxes.header.seq = 1
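# seeding seq lets timercb detect whether any new boxes have arrived since the last tick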

corridorBox = BoundingBox()


def callback(data):
    global automode
    if (automode == False):
        pub_pidEn.publish(0)
        return None

    global detectedBoxes
    detectedBoxes = data
    #rospy.loginfo(detectedBoxes.header.seq)


def timercb(event):
    global seq_prev
    global detectedBoxes
    global automode
    global state_prev
    global state_pp
Code example #28
File: ros-sort.py Project: wuqiangch/ros-yolo-sort
if __name__ == '__main__':
    colours = np.random.rand(32, 3) #used only for display
    mot_tracker = Sort(max_age=200, min_hits=1) #create instance of the SORT tracker

    while True:
        try:
            start_time = time.time()
            if mot_tracker.bbox_checkin==1:
                trackers = mot_tracker.update(mot_tracker.dets)
                mot_tracker.bbox_checkin=0
            else:
                trackers = mot_tracker.update(np.empty((0,5)))

            r = BoundingBoxes()
            for d in range(len(trackers)):
                rb = BoundingBox()
                rb.probability=1
                rb.xmin = int(trackers[d,0])
                rb.ymin = int(trackers[d,1])
                rb.xmax = int(trackers[d,2])
                rb.ymax = int(trackers[d,3])
                rb.id = int(trackers[d,4])
                rb.Class = 'tracked'
                r.bounding_boxes.append(rb)
                if mot_tracker.img_in==1 and mot_tracker.display:
                    res = trackers[d].astype(np.int32)
                    rgb=colours[res[4]%32,:]*255
                    cv2.rectangle(mot_tracker.img, (res[0],res[1]), (res[2],res[3]), (rgb[0],rgb[1],rgb[2]), 6)
                    cv2.putText(mot_tracker.img, "ID : %d"%(res[4]), (res[0],res[1]), cv2.FONT_HERSHEY_SIMPLEX, 1.6, (200,85,200), 6)
            if mot_tracker.img_in==1 and mot_tracker.display:
                try:
                    # assumed continuation, mirroring the callback version in code example #13
                    mot_tracker.image = mot_tracker.bridge.cv2_to_imgmsg(mot_tracker.img, "bgr8")
                    mot_tracker.pubimage.publish(mot_tracker.image)
                except CvBridgeError:
                    pass
Code example #29
    def resultCallback(self, msg):
        #if self.is_turn_angular and self.is_turn_linear and self.is_pub_nav2arm == False:
        if self.is_turn_linear and self.is_pub_nav2arm == False:
            nav2arm_msg = String()
            nav2arm_msg.data = 'catch'
            print('catch')
            self.pub_nav2arm.publish(nav2arm_msg)
            self.is_pub_nav2arm = True

        if self.is_get_image:

            boundingboxes = msg.bounding_boxes
            # print(boundingboxes)
            for bx in boundingboxes:
                if bx.Class == 'bottle':
                    xmin = bx.xmin
                    xmax = bx.xmax
                    ymin = bx.ymin
                    ymax = bx.ymax
                    middle_x = (xmin + xmax) / 2
                    self.is_find_bottle = True

            if self.is_find_bottle:
                if self.is_turn_angular == False:
                    # 320 is the horizontal center of the (assumed) 640-pixel-wide frame
                    if middle_x - 320 > 50:
                        self.twist_msg.angular.z = -0.3
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    elif middle_x - 320 > 10:
                        self.twist_msg.angular.z = -0.1
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    elif middle_x - 320 < -50:
                        self.twist_msg.angular.z = 0.3
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    elif middle_x - 320 < -10:
                        self.twist_msg.angular.z = 0.1
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    else:
                        self.is_turn_angular = True
                        self.is_get_image = False
                        print('[INFO] The Angular is Correct')
                if self.is_turn_angular and self.is_turn_linear == False:
                    self.avg_depth = 0.0
                    count = (xmax - xmin) * (ymax - ymin)
                    for i in range(int(xmin), int(xmax)):
                        for j in range(int(ymin), int(ymax)):
                            # numpy images index as [row, col] = [y, x]
                            tmp_depth = self.depth_img[j, i]
                            # print(tmp_depth)  # per-pixel printing floods the console
                            if tmp_depth > 1000:
                                count = count - 1
                            else:
                                self.avg_depth = self.avg_depth + tmp_depth
                    self.avg_depth = self.avg_depth / max(count, 1)  # guard against division by zero
                    #print(self.avg_depth)
                    if self.avg_depth - 5 > 500:
                        self.twist_msg.linear.x = 0.3
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    elif self.avg_depth - 5 > 5:
                        self.twist_msg.linear.x = 0.1
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    elif self.avg_depth - 5 < -500:
                        self.twist_msg.linear.x = -0.3
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    elif self.avg_depth - 5 < -5:
                        self.twist_msg.linear.x = -0.1
                        self.pub_twist.publish(self.twist_msg)
                        self.is_get_image = False
                    else:
                        self.is_turn_linear = True
                        self.is_get_image = False
                        print('[INFO] The Distance is Correct')

            else:
                print('[INFO] Cannot Find Target Object')
        else:
            print('[INFO] Cannot Get Valid Image')
Code example #30
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX,
                                                  0.7, 2)  # Get font size
            label_ymin = max(
                ymin, labelSize[1] +
                10)  # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin - labelSize[1] - 10),
                          (xmin + labelSize[0], label_ymin + baseLine - 10),
                          (255, 255, 255),
                          cv2.FILLED)  # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
                        2)  # Draw label text

            boundingbox = BoundingBox(xmin=xmin,
                                      xmax=xmax,
                                      ymin=ymin,
                                      ymax=ymax,
                                      id=id_count,
                                      Class=object_name,
                                      probability=scores[i])
            Boxes.append(boundingbox)
            id_count = id_count + 1

    ################################################################
    #
    # BoundingBox Publish Start
    #
    ################################################################

    array_forPublish = BoundingBoxes(bounding_boxes=Boxes)  # message fields can be set via constructor kwargs
    motor.publish(array_forPublish)