Code Example #1
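 # Converts each pose in a PoseArray-style message into a BoundingBox (reusing the
 # incoming header) and forwards the resulting BoundingBoxArray to box_msg_callback.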
 def pose_msg_callback(self, pose, classes):
     bboxes = BoundingBoxArray(header=pose.header)
     for p in pose.poses:
         b = BoundingBox(header=pose.header)
         b.pose = p
         bboxes.boxes.append(b)
     self.box_msg_callback(bboxes, classes)
Code Example #2
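# Publishes a single 0.2 m "target" box at candidate_pose (presumably a global Pose set
# by another callback) and a 5x5x5 grid of 0.1 m "candidate" boxes around (0, 0, 2),
# both in the "world" frame.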
def callback(msg):
    if not candidate_pose:
        return
    target_array = BoundingBoxArray()
    target_array.header.stamp = msg.header.stamp
    target_array.header.frame_id = "world"
    target = BoundingBox()
    target.header.stamp = msg.header.stamp
    target.header.frame_id = "world"
    target.pose = candidate_pose
    target.dimensions.x = 0.2
    target.dimensions.y = 0.2
    target.dimensions.z = 0.2
    target_array.boxes = [target]
    pub_target.publish(target_array)
    candidate_array = BoundingBoxArray()
    candidate_array.header.stamp = msg.header.stamp
    candidate_array.header.frame_id = "world"
    for x in [-0.2, -0.1, 0.0, 0.1, 0.2]:
        for y in [-0.2, -0.1, 0.0, 0.1, 0.2]:
            for z in [-0.2, -0.1, 0.0, 0.1, 0.2]:
                candidate = BoundingBox()
                candidate.header.stamp = msg.header.stamp
                candidate.header.frame_id = "world"
                candidate.pose.position.z = 2 + z
                candidate.pose.position.x = x
                candidate.pose.position.y = y
                candidate.pose.orientation.w = 1.0
                candidate.dimensions.x = 0.1
                candidate.dimensions.y = 0.1
                candidate.dimensions.z = 0.1
                candidate_array.boxes.append(candidate)
    pub_candidate.publish(candidate_array)
Code Example #3
def callback(msg):
    if not candidate_pose:
        return
    target_array = BoundingBoxArray()
    target_array.header.stamp = msg.header.stamp
    target_array.header.frame_id = "world"
    target = BoundingBox()
    target.header.stamp = msg.header.stamp
    target.header.frame_id = "world"
    target.pose = candidate_pose
    target.dimensions.x = 0.2
    target.dimensions.y = 0.2
    target.dimensions.z = 0.2
    target_array.boxes = [target]
    pub_target.publish(target_array)
    candidate_array = BoundingBoxArray()
    candidate_array.header.stamp = msg.header.stamp
    candidate_array.header.frame_id = "world"
    for x in [-0.2, -0.1, 0.0, 0.1, 0.2]:
        for y in [-0.2, -0.1, 0.0, 0.1, 0.2]:
            for z in [-0.2, -0.1, 0.0, 0.1, 0.2]:
                candidate = BoundingBox()
                candidate.header.stamp = msg.header.stamp
                candidate.header.frame_id = "world"
                candidate.pose.position.z = 2 + z
                candidate.pose.position.x = x
                candidate.pose.position.y = y
                candidate.pose.orientation.w = 1.0
                candidate.dimensions.x = 0.1
                candidate.dimensions.y = 0.1
                candidate.dimensions.z = 0.1
                candidate_array.boxes.append(candidate)
    pub_candidate.publish(candidate_array)
Code Example #4
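    # Packs 2D detections (x, y, w, h) whose class is "person" into a BoundingBoxArray,
    # storing the pixel position/size in the pose/dimensions fields and the detection
    # index in the label field.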
    def to_msg(self, boxes, class_ids, header):
        box_arr = BoundingBoxArray()
        box_arr.header = header

        for i in range(len(boxes)):
            if str(self.classes[class_ids[i]]) == "person":
                x, y, w, h = boxes[i]
                box = BoundingBox()
                box.label = i
                box.value = 0

                box.pose.position.x = x
                box.pose.position.y = y
                box.pose.position.z = 0

                box.pose.orientation.x = 0
                box.pose.orientation.y = 0
                box.pose.orientation.z = 0
                box.pose.orientation.w = 1  # identity orientation (no rotation)

                box.dimensions.x = w
                box.dimensions.y = h
                box.dimensions.z = 0

                box_arr.boxes.append(box)

        return box_arr
Code Example #5
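    # Searches self.boxes for boxes whose label matches req.label and returns the one
    # closest to req.target, plus a flag indicating whether any matching box was found.
    # Boxes reported at the origin are skipped.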
    def get_nearest_box(self, req):
        distance = 100
        has_request_item = False
        nearest_box = BoundingBox()
        for index, box in enumerate(self.boxes.boxes):
            if box.pose.position.x == 0 or \
               box.pose.position.y == 0 or \
               box.pose.position.z == 0:
                rospy.logwarn('boxes has (0, 0, 0) position box')
                continue

            if self.label_lst[box.label] == req.label:
                has_request_item = True
                ref_point = np.array([box.pose.position.x + (box.dimensions.x * 0.5),
                                      box.pose.position.y + (box.dimensions.y * 0.5),
                                      box.pose.position.z + (box.dimensions.z * 0.5)])
                target_point = np.array([req.target.x,
                                         req.target.y,
                                         req.target.z])
                if np.linalg.norm(ref_point - target_point) < distance:
                    nearest_box.pose = box.pose
                    nearest_box.dimensions = box.dimensions
                    distance = np.linalg.norm(ref_point - target_point)

        return nearest_box, has_request_item
Code Example #6
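    # Builds a thin (0.1 m high) axis-aligned BoundingBox in the 'map' frame from a pair
    # of pixel corners, converting pixels to map coordinates via self.img2map unless the
    # corners are already given in map coordinates.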
    def create_bb_msg(self, cur_bb, is_map_coord=False):
        bb_corners = np.array([cur_bb['upper_left_px'], cur_bb['lower_right_px']])
        if is_map_coord is True:
            bb_corners_map_coord = bb_corners
        else:
            bb_corners_px_coord = bb_corners
            bb_corners_map_coord = self.img2map.pixel2map(bb_corners_px_coord)
        bb_corners_mean = np.mean(bb_corners_map_coord, axis=0)
        bb_corners_diff = np.abs(bb_corners_map_coord[0, :] - bb_corners_map_coord[1, :])

        cur_bb_msg = BoundingBox()
        cur_bb_msg.header.frame_id = 'map'
        cur_bb_msg.header.stamp = rospy.Time.now()
        cur_bb_msg.pose.position.x = bb_corners_mean[0]
        cur_bb_msg.pose.position.y = bb_corners_mean[1]
        cur_bb_msg.pose.position.z = 0
        cur_bb_msg.pose.orientation.x = 0
        cur_bb_msg.pose.orientation.y = 0
        cur_bb_msg.pose.orientation.z = 0
        cur_bb_msg.pose.orientation.w = 1
        cur_bb_msg.dimensions.x = bb_corners_diff[0]
        cur_bb_msg.dimensions.y = bb_corners_diff[1]
        cur_bb_msg.dimensions.z = 0.1
        cur_bb_msg.value = 1.0
        cur_bb_msg.label = 1
        return cur_bb_msg
Code Example #7
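# LiDAR callback: converts the PointCloud2 to a numpy array, runs the detector (proc_1),
# and publishes detections above `threshold` as a BoundingBoxArray together with the
# SORT / AB3DMOT / RViz marker messages derived from the same detections.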
def rslidar_callback(msg):
    t_t = time.time()

    #calib = getCalibfromFile(calib_file)
    #calib = getCalibfromROS(calibmsg)

    frame = msg.header.seq

    arr_bbox = BoundingBoxArray()

    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
    print("  ")
    #scores, dt_box_lidar, types = proc_1.run(np_p)
    scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, calib, frame)

    annos_sorted = sortbydistance(dt_box_lidar, scores, types)
    #pp_AB3DMOT_list  = anno_to_AB3DMOT(pred_dict, msg)
    pp_AB3DMOT_list = anno_to_AB3DMOT(dt_box_lidar, scores, types, msg)
    pp_list = anno_to_sort(dt_box_lidar, scores, types)
    pp_3D_list = anno_to_3Dsort(annos_sorted, types)
    MarkerArray_list = anno_to_rviz(dt_box_lidar, scores, types, msg)

    if scores.size != 0:
        for i in range(scores.size):
            if scores[i] > threshold:
                bbox = BoundingBox()
                bbox.header.frame_id = msg.header.frame_id
                bbox.header.stamp = rospy.Time.now()
                q = yaw2quaternion(float(dt_box_lidar[i][6]))
                bbox.pose.orientation.x = q[1]
                bbox.pose.orientation.y = q[2]
                bbox.pose.orientation.z = q[3]
                bbox.pose.orientation.w = q[0]
                bbox.pose.position.x = float(
                    dt_box_lidar[i][0]) - movelidarcenter
                bbox.pose.position.y = float(dt_box_lidar[i][1])
                bbox.pose.position.z = float(dt_box_lidar[i][2])
                bbox.dimensions.x = float(dt_box_lidar[i][3])
                bbox.dimensions.y = float(dt_box_lidar[i][4])
                bbox.dimensions.z = float(dt_box_lidar[i][5])
                bbox.value = scores[i]
                bbox.label = int(types[i])
                arr_bbox.boxes.append(bbox)

    print("total callback time: ", time.time() - t_t)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_bbox.header.stamp = msg.header.stamp
    if len(arr_bbox.boxes) != 0:
        pub_arr_bbox.publish(arr_bbox)
        arr_bbox.boxes = []
    else:
        arr_bbox.boxes = []
        pub_arr_bbox.publish(arr_bbox)

    pubRviz.publish(MarkerArray_list)
    pubSort.publish(pp_list)
    pub3DSort.publish(pp_3D_list)
    pubAB3DMOT.publish(pp_AB3DMOT_list)
Code Example #8
def viz_bbox(position, q, size, i):
    bbox = BoundingBox()
    bbox.pose.position = position
    bbox.pose.orientation = Quaternion(*q)
    bbox.dimensions = size
    bbox.label = i

    return bbox
Code Example #9
def rslidar_callback(msg):
    t_t = time.time()

    arr_bbox = BoundingBoxArray()

    #t = time.time()
    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
    print("  ")
    #print("prepare cloud time: ", time.time() - t)

    #t = time.time()
    scores, dt_box_lidar, types = proc_1.run(np_p)
    #print("network forward time: ", time.time() - t)
    # filter_points_sum = []
    #t = time.time()
    if scores.size != 0:
        for i in range(scores.size):
            bbox = BoundingBox()
            bbox.header.frame_id = msg.header.frame_id
            bbox.header.stamp = rospy.Time.now()

            q = yaw2quaternion(float(dt_box_lidar[i][6]))
            bbox.pose.orientation.x = q[0]
            bbox.pose.orientation.y = q[1]
            bbox.pose.orientation.z = q[2]
            bbox.pose.orientation.w = q[3]

            bbox.pose.position.x = float(dt_box_lidar[i][0])
            bbox.pose.position.y = float(dt_box_lidar[i][1])
            bbox.pose.position.z = float(dt_box_lidar[i][2])
            bbox.dimensions.x = float(dt_box_lidar[i][3])
            bbox.dimensions.y = float(dt_box_lidar[i][4])
            bbox.dimensions.z = float(dt_box_lidar[i][5])
            bbox.value = scores[i]
            bbox.label = types[i]
            arr_bbox.boxes.append(bbox)

            # filter_points = np_p[point_indices[:,i]]
            # filter_points_sum.append(filter_points)
    #print("publish time cost: ", time.time() - t)
    # filter_points_sum = np.concatenate(filter_points_sum, axis=0)
    # filter_points_sum = filter_points_sum[:, :3]

    # print("output of concatenate:", filter_points_sum)
    # filter_points_sum = np.arange(24).reshape(8,3)
    # cluster_cloud = xyz_array_to_pointcloud2(filter_points_sum, stamp=rospy.Time.now(), frame_id=msg.header.frame_id)
    # pub_segments.publish(cluster_cloud)

    print("total callback time: ", time.time() - t_t)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_bbox.header.stamp = rospy.Time.now()
    if len(arr_bbox.boxes) != 0:
        pub_arr_bbox.publish(arr_bbox)
        arr_bbox.boxes = []
    else:
        arr_bbox.boxes = []
        pub_arr_bbox.publish(arr_bbox)
Code Example #10
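    # Groups incoming boxes by label, clusters each group (using a larger threshold for
    # the 'shelf_flont' label), and publishes one merged BoundingBox per cluster whose
    # extents cover all member boxes and whose value is the mean distance estimate.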
    def callback(self, box_msg):
        labeled_boxes = {}
        label_buf = []
        orientation = Pose().orientation

        for box in box_msg.boxes:
            if box.label in label_buf:
                labeled_boxes[box.label] += [self.get_points(box)]
                orientation = box.pose.orientation
            else:
                labeled_boxes[box.label] = [self.get_points(box)]
                label_buf.append(box.label)

        bounding_box_msg = BoundingBoxArray()

        for label, boxes in labeled_boxes.items():
            thresh = self.thresh
            if self.label_lst[label] == 'shelf_flont':
                thresh = 2.0

            clustering = Clustering()
            boxes = np.array(boxes)
            result = clustering.clustering_wrapper(boxes, thresh)

            for cluster in result:
                max_candidates = [
                    boxes[i][0] + (boxes[i][1] * 0.5) for i in cluster.indices
                ]
                min_candidates = [
                    boxes[i][0] - (boxes[i][1] * 0.5) for i in cluster.indices
                ]
                candidates = np.array(max_candidates + min_candidates)

                dimension = candidates.max(axis=0) - candidates.min(axis=0)
                center = candidates.min(axis=0) + (dimension * 0.5)

                distances = self.get_distances(
                    np.array([boxes[i][0] for i in cluster.indices]), [
                        np.linalg.norm(boxes[i][1]) * 0.5
                        for i in cluster.indices
                    ])
                tmp_box = BoundingBox()
                tmp_box.header = box_msg.header
                tmp_box.dimensions.x = dimension[0]
                tmp_box.dimensions.y = dimension[1]
                tmp_box.dimensions.z = dimension[2]
                tmp_box.pose.position.x = center[0]
                tmp_box.pose.position.y = center[1]
                tmp_box.pose.position.z = center[2]
                tmp_box.pose.orientation = orientation
                tmp_box.label = label
                tmp_box.value = distances.mean()
                bounding_box_msg.boxes.append(tmp_box)

            bounding_box_msg.header = box_msg.header
        self.box_pub.publish(bounding_box_msg)
Code Example #11
def callback(msg):
    box_array = BoundingBoxArray()
    box_array.header = msg.header
    for footstep in msg.footsteps:
        box = BoundingBox()
        box.header = msg.header
        box.pose = footstep.pose
        box.dimensions = footstep.dimensions
        box_array.boxes.append(box)
    pub.publish(box_array)
Code Example #12
def callback(msg):
    box_array = BoundingBoxArray()
    box_array.header = msg.header
    for footstep in msg.footsteps:
        box = BoundingBox()
        box.header = msg.header
        box.pose = footstep.pose
        box.dimensions = footstep.dimensions
        box_array.boxes.append(box)
    pub.publish(box_array)
Code Example #13
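# Converts stacked 8-corner cubes given in camera coordinates into oriented BoundingBoxes
# in the LiDAR frame: the corners are transformed with the camera/LiDAR calibration, then
# the center, yaw, and dimensions are estimated from the bottom face.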
def convert(header, xx_cube):
    boundingboxarray_msg = BoundingBoxArray()
    boundingboxarray_msg.header = header

    num_xx = int(xx_cube.shape[0] / 8)
    for i in range(num_xx):
        cube_raw = xx_cube[range(8 * i, 8 * i + 8), :]
        bb_in_camera = np.column_stack((cube_raw, np.ones((8, 1))))
        bb_in_lidar = np.linalg.inv(calib.lidar_to_cam).dot(bb_in_camera.T).T
        boundingbox_msg = BoundingBox()
        boundingbox_msg.header = boundingboxarray_msg.header

        # bounding box center position
        boundingbox_msg.pose.position.x = bb_in_lidar[:, 0].mean()
        boundingbox_msg.pose.position.y = bb_in_lidar[:, 1].mean()
        boundingbox_msg.pose.position.z = bb_in_lidar[:, 2].mean()

        # find the bottom vertex with the smallest y coordinate, then compute the
        # rotation angle and edge lengths from its two adjacent vertices
        bb_bottom = bb_in_lidar[:4]
        min_idx = np.where(bb_bottom[:, 1] == bb_bottom[:, 1].min())[0][0]
        theta = math.atan2(
            bb_bottom[(min_idx + 1) % 4, 1] - bb_bottom[min_idx, 1],
            bb_bottom[(min_idx + 1) % 4, 0] - bb_bottom[min_idx, 0])
        b_1 = (
            (bb_bottom[(min_idx + 1) % 4, 1] - bb_bottom[min_idx, 1])**2 +
            (bb_bottom[(min_idx + 1) % 4, 0] - bb_bottom[min_idx, 0])**2)**0.5
        b_2 = (
            (bb_bottom[(min_idx + 3) % 4, 1] - bb_bottom[min_idx, 1])**2 +
            (bb_bottom[(min_idx + 3) % 4, 0] - bb_bottom[min_idx, 0])**2)**0.5
        if theta < 90 * math.pi / 180:
            rotation_angle = theta
            dimension_x = b_1
            dimension_y = b_2
        else:
            rotation_angle = theta - 90 * math.pi / 180
            dimension_x = b_2
            dimension_y = b_1

        # quaternion for the bounding box yaw rotation
        boundingbox_msg.pose.orientation.x = 0
        boundingbox_msg.pose.orientation.y = 0
        boundingbox_msg.pose.orientation.z = math.sin(0.5 * rotation_angle)
        boundingbox_msg.pose.orientation.w = math.cos(0.5 * rotation_angle)

        # bounding box dimensions
        boundingbox_msg.dimensions.x = dimension_x
        boundingbox_msg.dimensions.y = dimension_y
        boundingbox_msg.dimensions.z = bb_in_lidar[:, 2].max() - bb_in_lidar[:, 2].min()

        boundingbox_msg.value = 0
        boundingbox_msg.label = 0
        boundingboxarray_msg.boxes.append(boundingbox_msg)

    return boundingboxarray_msg
Code Example #14
def callback(msg):
    box_array = BoundingBoxArray()
    box_array.header = msg.header
    for footstep in msg.footsteps:
        box = BoundingBox()
        box.header = msg.header
        box.pose = footstep.pose
        box.dimensions = footstep.dimensions
        box.pose.position.z += (z_max + z_min) / 2.0
        box.dimensions.z = z_max - z_min
        box_array.boxes.append(box)
    pub.publish(box_array)
Code Example #15
    def detector_callback(self, pcl_msg):
        start = time.time()
        # rospy.loginfo('Processing Pointcloud with PointRCNN')
        arr_bbox = BoundingBoxArray()
        seq = pcl_msg.header.seq
        stamp = pcl_msg.header.stamp
        # in message pointcloud has x pointing forward, y pointing to the left and z pointing upward
        pts_lidar = np.array(
            [[p[0], p[1], p[2], p[3]]
             for p in pc2.read_points(pcl_msg, skip_nans=True,
                                      field_names=("x", "y", "z", "intensity"))],
            dtype=np.float32)
        scores, dt_box_lidar, types = self.run_model(pts_lidar)

        # TODO: question convert into torch tensors? torch.from_numpy(pts_lidar)
        # move onto gpu if available

        # TODO: check if needs translation/rotation to compensate for tilt etc.

        if scores.size != 0:
            for i in range(scores.size):
                bbox = BoundingBox()
                bbox.header.frame_id = pcl_msg.header.frame_id
                bbox.header.stamp = rospy.Time.now()
                # bbox.header.seq = pcl_msg.header.seq
                q = yaw2quaternion(float(dt_box_lidar[i][6]))
                bbox.pose.orientation.x = q[1]
                bbox.pose.orientation.y = q[2]
                bbox.pose.orientation.z = q[3]
                bbox.pose.orientation.w = q[0]
                bbox.pose.position.x = float(dt_box_lidar[i][0])
                bbox.pose.position.y = float(dt_box_lidar[i][1])
                bbox.pose.position.z = float(dt_box_lidar[i][2])
                bbox.dimensions.x = float(dt_box_lidar[i][3])
                bbox.dimensions.y = float(dt_box_lidar[i][4])
                bbox.dimensions.z = float(dt_box_lidar[i][5])
                bbox.value = scores[i]
                bbox.label = int(types[i])
                arr_bbox.boxes.append(bbox)

        # rospy.loginfo("3D detector time: {}".format(time.time() - start))

        arr_bbox.header.frame_id = pcl_msg.header.frame_id
        arr_bbox.header.stamp = pcl_msg.header.stamp
        arr_bbox.header.seq = pcl_msg.header.seq

        if len(arr_bbox.boxes) != 0:
            self.pub_arr_bbox.publish(arr_bbox)
            arr_bbox.boxes = []
        else:
            arr_bbox.boxes = []
            self.pub_arr_bbox.publish(arr_bbox)
Code Example #16
def callback(msg):
    box_array = BoundingBoxArray()
    box_array.header = msg.header
    for footstep in msg.footsteps:
        box = BoundingBox()
        box.header = msg.header
        box.pose = footstep.pose
        box.dimensions = footstep.dimensions
        box.pose.position.z += (z_max + z_min) / 2.0
        box.dimensions.z = z_max - z_min
        box_array.boxes.append(box)
    pub.publish(box_array)
Code Example #17
def get_box(msg):
    box = BoundingBox()
    get_box_pose_srv = rospy.ServiceProxy("/transformable_server_sample/get_pose", GetTransformableMarkerPose)
    resp = get_box_pose_srv(target_name=msg.data)
    box.pose = resp.pose_stamped.pose
    box.header = resp.pose_stamped.header
    get_box_dim_srv = rospy.ServiceProxy("/transformable_server_sample/get_dimensions", GetMarkerDimensions)
    resp2 = get_box_dim_srv(target_name=msg.data)
    box.dimensions.x = resp2.dimensions.x
    box.dimensions.y = resp2.dimensions.y
    box.dimensions.z = resp2.dimensions.z
    return box
Code Example #18
 def labeled_pose_callback(self, pose_msg):
     self.header = pose_msg.header
     self.qatm_boxes = BoundingBoxArray()
     self.qatm_boxes.header = pose_msg.header
     for pose in pose_msg.poses:
         tmp_box = BoundingBox()
         tmp_box.header = pose_msg.header
         tmp_box.pose = pose.pose
         tmp_box.dimensions.x = 0.03
         tmp_box.dimensions.y = 0.03
         tmp_box.dimensions.z = 0.03
         tmp_box.label = self.label_lst.index(pose.label)
         self.qatm_boxes.boxes.append(tmp_box)
Code Example #19
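# Parses a KITTI-style tracklet XML file and returns per-frame dictionaries of
# BoundingBoxes (in the LiDAR frame) and Pictogram text markers for the object labels.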
def readXML(file):
    tree = ET.parse(file)
    root = tree.getroot()

    item = root.findall('./tracklets/item')

    d = {}
    pictograms = {}

    for i, v in enumerate(item):
        h = float(v.find('h').text)
        w = float(v.find('w').text)
        l = float(v.find('l').text)
        frame = int(v.find('first_frame').text)
        size = Vector3(l, w, h)

        label = v.find('objectType').text
        
        pose = v.findall('./poses/item')

        for j, p in enumerate(pose):
            tx = float(p.find('tx').text)
            ty = float(p.find('ty').text)
            tz = float(p.find('tz').text)
            rz = float(p.find('rz').text)

            q = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)

            b = BoundingBox()
            b.pose.position = Vector3(tx, ty, tz/2.0)
            b.pose.orientation = Quaternion(*q)
            b.dimensions = size
            b.label = i

            picto_text = Pictogram()
            picto_text.mode = Pictogram.STRING_MODE
            picto_text.pose.position = Vector3(tx, ty, -tz/2.0)
            q = tf.transformations.quaternion_from_euler(0.7, 0.0, -0.7)
            picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
            picto_text.size = 1
            picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
            picto_text.character = label

            if (frame + j) in d:
                d[frame + j].append(b)
                pictograms[frame + j].append(picto_text)
            else:
                d[frame + j] = [b]
                pictograms[frame + j] = [picto_text]

    return d, pictograms
Code Example #20
def get_box(msg):
    box = BoundingBox()
    get_box_pose_srv = rospy.ServiceProxy(
        "/transformable_server_sample/get_pose", GetTransformableMarkerPose)
    resp = get_box_pose_srv(target_name=msg.data)
    box.pose = resp.pose_stamped.pose
    box.header = resp.pose_stamped.header
    get_box_dim_srv = rospy.ServiceProxy(
        "/transformable_server_sample/get_dimensions", GetMarkerDimensions)
    resp2 = get_box_dim_srv(target_name=msg.data)
    box.dimensions.x = resp2.dimensions.x
    box.dimensions.y = resp2.dimensions.y
    box.dimensions.z = resp2.dimensions.z
    return box
Code Example #21
 def _pub_bboxes_callback(self, event):
     bbox_array_msg = BoundingBoxArray()
     bbox_array_msg.header.frame_id = self.config['boxes'][0]['frame_id']
     bbox_array_msg.header.stamp = event.current_real
     for box in self.config['boxes']:
         bbox_msg = BoundingBox()
         pose = self.object_poses[box['name']]
         bbox_msg.header.frame_id = bbox_array_msg.header.frame_id
         bbox_msg.header.stamp = bbox_array_msg.header.stamp
         bbox_msg.pose = pose
         dimensions = self.object_dimensions[box['name']]
         bbox_msg.dimensions.x = dimensions.x
         bbox_msg.dimensions.y = dimensions.y
         bbox_msg.dimensions.z = dimensions.z
         bbox_array_msg.boxes.append(bbox_msg)
     self.pub_bboxes.publish(bbox_array_msg)
Code Example #22
 def _pub_bboxes_callback(self, event):
     bbox_array_msg = BoundingBoxArray()
     bbox_array_msg.header.frame_id = self.config['boxes'][0]['frame_id']
     bbox_array_msg.header.stamp = event.current_real
     for box in self.config['boxes']:
         bbox_msg = BoundingBox()
         pose = self.object_poses[box['name']]
         bbox_msg.header.frame_id = bbox_array_msg.header.frame_id
         bbox_msg.header.stamp = bbox_array_msg.header.stamp
         bbox_msg.pose = pose
         dimensions = self.object_dimensions[box['name']]
         bbox_msg.dimensions.x = dimensions.x
         bbox_msg.dimensions.y = dimensions.y
         bbox_msg.dimensions.z = dimensions.z
         bbox_array_msg.boxes.append(bbox_msg)
     self.pub_bboxes.publish(bbox_array_msg)
Code Example #23
def rslidar_callback(msg):
    # t_t = time.time()
    arr_bbox = BoundingBoxArray()
    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
 
    print("  ")
    seq = msg.header.seq
    stamp = msg.header.stamp
    input_points = {
        'stamp': stamp,
        'seq': seq,
        'points': np_p
    }
    if(proc_1.get_lidar_data(input_points)):
        scores, dt_box_lidar, types = proc_1.run()

        if scores.size != 0:
            for i in range(scores.size):
                bbox = BoundingBox()
                bbox.header.frame_id = msg.header.frame_id
                bbox.header.stamp = rospy.Time.now()
                q = yaw2quaternion(float(dt_box_lidar[i][8]))
                bbox.pose.orientation.x = q[1]
                bbox.pose.orientation.y = q[2]
                bbox.pose.orientation.z = q[3]
                bbox.pose.orientation.w = q[0]
                bbox.pose.position.x = float(dt_box_lidar[i][0])
                bbox.pose.position.y = float(dt_box_lidar[i][1])
                bbox.pose.position.z = float(dt_box_lidar[i][2])
                bbox.dimensions.x = float(dt_box_lidar[i][4])
                bbox.dimensions.y = float(dt_box_lidar[i][3])
                bbox.dimensions.z = float(dt_box_lidar[i][5])
                bbox.value = scores[i]
                bbox.label = int(types[i])
                arr_bbox.boxes.append(bbox)
        # print("total callback time: ", time.time() - t_t)
        arr_bbox.header.frame_id = msg.header.frame_id
        arr_bbox.header.stamp = msg.header.stamp
        if len(arr_bbox.boxes) != 0:
            pub_arr_bbox.publish(arr_bbox)
            arr_bbox.boxes = []
        else:
            arr_bbox.boxes = []
            pub_arr_bbox.publish(arr_bbox)
Code Example #24
    def init_boundingboxarray(self, num_boxes=30):
        self.boundingBoxArray_object = BoundingBoxArray()

        h = std_msgs.msg.Header()
        h.stamp = rospy.Time.now()  # Note: rospy.init_node() must be called before this will work
        h.frame_id = "world"

        self.boundingBoxArray_object.header = h

        self.minimum_dimension = 0.2
        self.init_x_position = 1.0

        for i in range(num_boxes):
            new_box = BoundingBox()
            new_box.header = h

            new_box.pose = Pose()
            new_box.pose.position.x = self.init_x_position + i * self.minimum_dimension

            new_box.dimensions = Vector3()
            new_box.dimensions.x = self.minimum_dimension
            new_box.dimensions.y = self.minimum_dimension
            new_box.dimensions.z = self.minimum_dimension

            new_box.label = i
            new_box.value = i * self.minimum_dimension

            self.boundingBoxArray_object.boxes.append(new_box)

        self.publish_once(self.boundingBoxArray_object)
Code Example #25
 def __init__(self):
     self.srv = Server(DynamicParamsConfig,
                       self.dynamic_reconfigure_callback)
     self.sub = rospy.Subscriber(
         "~input", PolygonArray, self.polygon_callback)
     self.box = BoundingBox()
     self.frame = rospy.get_param("~frame", "/base_link")
     self.box.header.frame_id = self.frame
     self.pub = rospy.Publisher("~output", BoundingBox, queue_size=1)
Code Example #26
def vis_callback(msg):
    obj_bba = BoundingBoxArray()
    obj_bba.header.frame_id = 'map'

    for obj in msg.objects:
        obj_bb = BoundingBox()
        res = tosm.query_individual(obj.object_name + str(obj.ID))
        obj_bb.header = obj.header
        pose = res.pose[0].replace('[', '').replace(']', '').split(',')
        size = res.size[0].replace('[', '').replace(']', '').split(',')

        obj_bb.pose.position.x = float(pose[0])
        obj_bb.pose.position.y = float(pose[1])
        obj_bb.pose.position.z = float(pose[2]) + float(size[2]) / 2.0
        obj_bb.pose.orientation.x = float(pose[3])
        obj_bb.pose.orientation.y = float(pose[4])
        obj_bb.pose.orientation.z = float(pose[5])
        obj_bb.pose.orientation.w = float(pose[6])

        # bounding box size
        obj_bb.dimensions.x = float(size[0])
        obj_bb.dimensions.y = float(size[1])
        obj_bb.dimensions.z = float(size[2])

        # likelihood
        obj_bb.value = 1

        # determine the color
        if (obj.object_name == "hingeddoor"):
            obj_bb.label = 1
        elif (obj.object_name == "elevatordoor"):
            obj_bb.label = 2
        elif (obj.object_name == "A"):
            obj_bb.label = 3
        elif (obj.object_name == "B"):
            obj_bb.label = 4
        elif (obj.object_name == "C"):
            obj_bb.label = 5
        else:
            obj_bb.label = 10

        obj_bba.boxes.append(obj_bb)

    ses_map_object_vis_pub.publish(obj_bba)
Code Example #27
    def inference_image(self, data):

        try:
            image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        #if image captured
        image_expanded = np.expand_dims(image, axis=0)
        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores,
             self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_expanded})
        # Draw the results of the detection (aka 'visualize the results')
        vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            self.category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.9)
        if len(boxes[0]) > 0 and scores[0][0] > 0.95:
            [min_x, min_y, max_x, max_y] = boxes[0][0]
            height, width = image.shape[:2]
            # print(min_x * height, min_y * width, max_x * height, max_y * width)
            # cv2.rectangle(image, (int(min_y * width), int(min_x * height)), (int(max_y * width), int(max_x * height)), (255, 0, 0), 2)
            boundingbox = BoundingBox()
            boundingbox.header = data.header
            boundingbox.pose.position.x = int(min_y * width)
            boundingbox.pose.position.y = int(min_x * height)
            boundingbox.dimensions.x = int((max_y - min_y) * width)
            boundingbox.dimensions.y = int((max_x - min_x) * height)
            # print(boundingbox)
            self.pub_boundingbox.publish(boundingbox)
        # All the results have been drawn on image. Now display the image.
        cv2.imshow('Object detector', image)
        if cv2.waitKey(10) == ord('q'):
            return
Code Example #28
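# Returns a BoundingBoxArray of 0.1 m candidate cubes laid out on a regular grid in the
# "odom" frame (the model_index argument is unused here).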
def candidateBoxes(header, model_index):
    box_array = BoundingBoxArray()
    box_array.header.stamp = header.stamp
    box_array.header.frame_id = "odom"
    dx = 0.1
    for y in np.arange(0.0, 0.6, dx):
        for z in np.arange(0.7, 1.0, dx):
            for x in np.arange(0.0, -0.5, -dx):
                box = BoundingBox()
                box.header = box_array.header
                box.pose.orientation.w = 1.0
                box.pose.position.x = x
                box.pose.position.y = y
                box.pose.position.z = z
                box.dimensions.x = 0.1
                box.dimensions.y = 0.1
                box.dimensions.z = 0.1
                box_array.boxes.append(box)
    return box_array
Code Example #29
    def to_msg(self):
        msg = BoundingBox()
        #msg.header = header
        msg.label = self.id
        msg.value = self.d_area

        msg.pose.position.x = self.center[-1][0]
        msg.pose.position.y = self.center[-1][1]
        msg.pose.position.z = 0

        msg.pose.orientation.x = self.direction[0]
        msg.pose.orientation.y = self.direction[1]
        msg.pose.orientation.z = 0
        msg.pose.orientation.w = 0

        msg.dimensions.x = self.withe
        msg.dimensions.y = self.hight
        msg.dimensions.z = 0
        return msg
Code Example #30
def candidateBoxes(header, model_index):
    box_array = BoundingBoxArray()
    box_array.header.stamp = header.stamp
    box_array.header.frame_id = "odom"
    dx = 0.1
    for y in np.arange(0.0, 0.6, dx):
        for z in np.arange(0.7, 1.0, dx):
            for x in np.arange(0.0, -0.5, -dx):
                box = BoundingBox()
                box.header = box_array.header
                box.pose.orientation.w = 1.0
                box.pose.position.x = x
                box.pose.position.y = y
                box.pose.position.z = z
                box.dimensions.x = 0.1
                box.dimensions.y = 0.1
                box.dimensions.z = 0.1
                box_array.boxes.append(box)
    return box_array
Code Example #31
def LeadCarUpdate(msg):
	global lidar_array, filter_boxes
	leadc_boxes = BoundingBoxArray()
	leadc_boxes.header = filter_boxes.header
	compare_boxes = BoundingBoxArray()
	compare_boxes.header = filter_boxes.header
	compare_boxes.boxes = filter_boxes.boxes[:]

	if len(msg.data)==0:
		print('no lead car')
		return
	else:
		rc_x = msg.data[0].pos_x
		rc_y = msg.data[0].pos_y
		rc_v = msg.data[0].speed

	distances = []
	for i in range(len(compare_boxes.boxes)):
		lidar_x = compare_boxes.boxes[i].pose.position.x
		lidar_y = compare_boxes.boxes[i].pose.position.y

		distance = np.sqrt((rc_x-lidar_x)**2+(rc_y-lidar_y)**2)
		distances.append(distance)
	print(min(distances))
	if (min(distances)<5):
		obj_i = distances.index(min(distances))
		leadc_boxes.boxes.append(compare_boxes.boxes[obj_i])
	else:
		leadc_box = BoundingBox()
		leadc_box.header = filter_boxes.boxes[1].header
		leadc_box.pose.position.x = rc_x
		leadc_box.pose.position.y = rc_y
		leadc_box.pose.position.z = .5
		leadc_box.pose.orientation.x = 0
		leadc_box.pose.orientation.y = 0
		leadc_box.pose.orientation.z = 0
		leadc_box.pose.orientation.w = 0
		leadc_box.dimensions.x = 5
		leadc_box.dimensions.y = 5
		leadc_box.dimensions.z = 5

		leadc_boxes.boxes.append(leadc_box)

	pub_leadc.publish(leadc_boxes)
Code Example #32
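    # Reads the PointCloud2 buffer into an (N, 4) x/y/z/intensity array (padding with
    # zeros when no intensity field is present), runs the detection model, and publishes
    # every detection as a BoundingBox with a yaw-only orientation.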
    def lidar_callback(self, msg):
        if self.model.inference_ctx is None or self.model.inference_ctx.anchor_cache is None:
            return

        intensity_fname = None
        intensity_dtype = None
        for field in msg.fields:
            if field.name == "i" or field.name == "intensity":
                intensity_fname = field.name
                intensity_dtype = field.datatype
                break

        dtype_list = self._fields_to_dtype(msg.fields, msg.point_step)
        pc_arr = np.frombuffer(msg.data, dtype_list)

        if intensity_fname:
            pc_arr = structured_to_unstructured(
                pc_arr[["x", "y", "z", intensity_fname]]).copy()
            if intensity_dtype == 2:
                pc_arr[:, 3] = pc_arr[:, 3] / 255
        else:
            pc_arr = structured_to_unstructured(pc_arr[["x", "y", "z"]]).copy()
            pc_arr = np.hstack((pc_arr, np.zeros((pc_arr.shape[0], 1))))

        lidar_boxes = self.model.predcit(pc_arr)

        num_detects = len(lidar_boxes)
        arr_bbox = BoundingBoxArray()
        for i in range(num_detects):
            bbox = BoundingBox()

            bbox.header.frame_id = msg.header.frame_id
            bbox.header.stamp = rospy.Time.now()

            bbox.pose.position.x = float(lidar_boxes[i][0])
            bbox.pose.position.y = float(lidar_boxes[i][1])
            bbox.pose.position.z = float(
                lidar_boxes[i][2]) + float(lidar_boxes[i][5]) / 2
            bbox.dimensions.x = float(lidar_boxes[i][3])  # width
            bbox.dimensions.y = float(lidar_boxes[i][4])  # length
            bbox.dimensions.z = float(lidar_boxes[i][5])  # height

            q = Quaternion(axis=(0, 0, 1), radians=float(lidar_boxes[i][6]))
            bbox.pose.orientation.x = q.x
            bbox.pose.orientation.y = q.y
            bbox.pose.orientation.z = q.z
            bbox.pose.orientation.w = q.w

            arr_bbox.boxes.append(bbox)

        arr_bbox.header.frame_id = msg.header.frame_id
        arr_bbox.header.stamp = rospy.Time.now()
        print("Number of detections: {}".format(num_detects))

        self.pub_bbox.publish(arr_bbox)
Code Example #33
 def prepareDetectionRegistration(self, centroid, now):
     obj_det = BucketDetection()
     obj_det.image = self.cv_bridge.cv2_to_imgmsg(self.stereo_left, "bgr8")
     obj_det.tag = "object_tags/gate"
     bbox_3d = BoundingBox()
     bbox_3d.dimensions = Vector3(self.gate_dimensions[0], self.gate_dimensions[1], self.gate_dimensions[2])
     bbox_pose = Pose()
     x, y, z = list((np.squeeze(centroid)).T)
     obj_det.position = Point(x, y, z)
     bbox_pose.position = Point(x, y, z)
     bbox_3d.pose = bbox_pose
     bbox_header = Header()
     bbox_header.frame_id = "duo3d_optical_link_front"
     bbox_header.stamp = now
     bbox_3d.header = bbox_header
     obj_det.bbox_3d = bbox_3d
     obj_det.header = Header()
     obj_det.header.frame_id = bbox_header.frame_id
     obj_det.header.stamp = now
     return obj_det
Code Example #34
    def to_msg(self):
        msg = BoundingBox()
        msg.label = self.id
        msg.value = 0

        msg.pose.position.x = self.bbox[0]
        msg.pose.position.y = self.bbox[1]
        msg.pose.position.z = 0

        msg.pose.orientation.x = 0
        msg.pose.orientation.y = 0
        msg.pose.orientation.z = 0
        msg.pose.orientation.w = 1  # identity orientation (no rotation)

        msg.dimensions.x = self.bbox[2] - self.bbox[0]
        msg.dimensions.y = self.bbox[3] - self.bbox[1]
        msg.dimensions.z = 0

        #print (msg)

        return msg
Code Example #35
    def publish(self, event):
        bbox_array_msg = BoundingBoxArray()
        bbox_array_msg.header.seq = self.seq
        bbox_array_msg.header.frame_id = self.frame_id
        bbox_array_msg.header.stamp = event.current_real
        for box in self.boxes:
            pos = box['position']
            rot = box.get('rotation', [0, 0, 0])
            qua = quaternion_from_euler(*rot)
            dim = box['dimension']

            bbox_msg = BoundingBox()
            bbox_msg.header.seq = self.seq
            bbox_msg.header.frame_id = self.frame_id
            bbox_msg.header.stamp = event.current_real
            bbox_msg.pose.position = Point(*pos)
            bbox_msg.pose.orientation = Quaternion(*qua)
            bbox_msg.dimensions = Vector3(*dim)

            bbox_array_msg.boxes.append(bbox_msg)

        self.pub.publish(bbox_array_msg)
Code Example #36
    def publish(self, event):
        bbox_array_msg = BoundingBoxArray()
        bbox_array_msg.header.seq = self.seq
        bbox_array_msg.header.frame_id = self.frame_id
        bbox_array_msg.header.stamp = event.current_real
        for i_box in xrange(self.n_boxes):
            pos = self.positions[i_box]
            rot = self.rotations[i_box]
            qua = quaternion_from_euler(*rot)
            dim = self.dimensions[i_box]

            bbox_msg = BoundingBox()
            bbox_msg.header.seq = self.seq
            bbox_msg.header.frame_id = self.frame_id
            bbox_msg.header.stamp = event.current_real
            bbox_msg.pose.position = Point(*pos)
            bbox_msg.pose.orientation = Quaternion(*qua)
            bbox_msg.dimensions = Vector3(*dim)

            bbox_array_msg.boxes.append(bbox_msg)

        self.pub.publish(bbox_array_msg)
Code Example #37
    def publish(self, event):
        bbox_array_msg = BoundingBoxArray()
        bbox_array_msg.header.seq = self.seq
        bbox_array_msg.header.frame_id = self.frame_id
        bbox_array_msg.header.stamp = event.current_real
        for box in self.boxes:
            pos = box['position']
            rot = box.get('rotation', [0, 0, 0])
            qua = quaternion_from_euler(*rot)
            dim = box['dimension']

            bbox_msg = BoundingBox()
            bbox_msg.header.seq = self.seq
            bbox_msg.header.frame_id = self.frame_id
            bbox_msg.header.stamp = event.current_real
            bbox_msg.pose.position = Point(*pos)
            bbox_msg.pose.orientation = Quaternion(*qua)
            bbox_msg.dimensions = Vector3(*dim)

            bbox_array_msg.boxes.append(bbox_msg)

        self.pub.publish(bbox_array_msg)
Code Example #38
    def listen_transform(self, parent_frame, child_frame):
        box = BoundingBox()
        try:
            self.listener.waitForTransform(
                parent_frame, child_frame, rospy.Time(0), rospy.Duration(3.0))
            (trans, rot) = self.listener.lookupTransform(
                parent_frame, child_frame, rospy.Time(0))

            box.pose.position = Point(trans[0], trans[1], trans[2])
            box.pose.orientation = Quaternion(rot[0], rot[1], rot[2], rot[3])
            return box
        except:
            rospy.logwarn('cannot lookup transform')
            return box
Code Example #39
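# Like the tracklet reader above, but additionally projects each 3D box into the camera
# image (via rt_matrix and the P_rect_02 intrinsics) to produce ImageRect 2D boxes for
# sufficiently visible Car/Truck/Van objects.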
def readXML(file):
	tree = ET.parse(file)
	root = tree.getroot()
	
	item = root.findall('./tracklets/item')

	d = {}
	boxes_2d = {}
	pictograms = {}

	for i, v in enumerate(item):
		h = float(v.find('h').text)
		w = float(v.find('w').text)
		l = float(v.find('l').text)
		frame = int(v.find('first_frame').text)
		size = Vector3(l, w, h)

		label = v.find('objectType').text

		pose = v.findall('./poses/item')

		for j, p in enumerate(pose):
			tx = float(p.find('tx').text)
			ty = float(p.find('ty').text)
			tz = float(p.find('tz').text)
			rz = float(p.find('rz').text)
			occlusion = float(p.find('occlusion').text)
			q = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)

			b = BoundingBox()
			b.pose.position = Vector3(tx, ty, tz/2.0)
			b.pose.orientation = Quaternion(*q)
			b.dimensions = size
			b.label = i
			
			picto_text = Pictogram()
			picto_text.mode = Pictogram.STRING_MODE
			picto_text.pose.position = Vector3(tx, ty, -tz/2.0)
			q = tf.transformations.quaternion_from_euler(0.7, 0.0, -0.7)
			picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
			picto_text.size = 5
			picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
			picto_text.character = label
			
			# Bounding Box corners
			corner_x = np.array([l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2])
			corner_y = np.array([w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2])
			corner_z = np.array([0, 0, 0, 0, h, h, h, h])
			rz = wrapToPi(rz)
			
			###################
			#create box on origin, then translate and rotate according to pose. finally, project into 2D image
			# Rotate and translate 3D bounding box in velodyne coordinate system
			R = np.array([	[math.cos(rz), 	-math.sin(rz), 	0], 
							[math.sin(rz), 	math.cos(rz), 	0],
							[0, 			0, 				1]])
			corner_3d = np.dot(R,np.array([corner_x, corner_y, corner_z]))
			#Translate
			corner_3d[0,:] = corner_3d[0,:] + tx
			corner_3d[1,:] = corner_3d[1,:] + ty
			corner_3d[2,:] = corner_3d[2,:] + tz
			
			#Project to 2D
			low_row = np.vstack([corner_3d, np.ones(corner_3d.shape[1], dtype=float)])
			corner_3d = np.dot(np.asarray(rt_matrix), low_row)

			#################################
			#Create an orientation vector
			orientation_3d = np.dot( R, np.array([[0,0.7*l],[0,0],[0,0]]) )
			#Translate
			orientation_3d[0,:] = orientation_3d[0,:] + tx
			orientation_3d[1,:] = orientation_3d[1,:] + ty
			orientation_3d[2,:] = orientation_3d[2,:] + tz
			#Project
			low_row = np.vstack([orientation_3d, np.ones(orientation_3d.shape[1], dtype=float)])
			orientation_3d = np.dot(rt_matrix, low_row)

			K = np.asarray(cam_to_cam['P_rect_02']).reshape(3,4)
			K = K[:3,:3]

			corners_2d = projectToImage(corner_3d, K)
			orientation_2d = projectToImage(orientation_3d, K)

			x1 = min(corners_2d[0,:])
			x2 = max(corners_2d[0,:])
			y1 = min(corners_2d[1,:])
			y2 = max(corners_2d[1,:])

			bbox_2d = ImageRect()
			bbox_2d.score = -10.0
			
			if ( (label == 'Car' or label=='Truck' or label=='Van') and np.any(corner_3d[2,:]>=0.5)) and (np.any(orientation_3d[2,:]>=0.5) and x1>=0 and x2>=0 and y1>0 and y2>=0 and occlusion <2):				
				bbox_2d.x = x1
				bbox_2d.y = y1
				bbox_2d.width = x2-x1
				bbox_2d.height = y2-y1
				bbox_2d.score = 1.0

			if (frame + j) in d:
				d[frame + j].append(b)
				boxes_2d[frame + j].append(bbox_2d)
				pictograms[frame + j].append(picto_text)
			else:
				d[frame + j] = [b]
				boxes_2d[frame + j] = [bbox_2d]
				pictograms[frame + j]= [picto_text]

	return d, boxes_2d, pictograms
Code Example #40
#!/usr/bin/env python
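# Minimal standalone node that publishes a BoundingBoxArray containing two sample boxes
# in the "map" frame; box_a's yaw rotates as the counter increases.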


import math

import rospy

from jsk_recognition_msgs.msg import BoundingBoxArray, BoundingBox
from tf.transformations import *

rospy.init_node("bbox_sample")
pub = rospy.Publisher("bbox", BoundingBoxArray, queue_size=1)
r = rospy.Rate(24)
counter = 0
while not rospy.is_shutdown():
  box_a = BoundingBox()
  box_b = BoundingBox()
  box_a.label = 2
  box_b.label = 5
  box_arr = BoundingBoxArray()
  now = rospy.Time.now()
  box_a.header.stamp = now
  box_b.header.stamp = now
  box_arr.header.stamp = now
  box_a.header.frame_id = "map"
  box_b.header.frame_id = "map"
  box_arr.header.frame_id = "map"
  q = quaternion_about_axis((counter % 100) * math.pi * 2 / 100.0, [0, 0, 1])
  box_a.pose.orientation.x = q[0]
  box_a.pose.orientation.y = q[1]
  box_a.pose.orientation.z = q[2]
  box_a.pose.orientation.w = q[3]
  box_b.pose.orientation.w = 1
  box_b.pose.position.y = 2