def callback_with_cluster_box(self, cluster_boxes_msg, instance_boxes_msg, instance_label_msg):
    """Attach labels to cluster and instance boxes and republish both arrays.

    Cluster boxes get label = index + 1; instance boxes take the id from the
    matching entry of instance_label_msg.labels.
    """
    cluster_out = BoundingBoxArray()
    cluster_out.header = cluster_boxes_msg.header
    instance_out = BoundingBoxArray()
    instance_out.header = instance_boxes_msg.header

    for idx, src in enumerate(cluster_boxes_msg.boxes):
        # Boxes parked at x == 0.0 are skipped (treated as empty slots).
        if src.pose.position.x == 0.0:
            continue
        labeled = BoundingBox()
        labeled.header = src.header
        labeled.pose = src.pose
        labeled.dimensions = src.dimensions
        # TODO fix index indent, jsk_pcl_ros_utils/label_to_cluster_point_indices_nodelet.cpp
        labeled.label = idx + 1
        cluster_out.boxes.append(labeled)

    for src, lbl in zip(instance_boxes_msg.boxes, instance_label_msg.labels):
        labeled = BoundingBox()
        labeled.header = src.header
        labeled.pose = src.pose
        labeled.dimensions = src.dimensions
        labeled.label = lbl.id
        instance_out.boxes.append(labeled)

    self.labeled_cluster_boxes_pub.publish(cluster_out)
    self.labeled_instance_boxes_pub.publish(instance_out)
def create_bb_msg(self, cur_bb, is_map_coord=False):
    """Build an axis-aligned BoundingBox in the 'map' frame from two corners.

    Args:
        cur_bb: dict with 'upper_left_px' and 'lower_right_px' corner points.
        is_map_coord: when True the corners are already map coordinates;
            otherwise they are pixel coordinates converted via self.img2map.

    Returns:
        BoundingBox centered between the corners, flat (z dimension 0.1).
    """
    corners = np.array([cur_bb['upper_left_px'], cur_bb['lower_right_px']])
    if is_map_coord is True:
        corners_map = corners
    else:
        corners_map = self.img2map.pixel2map(corners)

    center = np.mean(corners_map, axis=0)
    extent = np.abs(corners_map[0, :] - corners_map[1, :])

    msg = BoundingBox()
    msg.header.frame_id = 'map'
    msg.header.stamp = rospy.Time.now()
    msg.pose.position.x = center[0]
    msg.pose.position.y = center[1]
    msg.pose.position.z = 0
    # Identity orientation: the box is axis-aligned.
    msg.pose.orientation.x = 0
    msg.pose.orientation.y = 0
    msg.pose.orientation.z = 0
    msg.pose.orientation.w = 1
    msg.dimensions.x = extent[0]
    msg.dimensions.y = extent[1]
    msg.dimensions.z = 0.1
    msg.value = 1.0
    msg.label = 1
    return msg
def init_boundingboxarray(self, num_boxes=30):
    """Create and publish a row of `num_boxes` equally spaced dummy boxes.

    Each cube has edge self.minimum_dimension and is offset along x from
    self.init_x_position; label and value grow with the box index.
    """
    self.boundingBoxArray_object = BoundingBoxArray()
    header = std_msgs.msg.Header()
    # Note you need to call rospy.init_node() before this will work.
    header.stamp = rospy.Time.now()
    header.frame_id = "world"
    self.boundingBoxArray_object.header = header
    self.minimum_dimension = 0.2
    self.init_x_position = 1.0

    for idx in range(num_boxes):
        box = BoundingBox()
        box.header = header
        box.pose = Pose()
        box.pose.position.x = self.init_x_position + idx * self.minimum_dimension
        box.dimensions = Vector3()
        box.dimensions.x = self.minimum_dimension
        box.dimensions.y = self.minimum_dimension
        box.dimensions.z = self.minimum_dimension
        box.label = idx
        box.value = idx * self.minimum_dimension
        self.boundingBoxArray_object.boxes.append(box)

    self.publish_once(self.boundingBoxArray_object)
def to_msg(self, boxes, class_ids, header):
    """Convert 2-D detections into a BoundingBoxArray, keeping only persons.

    Args:
        boxes: sequence of (x, y, w, h) detections.
        class_ids: class index per detection, resolved via self.classes.
        header: ROS header copied onto the output array.

    Returns:
        BoundingBoxArray containing one flat box per "person" detection,
        labeled with the detection index.
    """
    box_arr = BoundingBoxArray()
    box_arr.header = header
    for i in range(len(boxes)):
        if str(self.classes[class_ids[i]]) == "person":
            x, y, w, h = boxes[i]
            box = BoundingBox()
            box.label = i
            box.value = 0
            box.pose.position.x = x
            box.pose.position.y = y
            box.pose.position.z = 0
            box.pose.orientation.x = 0
            box.pose.orientation.y = 0
            # BUG FIX: orientation.x was assigned twice; the second write was
            # clearly meant for .z (both default to 0, so behavior is unchanged).
            box.pose.orientation.z = 0
            box.pose.orientation.w = 0
            box.dimensions.x = w
            box.dimensions.y = h
            box.dimensions.z = 0
            box_arr.boxes.append(box)
    return box_arr
def get_nearest_box(self, req):
    """Find the box whose far corner is nearest to req.target among boxes
    matching req.label.

    Args:
        req: request with .label (class name) and .target (point).

    Returns:
        (nearest_box, has_request_item, target_index): the best BoundingBox
        (empty if no match), whether any box matched the label, and its index.
    """
    distance = 100  # initial search radius; boxes farther than this are ignored
    has_request_item = False
    target_index = 0
    nearest_box = BoundingBox()
    for index, box in enumerate(self.boxes.boxes):
        # NOTE(review): this warns whenever ANY single coordinate is 0, not
        # only for an all-zero position — confirm the intent was `or`.
        if box.pose.position.x == 0 or \
           box.pose.position.y == 0 or \
           box.pose.position.z == 0:
            rospy.logwarn('boxes has (0, 0, 0) position box')
            continue
        if self.label_lst[box.label] == req.label:
            has_request_item = True
            # Reference point: box position offset by half the dimensions.
            ref_point = np.array([box.pose.position.x + (box.dimensions.x * 0.5),
                                  box.pose.position.y + (box.dimensions.y * 0.5),
                                  box.pose.position.z + (box.dimensions.z * 0.5)])
            target_point = np.array([req.target.x, req.target.y, req.target.z])
            # Compute the norm once (it was previously recomputed for the update).
            candidate_dist = np.linalg.norm(ref_point - target_point)
            if candidate_dist < distance:
                nearest_box.pose = box.pose
                nearest_box.dimensions = box.dimensions
                nearest_box.label = box.label
                target_index = index
                distance = candidate_dist
    return nearest_box, has_request_item, target_index
def rslidar_callback(msg):
    """Point cloud callback: run the detector, publish thresholded boxes plus
    tracking/visualization payloads.

    Publishes a BoundingBoxArray (pub_arr_bbox) and the derived SORT /
    3D-SORT / AB3DMOT / rviz messages.
    """
    t_t = time.time()
    #calib = getCalibfromFile(calib_file)
    #calib = getCalibfromROS(calibmsg)
    frame = msg.header.seq
    arr_bbox = BoundingBoxArray()
    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
    print(" ")
    #scores, dt_box_lidar, types = proc_1.run(np_p)
    scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, calib, frame)
    annos_sorted = sortbydistance(dt_box_lidar, scores, types)
    #pp_AB3DMOT_list = anno_to_AB3DMOT(pred_dict, msg)
    pp_AB3DMOT_list = anno_to_AB3DMOT(dt_box_lidar, scores, types, msg)
    pp_list = anno_to_sort(dt_box_lidar, scores, types)
    pp_3D_list = anno_to_3Dsort(annos_sorted, types)
    MarkerArray_list = anno_to_rviz(dt_box_lidar, scores, types, msg)
    if scores.size != 0:
        for i in range(scores.size):
            # Keep only detections above the confidence threshold.
            if scores[i] > threshold:
                bbox = BoundingBox()
                bbox.header.frame_id = msg.header.frame_id
                bbox.header.stamp = rospy.Time.now()
                q = yaw2quaternion(float(dt_box_lidar[i][6]))
                bbox.pose.orientation.x = q[1]
                bbox.pose.orientation.y = q[2]
                bbox.pose.orientation.z = q[3]
                bbox.pose.orientation.w = q[0]
                # movelidarcenter compensates the lidar mounting offset along x.
                bbox.pose.position.x = float(dt_box_lidar[i][0]) - movelidarcenter
                bbox.pose.position.y = float(dt_box_lidar[i][1])
                bbox.pose.position.z = float(dt_box_lidar[i][2])
                bbox.dimensions.x = float(dt_box_lidar[i][3])
                bbox.dimensions.y = float(dt_box_lidar[i][4])
                bbox.dimensions.z = float(dt_box_lidar[i][5])
                bbox.value = scores[i]
                bbox.label = int(types[i])
                arr_bbox.boxes.append(bbox)
    print("total callback time: ", time.time() - t_t)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_bbox.header.stamp = msg.header.stamp
    # BUG FIX: `len(...) is not 0` compared identity against an int literal
    # (implementation-dependent, raises SyntaxWarning on Python 3.8+);
    # test emptiness via truthiness instead.
    if arr_bbox.boxes:
        pub_arr_bbox.publish(arr_bbox)
        arr_bbox.boxes = []
    else:
        arr_bbox.boxes = []
        pub_arr_bbox.publish(arr_bbox)
    pubRviz.publish(MarkerArray_list)
    pubSort.publish(pp_list)
    pub3DSort.publish(pp_3D_list)
    pubAB3DMOT.publish(pp_AB3DMOT_list)
def viz_bbox(position, q, size, i):
    """Assemble a labeled BoundingBox for visualization.

    Args:
        position: geometry_msgs Point for the box center.
        q: quaternion as an (x, y, z, w) iterable.
        size: Vector3 dimensions.
        i: integer label.
    """
    box = BoundingBox()
    box.label = i
    box.dimensions = size
    box.pose.orientation = Quaternion(*q)
    box.pose.position = position
    return box
def rslidar_callback(msg):
    """Point cloud callback: run the detector and publish all detections as a
    BoundingBoxArray (no score threshold applied in this variant)."""
    t_t = time.time()
    arr_bbox = BoundingBoxArray()
    #t = time.time()
    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
    print(" ")
    #print("prepare cloud time: ", time.time() - t)
    #t = time.time()
    scores, dt_box_lidar, types = proc_1.run(np_p)
    #print("network forward time: ", time.time() - t)
    # filter_points_sum = []
    #t = time.time()
    if scores.size != 0:
        for i in range(scores.size):
            bbox = BoundingBox()
            bbox.header.frame_id = msg.header.frame_id
            bbox.header.stamp = rospy.Time.now()
            # NOTE(review): this file maps q[0..3] -> x,y,z,w while sibling
            # callbacks use q[1..3],q[0] — verify yaw2quaternion's ordering.
            q = yaw2quaternion(float(dt_box_lidar[i][6]))
            bbox.pose.orientation.x = q[0]
            bbox.pose.orientation.y = q[1]
            bbox.pose.orientation.z = q[2]
            bbox.pose.orientation.w = q[3]
            bbox.pose.position.x = float(dt_box_lidar[i][0])
            bbox.pose.position.y = float(dt_box_lidar[i][1])
            bbox.pose.position.z = float(dt_box_lidar[i][2])
            bbox.dimensions.x = float(dt_box_lidar[i][3])
            bbox.dimensions.y = float(dt_box_lidar[i][4])
            bbox.dimensions.z = float(dt_box_lidar[i][5])
            bbox.value = scores[i]
            bbox.label = types[i]
            arr_bbox.boxes.append(bbox)
            # filter_points = np_p[point_indices[:,i]]
            # filter_points_sum.append(filter_points)
    #print("publish time cost: ", time.time() - t)
    # filter_points_sum = np.concatenate(filter_points_sum, axis=0)
    # filter_points_sum = filter_points_sum[:, :3]
    # print("output of concatenate:", filter_points_sum)
    # filter_points_sum = np.arange(24).reshape(8,3)
    # cluster_cloud = xyz_array_to_pointcloud2(filter_points_sum, stamp=rospy.Time.now(), frame_id=msg.header.frame_id)
    # pub_segments.publish(cluster_cloud)
    print("total callback time: ", time.time() - t_t)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_bbox.header.stamp = rospy.Time.now()
    # BUG FIX: `len(...) is not 0` compared identity against an int literal
    # (implementation-dependent, raises SyntaxWarning on Python 3.8+);
    # test emptiness via truthiness instead.
    if arr_bbox.boxes:
        pub_arr_bbox.publish(arr_bbox)
        arr_bbox.boxes = []
    else:
        arr_bbox.boxes = []
        pub_arr_bbox.publish(arr_bbox)
def callback(self, box_msg):
    """Group incoming boxes by label, cluster each group spatially, and
    publish one merged BoundingBox per cluster.

    Each merged box spans the min/max corners of its cluster members; its
    value is the mean of the per-member distances from self.get_distances.
    """
    labeled_boxes = {}
    label_buf = []
    orientation = Pose().orientation  # identity until a repeated label is seen
    for box in box_msg.boxes:
        if box.label in label_buf:
            labeled_boxes[box.label] += [self.get_points(box)]
            # NOTE(review): orientation is only updated on the 2nd+ box of a
            # label, and the last one wins for ALL output boxes — confirm.
            orientation = box.pose.orientation
        else:
            labeled_boxes[box.label] = [self.get_points(box)]
            label_buf.append(box.label)
    bounding_box_msg = BoundingBoxArray()
    for label, boxes in zip(labeled_boxes.keys(), labeled_boxes.values()):
        thresh = self.thresh
        # 'shelf_flont' matches the (misspelled) entry in the label list;
        # shelf fronts get a looser clustering threshold.
        if self.label_lst[label] == 'shelf_flont':
            thresh = 2.0
        clustering = Clustering()
        boxes = np.array(boxes)
        result = clustering.clustering_wrapper(boxes, thresh)
        for cluster in result:
            # boxes[i][0] is a center, boxes[i][1] its dimensions
            # (as produced by self.get_points — TODO confirm).
            max_candidates = [
                boxes[i][0] + (boxes[i][1] * 0.5) for i in cluster.indices
            ]
            min_candidates = [
                boxes[i][0] - (boxes[i][1] * 0.5) for i in cluster.indices
            ]
            candidates = np.array(max_candidates + min_candidates)
            # Merged box = axis-aligned hull of all member corners.
            dimension = candidates.max(axis=0) - candidates.min(axis=0)
            center = candidates.min(axis=0) + (dimension * 0.5)
            distances = self.get_distances(
                np.array([boxes[i][0] for i in cluster.indices]), [
                    np.linalg.norm(boxes[i][1]) * 0.5 for i in cluster.indices
                ])
            tmp_box = BoundingBox()
            tmp_box.header = box_msg.header
            tmp_box.dimensions.x = dimension[0]
            tmp_box.dimensions.y = dimension[1]
            tmp_box.dimensions.z = dimension[2]
            tmp_box.pose.position.x = center[0]
            tmp_box.pose.position.y = center[1]
            tmp_box.pose.position.z = center[2]
            tmp_box.pose.orientation = orientation
            tmp_box.label = label
            tmp_box.value = distances.mean()
            bounding_box_msg.boxes.append(tmp_box)
    bounding_box_msg.header = box_msg.header
    self.box_pub.publish(bounding_box_msg)
def convert(header, xx_cube):
    """Convert stacked 8-corner cubes (camera frame) into a lidar-frame
    BoundingBoxArray.

    Args:
        header: ROS header copied onto the array and every box.
        xx_cube: (8*N, 3) array; each consecutive group of 8 rows is the
            corner set of one cube, bottom face first — TODO confirm layout.

    Returns:
        BoundingBoxArray with one yaw-oriented box per cube.
    """
    boundingboxarray_msg = BoundingBoxArray()
    boundingboxarray_msg.header = header
    num_xx = int(xx_cube.shape[0] / 8)
    for i in range(num_xx):
        cube_raw = xx_cube[range(8 * i, 8 * i + 8), :]
        # Homogenize and transform the corners from camera to lidar frame.
        bb_in_camera = np.column_stack((cube_raw, np.ones((8, 1))))
        bb_in_lidar = np.linalg.inv(calib.lidar_to_cam).dot(bb_in_camera.T).T
        boundingbox_msg = BoundingBox()
        boundingbox_msg.header = boundingboxarray_msg.header
        # Bounding box center: mean of the 8 corners.
        boundingbox_msg.pose.position.x = bb_in_lidar[:, 0].mean()
        boundingbox_msg.pose.position.y = bb_in_lidar[:, 1].mean()
        boundingbox_msg.pose.position.z = bb_in_lidar[:, 2].mean()
        # Find the vertex with the smallest y coordinate, then compute the
        # rotation angle and edge lengths toward its two neighbours.
        bb_bottom = bb_in_lidar[:4]
        min_idx = np.where(bb_bottom[:, 1] == bb_bottom[:, 1].min())[0][0]
        theta = math.atan2(
            bb_bottom[(min_idx + 1) % 4, 1] - bb_bottom[min_idx, 1],
            bb_bottom[(min_idx + 1) % 4, 0] - bb_bottom[min_idx, 0])
        b_1 = (
            (bb_bottom[(min_idx + 1) % 4, 1] - bb_bottom[min_idx, 1])**2 +
            (bb_bottom[(min_idx + 1) % 4, 0] - bb_bottom[min_idx, 0])**2)**0.5
        b_2 = (
            (bb_bottom[(min_idx + 3) % 4, 1] - bb_bottom[min_idx, 1])**2 +
            (bb_bottom[(min_idx + 3) % 4, 0] - bb_bottom[min_idx, 0])**2)**0.5
        # Normalize so the yaw stays below 90 degrees; swap the edge lengths
        # accordingly so x/y dimensions follow the rotated axes.
        if theta < 90 * math.pi / 180:
            rotation_angle = theta
            dimension_x = b_1
            dimension_y = b_2
        else:
            rotation_angle = theta - 90 * math.pi / 180
            dimension_x = b_2
            dimension_y = b_1
        # Quaternion for a pure yaw rotation.
        boundingbox_msg.pose.orientation.x = 0
        boundingbox_msg.pose.orientation.y = 0
        boundingbox_msg.pose.orientation.z = math.sin(0.5 * rotation_angle)
        boundingbox_msg.pose.orientation.w = math.cos(0.5 * rotation_angle)
        # Bounding box dimensions; height from the z extent of all corners.
        boundingbox_msg.dimensions.x = dimension_x
        boundingbox_msg.dimensions.y = dimension_y
        boundingbox_msg.dimensions.z = bb_in_lidar[:, 2].max(
        ) - bb_in_lidar[:, 2].min()
        boundingbox_msg.value = 0
        boundingbox_msg.label = 0
        boundingboxarray_msg.boxes.append(boundingbox_msg)
    return boundingboxarray_msg
def detector_callback(self, pcl_msg): start = time.time() # rospy.loginfo('Processing Pointcloud with PointRCNN') arr_bbox = BoundingBoxArray() seq = pcl_msg.header.seq stamp = pcl_msg.header.stamp # in message pointcloud has x pointing forward, y pointing to the left and z pointing upward pts_lidar = np.array([[ p[0], p[1], p[2], p[3] ] for p in pc2.read_points( pcl_msg, skip_nans=True, field_names=("x", "y", "z", "intensity")) ], dtype=np.float32) scores, dt_box_lidar, types = self.run_model(pts_lidar) # TODO: question convert into torch tensors? torch.from_numpy(pts_lidar) # move onto gpu if available # TODO: check if needs translation/rotation to compensate for tilt etc. if scores.size != 0: for i in range(scores.size): bbox = BoundingBox() bbox.header.frame_id = pcl_msg.header.frame_id bbox.header.stamp = rospy.Time.now() # bbox.header.seq = pcl_msg.header.seq q = yaw2quaternion(float(dt_box_lidar[i][6])) bbox.pose.orientation.x = q[1] bbox.pose.orientation.y = q[2] bbox.pose.orientation.z = q[3] bbox.pose.orientation.w = q[0] bbox.pose.position.x = float(dt_box_lidar[i][0]) bbox.pose.position.y = float(dt_box_lidar[i][1]) bbox.pose.position.z = float(dt_box_lidar[i][2]) bbox.dimensions.x = float(dt_box_lidar[i][3]) bbox.dimensions.y = float(dt_box_lidar[i][4]) bbox.dimensions.z = float(dt_box_lidar[i][5]) bbox.value = scores[i] bbox.label = int(types[i]) arr_bbox.boxes.append(bbox) # rospy.loginfo("3D detector time: {}".format(time.time() - start)) arr_bbox.header.frame_id = pcl_msg.header.frame_id arr_bbox.header.stamp = pcl_msg.header.stamp arr_bbox.header.seq = pcl_msg.header.seq if len(arr_bbox.boxes) != 0: self.pub_arr_bbox.publish(arr_bbox) arr_bbox.boxes = [] else: arr_bbox.boxes = [] self.pub_arr_bbox.publish(arr_bbox)
def callback(self, instance_boxes_msg, instance_label_msg):
    """Republish instance boxes with each box tagged by its label id."""
    out = BoundingBoxArray()
    out.header = instance_boxes_msg.header
    for src, lbl in zip(instance_boxes_msg.boxes, instance_label_msg.labels):
        tagged = BoundingBox()
        tagged.label = lbl.id
        tagged.header = src.header
        tagged.dimensions = src.dimensions
        tagged.pose = src.pose
        out.boxes.append(tagged)
    self.labeled_instance_boxes_pub.publish(out)
def labeled_pose_callback(self, pose_msg):
    """Cache a 3 cm cube box for every labeled pose in the message.

    Boxes are stored on self.qatm_boxes (not published here); labels are
    resolved to indices via self.label_lst.
    """
    self.header = pose_msg.header
    self.qatm_boxes = BoundingBoxArray()
    self.qatm_boxes.header = pose_msg.header
    for labeled_pose in pose_msg.poses:
        cube = BoundingBox()
        cube.header = pose_msg.header
        cube.pose = labeled_pose.pose
        # Fixed 3 cm cube around each template-match pose.
        cube.dimensions.x = 0.03
        cube.dimensions.y = 0.03
        cube.dimensions.z = 0.03
        cube.label = self.label_lst.index(labeled_pose.label)
        self.qatm_boxes.boxes.append(cube)
def readXML(file):
    """Parse a KITTI tracklet XML file into per-frame JSK messages.

    Args:
        file: path to the tracklet_labels.xml file.

    Returns:
        (d, pictograms): dicts keyed by absolute frame number; d maps to a
        list of BoundingBox messages, pictograms to matching text labels.
    """
    tree = ET.parse(file)
    root = tree.getroot()
    item = root.findall('./tracklets/item')
    d = {}
    pictograms = {}
    for i, v in enumerate(item):
        h = float(v.find('h').text)
        w = float(v.find('w').text)
        l = float(v.find('l').text)
        frame = int(v.find('first_frame').text)
        size = Vector3(l, w, h)
        label = v.find('objectType').text
        pose = v.findall('./poses/item')
        for j, p in enumerate(pose):
            tx = float(p.find('tx').text)
            ty = float(p.find('ty').text)
            tz = float(p.find('tz').text)
            rz = float(p.find('rz').text)
            q = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)
            b = BoundingBox()
            # tz/2.0 lifts the box so its base sits at the tracklet origin
            # — TODO confirm against the KITTI ground convention.
            b.pose.position = Vector3(tx, ty, tz/2.0)
            b.pose.orientation = Quaternion(*q)
            b.dimensions = size
            b.label = i
            picto_text = Pictogram()
            picto_text.mode = Pictogram.STRING_MODE
            picto_text.pose.position = Vector3(tx, ty, -tz/2.0)
            q = tf.transformations.quaternion_from_euler(0.7, 0.0, -0.7)
            picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
            picto_text.size = 1
            picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
            picto_text.character = label
            # FIX: dict.has_key() is Python-2-only and `== True` is redundant;
            # `in` is equivalent and works on both Python 2 and 3.
            if (frame + j) in d:
                d[frame + j].append(b)
                pictograms[frame + j].append(picto_text)
            else:
                d[frame + j] = [b]
                pictograms[frame + j] = [picto_text]
    return d, pictograms
def dummyBoundingBoxPublisher():
    """Publish a dummy BoundingBox at 25 Hz whose height, yaw, x-size and
    value all follow the same sinusoid (for rviz plugin testing)."""
    pub = rospy.Publisher('/dummy_bounding_box', BoundingBox, queue_size=1)
    rospy.init_node('dummyBoundingBoxPublisher_node', anonymous=True)
    rate = rospy.Rate(25)
    box = BoundingBox()
    pose = Pose()
    dims = Vector3()
    min_dim = 0.2
    box.label = 1234
    tick = 0
    while not rospy.is_shutdown():
        header = std_msgs.msg.Header()
        # Note you need to call rospy.init_node() before this will work.
        header.stamp = rospy.Time.now()
        header.frame_id = "world"
        box.header = header
        wave = math.sin(tick / 10.0)
        box.value = wave
        # Change Pose to see effects: bob vertically, yaw with the wave.
        pose.position.x = 1.0
        pose.position.y = 0.0
        pose.position.z = wave
        # ai, aj, ak == roll, pitch, yaw
        quat = tf.transformations.quaternion_from_euler(ai=0, aj=0, ak=wave)
        pose.orientation.x = quat[0]
        pose.orientation.y = quat[1]
        pose.orientation.z = quat[2]
        pose.orientation.w = quat[3]
        dims.x = wave / 10 + min_dim
        dims.y = min_dim
        dims.z = min_dim
        # Assign pose and dimension objects
        box.pose = pose
        box.dimensions = dims
        pub.publish(box)
        rate.sleep()
        tick += 1
def rslidar_callback(msg):
    """Point cloud callback: feed the cloud to the detector's queue and, when
    accepted, publish the resulting detections as a BoundingBoxArray."""
    # t_t = time.time()
    arr_bbox = BoundingBoxArray()
    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
    print(" ")
    seq = msg.header.seq
    stamp = msg.header.stamp
    input_points = {
        'stamp': stamp,
        'seq': seq,
        'points': np_p
    }
    if(proc_1.get_lidar_data(input_points)):
        scores, dt_box_lidar, types = proc_1.run()
        if scores.size != 0:
            for i in range(scores.size):
                bbox = BoundingBox()
                bbox.header.frame_id = msg.header.frame_id
                bbox.header.stamp = rospy.Time.now()
                # NOTE(review): this model's box layout differs from the other
                # callbacks (yaw at index 8, dims at 4/3/5) — confirm.
                q = yaw2quaternion(float(dt_box_lidar[i][8]))
                bbox.pose.orientation.x = q[1]
                bbox.pose.orientation.y = q[2]
                bbox.pose.orientation.z = q[3]
                bbox.pose.orientation.w = q[0]
                bbox.pose.position.x = float(dt_box_lidar[i][0])
                bbox.pose.position.y = float(dt_box_lidar[i][1])
                bbox.pose.position.z = float(dt_box_lidar[i][2])
                bbox.dimensions.x = float(dt_box_lidar[i][4])
                bbox.dimensions.y = float(dt_box_lidar[i][3])
                bbox.dimensions.z = float(dt_box_lidar[i][5])
                bbox.value = scores[i]
                bbox.label = int(types[i])
                arr_bbox.boxes.append(bbox)
    # print("total callback time: ", time.time() - t_t)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_bbox.header.stamp = msg.header.stamp
    # BUG FIX: `len(...) is not 0` compared identity against an int literal
    # (implementation-dependent, raises SyntaxWarning on Python 3.8+);
    # test emptiness via truthiness instead.
    if arr_bbox.boxes:
        pub_arr_bbox.publish(arr_bbox)
        arr_bbox.boxes = []
    else:
        arr_bbox.boxes = []
        pub_arr_bbox.publish(arr_bbox)
def to_msg(self):
    """Serialize this track into a flat (z = 0) BoundingBox message.

    The latest center point and the stored direction vector are packed into
    pose; value carries d_area.
    """
    box = BoundingBox()
    #box.header = header
    box.label = self.id
    box.value = self.d_area
    latest_center = self.center[-1]
    box.pose.position.x = latest_center[0]
    box.pose.position.y = latest_center[1]
    box.pose.position.z = 0
    # Orientation xy encodes the motion direction, not a real quaternion.
    box.pose.orientation.x = self.direction[0]
    box.pose.orientation.y = self.direction[1]
    box.pose.orientation.z = 0
    box.pose.orientation.w = 0
    box.dimensions.x = self.withe
    box.dimensions.y = self.hight
    box.dimensions.z = 0
    return box
def to_msg(self):
    """Convert the stored [x1, y1, x2, y2] corner bbox into a flat
    BoundingBox message positioned at the top-left corner."""
    msg = BoundingBox()
    msg.label = self.id
    msg.value = 0
    msg.pose.position.x = self.bbox[0]
    msg.pose.position.y = self.bbox[1]
    msg.pose.position.z = 0
    msg.pose.orientation.x = 0
    msg.pose.orientation.y = 0
    # BUG FIX: orientation.x was assigned twice; the second write was clearly
    # meant for .z (both default to 0, so behavior is unchanged).
    msg.pose.orientation.z = 0
    msg.pose.orientation.w = 0
    # Width/height from corner differences.
    msg.dimensions.x = self.bbox[2] - self.bbox[0]
    msg.dimensions.y = self.bbox[3] - self.bbox[1]
    msg.dimensions.z = 0
    #print (msg)
    return msg
def vis_callback(msg):
    """Visualize semantic-map objects as a BoundingBoxArray in the map frame.

    Pose and size strings are fetched from the ontology per object; the box
    label (which drives the rviz color) is keyed by object name.
    """
    # Color labels per object name; anything unknown falls back to 10.
    label_by_name = {
        "hingeddoor": 1,
        "elevatordoor": 2,
        "A": 3,
        "B": 4,
        "C": 5,
    }
    obj_bba = BoundingBoxArray()
    obj_bba.header.frame_id = 'map'
    for obj in msg.objects:
        obj_bb = BoundingBox()
        res = tosm.query_individual(obj.object_name + str(obj.ID))
        obj_bb.header = obj.header
        # Pose/size come back as "[a, b, ...]" strings; strip brackets, split.
        pose = res.pose[0].replace('[', '').replace(']', '').split(',')
        size = res.size[0].replace('[', '').replace(']', '').split(',')
        obj_bb.pose.position.x = float(pose[0])
        obj_bb.pose.position.y = float(pose[1])
        # Lift by half the height so the box sits on the stored z.
        obj_bb.pose.position.z = float(pose[2]) + float(size[2]) / 2.0
        obj_bb.pose.orientation.x = float(pose[3])
        obj_bb.pose.orientation.y = float(pose[4])
        obj_bb.pose.orientation.z = float(pose[5])
        obj_bb.pose.orientation.w = float(pose[6])
        # bounding box size
        obj_bb.dimensions.x = float(size[0])
        obj_bb.dimensions.y = float(size[1])
        obj_bb.dimensions.z = float(size[2])
        # likelihood
        obj_bb.value = 1
        # determine the color
        obj_bb.label = label_by_name.get(obj.object_name, 10)
        obj_bba.boxes.append(obj_bb)
    ses_map_object_vis_pub.publish(obj_bba)
def publish(self, event):
    """Timer callback: publish self.boxes (list of dict specs) as a
    BoundingBoxArray stamped with the timer's current_real time.

    Each spec requires 'position' and 'dimension'; 'rotation' (Euler) and
    'label' are optional.
    """
    arr = BoundingBoxArray()
    arr.header.seq = self.seq
    arr.header.frame_id = self.frame_id
    arr.header.stamp = event.current_real
    for spec in self.boxes:
        msg = BoundingBox()
        msg.header.seq = self.seq
        msg.header.frame_id = self.frame_id
        msg.header.stamp = event.current_real
        msg.pose.position = Point(*spec['position'])
        euler = spec.get('rotation', [0, 0, 0])
        msg.pose.orientation = Quaternion(*quaternion_from_euler(*euler))
        msg.dimensions = Vector3(*spec['dimension'])
        msg.label = spec.get('label', 0)
        arr.boxes.append(msg)
    self.pub.publish(arr)
def callback(self, front_MarkerArray, back_MarkerArray, TwistStamped):
    """Fuse front/back detection MarkerArrays, run the MOT tracker, and
    publish tracked boxes plus a full set of rviz visualization markers
    (labels, velocity arrows, paths, warning lines, ego markers)."""
    # print("front",len(front_MarkerArray.markers)/4)
    # print("back",len(back_MarkerArray.markers)/4)
    #
    # Concat front and back MarkerArray Messages
    add_MarkerArray = copy.deepcopy(front_MarkerArray)
    for i in range(len(back_MarkerArray.markers)):
        add_MarkerArray.markers.append(back_MarkerArray.markers[i])
    # print("add",len(add_MarkerArray.markers)/4)
    # print("done")
    if len(add_MarkerArray.markers) == 0:
        return
    header = add_MarkerArray.markers[0].header
    frame = header.seq
    boxes = BoundingBoxArray()  # 3D Boxes with JSK
    boxes.header = header
    texts = PictogramArray()  # Labels with JSK
    texts.header = header
    obj_ori_arrows = MarkerArray()  # arrow with visualization_msgs
    velocity_markers = MarkerArray()  # text with visualization_msgs
    obj_path_markers = MarkerArray()  # passed path
    warning_line_markers = MarkerArray()
    dets = np.zeros((0, 9))  # (None, 9): 9 = number of 3D bbox parameters used
    obj_box_info = np.empty((0, 7))
    obj_label_info = np.empty((0, 2))
    # Display the frame sequence number in rviz.
    overlayTxt = OverlayText()
    overlayTxt.left = 10
    overlayTxt.top = 10
    overlayTxt.width = 1200
    overlayTxt.height = 1200
    overlayTxt.fg_color.a = 1.0
    overlayTxt.fg_color.r = 1.0
    overlayTxt.fg_color.g = 1.0
    overlayTxt.fg_color.b = 1.0
    overlayTxt.text_size = 12
    overlayTxt.text = "Frame_seq : {}".format(frame)
    det_boxes = BoundingBoxArray()  # 3D Boxes with JSK
    det_boxes.header = header
    # Receive each objects info in this frame
    for object_info in add_MarkerArray.markers:
        # extract info [ frame,type(label),tx,ty,tz,h,w,l,ry ]
        if object_info.ns == "/detection/lidar_detector/box_markers":
            tx = object_info.pose.position.x
            ty = object_info.pose.position.y
            tz = object_info.pose.position.z
            l = object_info.scale.x
            w = object_info.scale.y
            h = object_info.scale.z
            quaternion_xyzw = [
                object_info.pose.orientation.x, object_info.pose.orientation.y,
                object_info.pose.orientation.z, object_info.pose.orientation.w
            ]
            rz = tf.transformations.euler_from_quaternion(quaternion_xyzw)[2]
            # Row layout [-ty, -tz, tx-0.27, h, w, l, -rz+pi/2] converts from
            # the velodyne frame to the tracker's camera-style frame
            # (0.27 appears to be a sensor offset — TODO confirm).
            obj_box_info = np.append(
                obj_box_info,
                [[-ty, -tz, tx - 0.27, h, w, l, -rz + np.pi / 2]],
                axis=0)
            size_det = Vector3(l, w, h)
            det_box = BoundingBox()
            det_box.header = header
            det_box.pose.position = Point(tx, ty, tz)
            # Unavoidable 90-degree correction is applied only at the ends.
            q_det_box = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)
            det_box.pose.orientation = Quaternion(*q_det_box)
            det_box.dimensions = size_det
            det_boxes.boxes.append(det_box)
        elif object_info.ns == "/detection/lidar_detector/label_markers":
            label = object_info.text.strip()
            if label == '':
                label = 'None'
            obj_label_info = np.append(obj_label_info, [[frame, label]], axis=0)
    dets = np.concatenate((obj_label_info, obj_box_info), axis=1)
    self.pub_det_markerarray.publish(det_boxes)
    del current_id_list[:]
    # All Detection Info in one Frame
    bboxinfo = dets[dets[:, 0] == str(frame), 2:9]  # [ tx, ty, tz, h, w, l, rz ]
    additional_info = dets[dets[:, 0] == str(frame), 0:2]  # frame, label
    reorder = [3, 4, 5, 0, 1, 2, 6]  # [tx,ty,tz,h,w,l,ry] -> [h,w,l,tx,ty,tz,theta]
    reorder_back = [3, 4, 5, 0, 1, 2, 6]  # [h,w,l,tx,ty,tz,theta] -> [tx,ty,tz,h,w,l,ry]
    reorder2velo = [2, 0, 1, 3, 4, 5, 6]
    bboxinfo = bboxinfo[:, reorder]  # reorder bboxinfo parameter [h,w,l,x,y,z,theta]
    bboxinfo = bboxinfo.astype(np.float64)
    dets_all = {'dets': bboxinfo, 'info': additional_info}
    # ObjectTracking from Detection
    trackers = self.mot_tracker.update(dets_all)  # h,w,l,x,y,z,theta
    trackers_bbox = trackers[:, 0:7]
    trackers_info = trackers[:, 7:10]  # id, frame, label
    trackers_bbox = trackers_bbox[:, reorder_back]  # reorder_back bboxinfo parameter [tx,ty,tz,h,w,l,ry]
    trackers_bbox = trackers_bbox[:, reorder2velo]  # reorder coordinate system cam to velo
    trackers_bbox = trackers_bbox.astype(np.float64)
    # Undo the sign flips applied when building obj_box_info above.
    trackers_bbox[:, 0] = trackers_bbox[:, 0]
    trackers_bbox[:, 1] = trackers_bbox[:, 1] * -1
    trackers_bbox[:, 2] = trackers_bbox[:, 2] * -1
    trackers_bbox[:, 6] = trackers_bbox[:, 6] * -1
    # Extract and use each tracked object's info.
    for b, info in zip(trackers_bbox, trackers_info):
        bbox = BoundingBox()
        bbox.header = header
        # Unpack parameters [tx,ty,tz,h,w,l,rz].
        tx_trk, ty_trk, tz_trk = float(b[0]), float(b[1]), float(b[2])
        rz_trk = float(b[6])
        size_trk = Vector3(float(b[5]), float(b[4]), float(b[3]))
        obj_id = info[0]
        label_trk = info[2]
        bbox_color = colorCategory20(int(obj_id))
        odom_mat = get_odom(self.tf2, "velo_link", "map")
        xyz = np.array(b[:3]).reshape(1, -1)
        points = np.array((0, 3), float)
        if odom_mat is not None:
            points = get_transformation(odom_mat, xyz)
        # points_path stores and returns the points the object passed through
        # up to the previous frames.
        # obj_id and bbox.label are the same data, differing only in type.
        # path_points_list = points_path(tx_trk, ty_trk, tz_trk, obj_id)
        path_points_list = points_path(points[0, 0], points[0, 1],
                                       points[0, 2], obj_id)
        map_header = copy.deepcopy(header)
        map_header.frame_id = "/map"
        bbox_color = colorCategory20(int(obj_id))
        path_marker = Marker(
            type=Marker.LINE_STRIP,
            id=int(obj_id),
            lifetime=rospy.Duration(0.5),
            # pose=Pose(Point(0,0,0), Quaternion(0, 0, 0, 1)),  # origin point position
            scale=Vector3(0.1, 0.0, 0.0),  # line width
            header=map_header,
            color=bbox_color)
        path_marker.points = path_points_list
        obj_path_markers.markers.append(path_marker)
        # Fill the trackers' BoundingBoxArray.
        bbox.pose.position = Point(tx_trk, ty_trk, tz_trk / 2.0)
        # Unavoidable 90-degree correction applied only at the output end.
        q_box = tf.transformations.quaternion_from_euler(
            0.0, 0.0, rz_trk + np.pi / 2)
        bbox.pose.orientation = Quaternion(*q_box)
        bbox.dimensions = size_trk
        bbox.label = int(obj_id)
        boxes.boxes.append(bbox)
        picto_text = Pictogram()
        picto_text.header = header
        picto_text.mode = Pictogram.STRING_MODE
        picto_text.pose.position = Point(tx_trk, ty_trk, -tz_trk)
        # q = tf.transformations.quaternion_from_euler(0.7, 0.0, -0.7)
        picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
        picto_text.size = 4
        picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
        picto_text.character = label_trk + ' ' + str(bbox.label)
        texts.pictograms.append(picto_text)
        # GPS sensor values
        oxtLinear = TwistStamped.twist.linear
        # oxtLinear = TwistStamped.twist.linear
        # Estimate each tracker's velocity.
        obj_velo, dx_t, dy_t, dz_t = obj_velocity([tx_trk, ty_trk, tz_trk],
                                                  bbox.label, oxtLinear)
        if obj_velo != None:
            obj_velo = np.round_(obj_velo, 1)  # m/s
            obj_velo = obj_velo * 3.6  # km/h
        obj_velo_scale = convert_velo2scale(obj_velo)
        # Tracker orientation arrow.
        # Unavoidable 90-degree correction applied only at the output end.
        q_ori = tf.transformations.quaternion_from_euler(
            0.0, 0.0, rz_trk + np.pi / 2)
        obj_ori_arrow = Marker(
            type=Marker.ARROW,
            id=bbox.label,
            lifetime=rospy.Duration(0.2),
            pose=Pose(Point(tx_trk, ty_trk, tz_trk / 2.0), Quaternion(*q_ori)),
            scale=Vector3(obj_velo_scale, 0.5, 0.5),
            header=header,
            # color=ColorRGBA(0.0, 1.0, 0.0, 0.8))
            color=bbox_color)
        obj_ori_arrows.markers.append(obj_ori_arrow)
        obj_velo_marker = Marker(type=Marker.TEXT_VIEW_FACING,
                                 id=bbox.label,
                                 lifetime=rospy.Duration(0.5),
                                 pose=Pose(Point(tx_trk, ty_trk, tz_trk),
                                           Quaternion(0.0, -0.5, 0.0, 0.5)),
                                 scale=Vector3(1.5, 1.5, 1.5),
                                 header=header,
                                 color=ColorRGBA(1.0, 1.0, 1.0, 1.0),
                                 text="{}km/h".format(obj_velo))
        velocity_markers.markers.append(obj_velo_marker)
        current_id_list.append(bbox.label)
        # Warning object line
        warning_line = Marker(
            type=Marker.LINE_LIST,
            id=int(obj_id),
            lifetime=rospy.Duration(0.2),
            pose=Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1)),  # origin point position
            scale=Vector3(0.2, 0.0, 0.0),  # line width
            header=header,
            color=ColorRGBA(1.0, 0.0, 0.0, 1.0))
        d = dist_from_objBbox(tx_trk, ty_trk, tz_trk, size_trk.x, size_trk.y,
                              size_trk.z)
        if d < MIN_WARNING_DIST:
            warning_line.points = Point(tx_trk, ty_trk,
                                        tz_trk), Point(0.0, 0.0, 0.0)
            warning_line_markers.markers.append(warning_line)
    # Change Outer Circle Color
    outer_circle_color = ColorRGBA(1.0 * 25 / 255, 1.0, 0.0, 1.0)
    if len(warning_line_markers.markers) > 0:
        outer_circle_color = ColorRGBA(1.0 * 255 / 255, 1.0 * 0 / 255,
                                       1.0 * 0 / 255, 1.0)
    # ego_vehicle's warning boundary
    # NOTE(review): obj_id and oxtLinear below are leftovers from the last
    # loop iteration; if the tracker returned no objects this raises
    # NameError — confirm intended behavior.
    outer_circle = Marker(
        type=Marker.CYLINDER,
        id=int(obj_id),
        lifetime=rospy.Duration(0.5),
        pose=Pose(Point(0.0, 0.0, -2.0), Quaternion(0, 0, 0, 1)),
        scale=Vector3(8.0, 8.0, 0.1),  # line width
        header=header,
        color=outer_circle_color)
    inner_circle = Marker(
        type=Marker.CYLINDER,
        id=int(obj_id),
        lifetime=rospy.Duration(0.5),
        pose=Pose(Point(0.0, 0.0, -1.8), Quaternion(0, 0, 0, 1)),
        scale=Vector3(7.0, 7.0, 0.2),  # line width
        header=header,
        color=ColorRGBA(0.22, 0.22, 0.22, 1.0))
    # ego-vehicle velocity
    selfvelo = np.sqrt(oxtLinear.x**2 + oxtLinear.y**2 + oxtLinear.z**2)
    selfvelo = np.round_(selfvelo, 1)  # m/s
    selfvelo = selfvelo * 3.6  # km/h
    oxtAngular = TwistStamped.twist.angular
    q_gps = tf.transformations.quaternion_from_euler(oxtAngular.x,
                                                     oxtAngular.y,
                                                     oxtAngular.z)
    # Render the ego-vehicle mesh.
    ego_car = Marker(type=Marker.MESH_RESOURCE,
                     id=0,
                     lifetime=rospy.Duration(0.5),
                     pose=Pose(Point(0.0, 0.0, -1.8), Quaternion(0, 0, 0, 1)),
                     scale=Vector3(1.5, 1.5, 1.5),
                     header=header,
                     action=Marker.ADD,
                     mesh_resource=CAR_DAE_PATH,
                     color=ColorRGBA(1.0, 1.0, 1.0, 1.0))
    # Self ego Velocity
    text_marker = Marker(type=Marker.TEXT_VIEW_FACING,
                         id=0,
                         lifetime=rospy.Duration(0.5),
                         pose=Pose(Point(-7.0, 0.0, 0.0),
                                   Quaternion(0, 0, 0, 1)),
                         scale=Vector3(1.5, 1.5, 1.5),
                         header=header,
                         color=ColorRGBA(1.0, 1.0, 1.0, 1.0),
                         text="{}km/h".format(selfvelo))
    # Drop cached positions for tracks that disappeared this frame.
    for i in prior_trk_xyz.keys():
        if i not in current_id_list:
            prior_trk_xyz.pop(i)
    self.pub_frame_seq.publish(overlayTxt)
    self.pub_boxes.publish(boxes)
    self.pub_pictograms.publish(texts)
    self.pub_selfvelo_text.publish(text_marker)
    # self.pub_selfveloDirection.publish(arrow_marker)
    self.pub_objs_ori.publish(obj_ori_arrows)
    self.pub_objs_velo.publish(velocity_markers)
    self.pub_path.publish(obj_path_markers)
    self.pub_warning_lines.publish(warning_line_markers)
    self.pub_ego_outCircle.publish(outer_circle)
    self.pub_ego_innerCircle.publish(inner_circle)
    self.pub_ego_car.publish(ego_car)
def readXML(file):
    """Parse a KITTI tracklet XML file into per-frame annotation dicts.

    Args:
        file: path to a KITTI ``tracklet_labels.xml`` file.

    Returns:
        ``(d, boxes_2d, pictograms)`` -- three dicts keyed by absolute frame
        number, holding lists of 3D BoundingBox msgs, 2D image_rect msgs,
        and Pictogram label msgs for every tracklet pose in that frame.

    Relies on module-level helpers/globals defined elsewhere in this file
    (ET, tf, np, math, wrapToPi, projectToImage, rt_matrix, cam_to_cam, ...).
    """
    tree = ET.parse(file)
    root = tree.getroot()
    item = root.findall('./tracklets/item')

    d = {}
    boxes_2d = {}
    pictograms = {}

    for i, v in enumerate(item):
        h = float(v.find('h').text)
        w = float(v.find('w').text)
        l = float(v.find('l').text)
        frame = int(v.find('first_frame').text)
        size = Vector3(l, w, h)
        label = v.find('objectType').text
        pose = v.findall('./poses/item')

        for j, p in enumerate(pose):
            tx = float(p.find('tx').text)
            ty = float(p.find('ty').text)
            tz = float(p.find('tz').text)
            rz = float(p.find('rz').text)
            occlusion = float(p.find('occlusion').text)
            q = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)

            # 3D box in the velodyne frame; z is halved -- presumably to
            # center the box vertically (KITTI gives the bottom face).
            # TODO(review): confirm against the visualizer.
            b = BoundingBox()
            b.pose.position = Vector3(tx, ty, tz / 2.0)
            b.pose.orientation = Quaternion(*q)
            b.dimensions = size
            b.label = i

            # Text label marker for this tracklet.
            picto_text = Pictogram()
            picto_text.mode = Pictogram.STRING_MODE
            picto_text.pose.position = Vector3(tx, ty, -tz / 2.0)
            q = tf.transformations.quaternion_from_euler(0.7, 0.0, -0.7)
            picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
            picto_text.size = 5
            picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
            picto_text.character = label

            # Bounding Box corners (box built at the origin, z in [0, h]).
            corner_x = np.array(
                [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])
            corner_y = np.array(
                [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])
            corner_z = np.array([0, 0, 0, 0, h, h, h, h])
            rz = wrapToPi(rz)

            # Create box at origin, then rotate and translate according to
            # the pose; finally, project into the 2D image.
            R = np.array([[math.cos(rz), -math.sin(rz), 0],
                          [math.sin(rz), math.cos(rz), 0],
                          [0, 0, 1]])
            corner_3d = np.dot(R, np.array([corner_x, corner_y, corner_z]))
            # Translate
            corner_3d[0, :] = corner_3d[0, :] + tx
            corner_3d[1, :] = corner_3d[1, :] + ty
            corner_3d[2, :] = corner_3d[2, :] + tz
            # Homogeneous coordinates, then into the camera frame.
            # BUGFIX: np.float was removed in NumPy 1.24; float64 is the
            # identical dtype it aliased.
            low_row = np.vstack(
                [corner_3d, np.ones(corner_3d.shape[1], dtype=np.float64)])
            corner_3d = np.dot(np.asarray(rt_matrix), low_row)

            # Orientation (heading) vector through the same transform chain.
            orientation_3d = np.dot(R, np.array([[0, 0.7 * l], [0, 0], [0, 0]]))
            orientation_3d[0, :] = orientation_3d[0, :] + tx
            orientation_3d[1, :] = orientation_3d[1, :] + ty
            orientation_3d[2, :] = orientation_3d[2, :] + tz
            low_row = np.vstack([
                orientation_3d,
                np.ones(orientation_3d.shape[1], dtype=np.float64)
            ])
            orientation_3d = np.dot(rt_matrix, low_row)

            # Camera intrinsics: 3x3 part of P_rect_02.
            K = np.asarray(cam_to_cam['P_rect_02']).reshape(3, 4)
            K = K[:3, :3]
            corners_2d = projectToImage(corner_3d, K)
            orientation_2d = projectToImage(orientation_3d, K)

            # Axis-aligned 2D box from the projected corners.
            x1 = min(corners_2d[0, :])
            x2 = max(corners_2d[0, :])
            y1 = min(corners_2d[1, :])
            y2 = max(corners_2d[1, :])

            bbox_2d = image_rect()
            bbox_2d.score = -10.0  # sentinel: invalid / filtered-out box
            # Keep only vehicles in front of the camera, inside the image,
            # and not heavily occluded.
            if ((label == 'Car' or label == 'Truck' or label == 'Van')
                    and np.any(corner_3d[2, :] >= 0.5)) and (
                        np.any(orientation_3d[2, :] >= 0.5) and x1 >= 0
                        and x2 >= 0 and y1 > 0 and y2 >= 0 and occlusion < 2):
                bbox_2d.x = x1
                bbox_2d.y = y1
                bbox_2d.width = x2 - x1
                bbox_2d.height = y2 - y1
                bbox_2d.score = 1.0

            # BUGFIX: dict.has_key() was removed in Python 3; use `in`.
            if (frame + j) in d:
                d[frame + j].append(b)
                boxes_2d[frame + j].append(bbox_2d)
                pictograms[frame + j].append(picto_text)
            else:
                d[frame + j] = [b]
                boxes_2d[frame + j] = [bbox_2d]
                pictograms[frame + j] = [picto_text]

    return d, boxes_2d, pictograms
#!/usr/bin/env python
"""Demo node: publish two BoundingBoxes with an animated pose (snippet)."""
import math

import rospy
from jsk_recognition_msgs.msg import BoundingBoxArray, BoundingBox
from tf.transformations import *

rospy.init_node("bbox_sample")
# BUGFIX: omitting queue_size is deprecated in rospy and falls back to the
# legacy synchronous publish; queue_size=10 matches the sibling sample
# elsewhere in this file.
pub = rospy.Publisher("bbox", BoundingBoxArray, queue_size=10)
r = rospy.Rate(24)
counter = 0
while not rospy.is_shutdown():
    box_a = BoundingBox()
    box_b = BoundingBox()
    box_a.label = 2
    box_b.label = 5
    box_arr = BoundingBoxArray()
    now = rospy.Time.now()
    # All three messages share one stamp and the fixed "map" frame.
    box_a.header.stamp = now
    box_b.header.stamp = now
    box_arr.header.stamp = now
    box_a.header.frame_id = "map"
    box_b.header.frame_id = "map"
    box_arr.header.frame_id = "map"
    # box_a spins about +z: one full revolution every 100 ticks.
    q = quaternion_about_axis((counter % 100) * math.pi * 2 / 100.0, [0, 0, 1])
    box_a.pose.orientation.x = q[0]
    box_a.pose.orientation.y = q[1]
    box_a.pose.orientation.z = q[2]
    box_a.pose.orientation.w = q[3]
    box_b.pose.orientation.w = 1
    box_b.pose.position.y = 2
    # box_b pulses in x-dimension, cycling 0.1 .. 1.0.
    box_b.dimensions.x = (counter % 10 + 1) * 0.1
def velo_callback(msg):
    """PointCloud2 callback: run the 3D detector and publish results.

    Converts the incoming cloud to an Nx4 xyzi array, runs `proc.run`,
    builds a BoundingBoxArray and DetectedObjectArray from the detections,
    and publishes them together with the FOV-cropped cloud.
    Uses module globals: proc, seq_num, the pub_* publishers, and the
    helpers hv_in_range / xyz_array_to_pointcloud2 / box_np_ops.
    """
    global proc
    global seq_num
    time_start = rospy.get_rostime()
    # Latency between message stamp and this callback, for profiling.
    callback_time = rospy.get_rostime() - msg.header.stamp
    callback_time_secs = callback_time.secs + callback_time.nsecs/1e9
    print("Time between message publish and callback:", callback_time_secs, "seconds")
    arr_bbox = BoundingBoxArray()
    arr_dobject = DetectedObjectArray()
    #pcl_msg = pc2.read_points(msg, skip_nans=False, field_names=("x", "y", "z","intensity","ring"))
    #pcl_msg = pc2.read_points(msg, skip_nans=False, field_names=("x", "y", "z"))
    #np_p = np.array(list(pcl_msg), dtype=np.float32)
    np_p = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(msg)
    #print("np_p shape: ", np_p.shape)
    #np_p = np.delete(np_p, -1, 1) # delete "ring" field
    # convert to xyzi point cloud
    x = np_p[:, 0].reshape(-1)
    y = np_p[:, 1].reshape(-1)
    z = np_p[:, 2].reshape(-1)
    if np_p.shape[1] == 4:  # if intensity field exists
        i = np_p[:, 3].reshape(-1)
    else:
        # No intensity column: pad with zeros so the detector always sees xyzi.
        i = np.zeros((np_p.shape[0], 1)).reshape(-1)
    cloud = np.stack((x, y, z, i)).T
    #print("cloud[0]:",cloud[0,0])
    # start processing: detector returns corner boxes, scores,
    # center-format boxes [x y z dx dy dz yaw], and class-name labels.
    dt_boxes_corners, scores, dt_box_lidar, label = proc.run(cloud)
    #print("dt_boxes_corners :",dt_boxes_corners)
    # print(scores)
    # # field of view cut: keep points in the frontal +/-90 deg horizontal FOV.
    cond = hv_in_range(x=np_p[:, 0],
                       y=np_p[:, 1],
                       z=np_p[:, 2],
                       fov=[-90, 90],
                       fov_type='h')
    cloud_ranged = cloud[cond]
    # pay attention here: xyz_array_to_pointcloud2 only works for Nx3 arrays!
    cloud_ranged_pc2 = xyz_array_to_pointcloud2(cloud_ranged[:, :3], frame_id="velodyne")
    # # print(cond.shape, np_p.shape, cloud.shape)
    # publish to /velodyne_poitns_modified
    # publish_test(cloud_ranged, msg.header.frame_id)
    #publish_test(proc.inputs['points'][0], msg.header.frame_id)
    #print("shape for points_display", points_display[0].shape)
    #publish_test(points_display[0], msg.header.frame_id)
    # Map class-name strings to the numeric labels used by BoundingBox.label.
    label_n = []
    for la, la2 in enumerate(label):
        #print("la is:",la)
        #print("la2 is:",la2)
        if la2 == 'Car':
            label_n.append(1)
        elif la2 == 'Cyclist':
            label_n.append(2)
        elif la2 == 'Pedestrian':
            label_n.append(3)
        else:
            label_n.append(4)  # anything else -> "other"
    # process results
    #print("label_n is ",label_n)
    end_time_1 = rospy.get_rostime() - time_start
    end_time_1_sec = end_time_1.secs + end_time_1.nsecs/1e9
    print("end_time_1 is :", end_time_1_sec)
    # Boolean mask (n_points x n_boxes): which points fall in which box.
    segment_index = box_np_ops.points_in_rbbox(cloud, dt_box_lidar, lidar=True)
    end_time_2 = rospy.get_rostime() - time_start
    end_time_2_sec = end_time_2.secs + end_time_2.nsecs/1e9
    print("end_time_2 is :", end_time_2_sec)
    #print("shape of segment_index is",segment_index.shape)
    #print("segment_index is", segment_index)
    #print(type(cloud))
    #segment_cloud = np.matmul(cloud.T,segment_index)
    segment_cloud_agg = np.array([])
    seq_num = seq_num + 1
    if scores.size != 0:
        # print('Number of detections: ', results['name'].size)
        # NOTE(review): this loop's `i` shadows the intensity array `i`
        # built above; intensity is not used again afterwards, so behavior
        # is unaffected, but a rename would be safer.
        for i in range(scores.size):
            bbox = BoundingBox()
            dobject = DetectedObject()  #DetectedObject, DetectedObjectArray
            #box_test = BoundingBox()
            bbox.header.frame_id = msg.header.frame_id
            dobject.header.frame_id = msg.header.frame_id
            #box_test.header.frame_id = msg.header.frame_id
            # bbox.header.stamp = rospy.Time.now()
            # Yaw is negated here -- presumably a detector/ROS sign
            # convention mismatch; TODO(review) confirm.
            q = quaternion_from_euler(0, 0, -float(dt_box_lidar[i][6]))
            bbox.pose.orientation.x = q[0]
            bbox.pose.orientation.y = q[1]
            bbox.pose.orientation.z = q[2]
            bbox.pose.orientation.w = q[3]
            bbox.pose.position.x = float(dt_box_lidar[i][0])
            bbox.pose.position.y = float(dt_box_lidar[i][1])
            #bbox.pose.position.z = float(dt_box_lidar[i][2]+dt_box_lidar[i][5]/2)
            bbox.pose.position.z = float(dt_box_lidar[i][2])
            bbox.dimensions.x = float(dt_box_lidar[i][3])
            bbox.dimensions.y = float(dt_box_lidar[i][4])
            bbox.dimensions.z = float(dt_box_lidar[i][5])
            bbox.label = label_n[i]
            dobject.header = msg.header
            dobject.header.seq = seq_num
            #dobject.id = i
            #dobject.header.label = label[i]
            #dobject.header.score = scores[i]
            dobject.label = label[i]
            dobject.score = 1.
            dobject.space_frame = "velodyne"
            dobject.pose = bbox.pose
            dobject.dimensions = bbox.dimensions
            # Points belonging to this detection (Nx3).
            # pay attention here: xyz_array_to_pointcloud2 only works for Nx3 arrays!
            segment_cloud = cloud[segment_index[:, i], :3]
            #print("segment_cloud",segment_cloud)
            #print("shape_segment_pc:", segment_cloud.shape)
            segment_cloud_pc2 = xyz_array_to_pointcloud2(segment_cloud)
            #segment_cloud = cloud
            #segment_cloud_pc2 = pc2.create_cloud(header=msg.header,
            #    fields=_make_point_field(4),  # 4 PointFields as channel description
            #    points=segment_cloud)
            #dobject.pointcloud = segment_cloud_pc2
            dobject.valid = True
            #print("shape of segment_cloud",segment_cloud.shape)
            #if segment_cloud_agg.size == 0:
            #    segment_cloud_agg = segment_cloud
            #else:
            #    segment_cloud_agg = np.append(segment_cloud_agg,segment_cloud,axis=0)
            #box_test.pose.orientation.x = q[0]
            #box_test.pose.orientation.x = q[1]
            #box_test.pose.orientation.x = q[2]
            #box_test.pose.orientation.x = q[3]
            #box_test.pose.position.x = 5
            #box_test.pose.position.y = 0
            #box_test.pose.position.z = -1
            #box_test.dimensions.x = float(dt_box_lidar[i][3])
            #box_test.dimensions.y = float(dt_box_lidar[i][4])
            #box_test.dimensions.z = float(dt_box_lidar[i][5])
            arr_bbox.boxes.append(bbox)
            arr_dobject.objects.append(dobject)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_dobject.header = msg.header
    arr_dobject.header.frame_id = msg.header.frame_id
    #arr_dobject.header = msg.header.frame_id
    # arr_bbox.header.stamp = rospy.Time.now()
    #print("arr_bbox.boxes.size() : {} ".format(len(arr_bbox.boxes)))
    #if len(arr_bbox.boxes) is not 0:
    #    for i in range(0, len(arr_bbox.boxes)):
    #        print("[+] [x,y,z,dx,dy,dz] : {}, {}, {}, {}, {}, {}".\
    #            format(arr_bbox.boxes[i].pose.position.x,arr_bbox.boxes[i].pose.position.y,arr_bbox.boxes[i].pose.position.z,\
    #            arr_bbox.boxes[i].dimensions.x,arr_bbox.boxes[i].dimensions.y,arr_bbox.boxes[i].dimensions.z))
    # publish to /voxelnet_arr_bbox
    end_time_3 = rospy.get_rostime() - time_start
    end_time_3_sec = end_time_3.secs + end_time_3.nsecs/1e9
    print("end_time_3 is :", end_time_3_sec)
    pub_arr_bbox.publish(arr_bbox)
    pub_arr_dobject.publish(arr_dobject)
    pub_ranged_cloud.publish(cloud_ranged_pc2)
    #print("size of segment_cloud_agg",segment)
    #publish_segment(segment_cloud_agg,msg.header.frame_id)
    #pub_test_box.publish(box_test)
    #arr_bbox.boxes.clear()
    # Reset accumulators after publishing.
    arr_bbox.boxes = []
    arr_dobject.objects = []
#!/usr/bin/env python
"""Demo node: publish two BoundingBoxes with an animated pose (snippet)."""
import rospy
from jsk_recognition_msgs.msg import BoundingBoxArray, BoundingBox
from tf.transformations import *

rospy.init_node("bbox_sample")
pub = rospy.Publisher("bbox", BoundingBoxArray, queue_size=10)
r = rospy.Rate(24)
counter = 0
while not rospy.is_shutdown():
    box_a = BoundingBox()
    box_b = BoundingBox()
    box_a.label = 2
    box_b.label = 5
    box_arr = BoundingBoxArray()
    now = rospy.Time.now()
    # The two boxes and the array all share one stamp and the "map" frame.
    for m in (box_a, box_b, box_arr):
        m.header.stamp = now
        m.header.frame_id = "map"
    # box_a rotates about +z, completing a revolution every 100 ticks.
    q = quaternion_about_axis((counter % 100) * math.pi * 2 / 100.0, [0, 0, 1])
    (box_a.pose.orientation.x,
     box_a.pose.orientation.y,
     box_a.pose.orientation.z,
     box_a.pose.orientation.w) = q
    box_b.pose.orientation.w = 1
    box_b.pose.position.y = 2
    # box_b's x-dimension pulses through 0.1 .. 1.0.
    box_b.dimensions.x = (counter % 10 + 1) * 0.1
def readXML(file):
    """Parse a KITTI tracklet XML file into per-frame annotation dicts.

    Args:
        file: path to a KITTI ``tracklet_labels.xml`` file.

    Returns:
        ``(d, boxes_2d, pictograms)`` -- three dicts keyed by absolute frame
        number, holding lists of 3D BoundingBox msgs, 2D ImageRect msgs,
        and Pictogram label msgs for every tracklet pose in that frame.

    Relies on module-level helpers/globals defined elsewhere in this file
    (ET, tf, np, math, wrapToPi, projectToImage, rt_matrix, cam_to_cam, ...).
    """
    tree = ET.parse(file)
    root = tree.getroot()
    item = root.findall('./tracklets/item')

    d = {}
    boxes_2d = {}
    pictograms = {}

    for i, v in enumerate(item):
        h = float(v.find('h').text)
        w = float(v.find('w').text)
        l = float(v.find('l').text)
        frame = int(v.find('first_frame').text)
        size = Vector3(l, w, h)
        label = v.find('objectType').text
        pose = v.findall('./poses/item')

        for j, p in enumerate(pose):
            tx = float(p.find('tx').text)
            ty = float(p.find('ty').text)
            tz = float(p.find('tz').text)
            rz = float(p.find('rz').text)
            occlusion = float(p.find('occlusion').text)
            q = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)

            # 3D box in the velodyne frame; z is halved -- presumably to
            # center the box vertically. TODO(review): confirm.
            b = BoundingBox()
            b.pose.position = Vector3(tx, ty, tz/2.0)
            b.pose.orientation = Quaternion(*q)
            b.dimensions = size
            b.label = i

            # Text label marker for this tracklet.
            picto_text = Pictogram()
            picto_text.mode = Pictogram.STRING_MODE
            picto_text.pose.position = Vector3(tx, ty, -tz/2.0)
            q = tf.transformations.quaternion_from_euler(0.7, 0.0, -0.7)
            picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
            picto_text.size = 5
            picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
            picto_text.character = label

            # Bounding Box corners (box built at the origin, z in [0, h]).
            corner_x = np.array([l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2])
            corner_y = np.array([w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2])
            corner_z = np.array([0, 0, 0, 0, h, h, h, h])
            rz = wrapToPi(rz)

            # Create box at origin, then rotate and translate according to
            # the pose; finally, project into the 2D image.
            R = np.array([
                [math.cos(rz), -math.sin(rz), 0],
                [math.sin(rz), math.cos(rz), 0],
                [0, 0, 1]])
            corner_3d = np.dot(R, np.array([corner_x, corner_y, corner_z]))
            # Translate
            corner_3d[0, :] = corner_3d[0, :] + tx
            corner_3d[1, :] = corner_3d[1, :] + ty
            corner_3d[2, :] = corner_3d[2, :] + tz
            # Homogeneous coordinates, then into the camera frame.
            # BUGFIX: np.float was removed in NumPy 1.24; float64 is the
            # identical dtype it aliased.
            low_row = np.vstack(
                [corner_3d, np.ones(corner_3d.shape[1], dtype=np.float64)])
            corner_3d = np.dot(np.asarray(rt_matrix), low_row)

            # Orientation (heading) vector through the same transform chain.
            orientation_3d = np.dot(R, np.array([[0, 0.7*l], [0, 0], [0, 0]]))
            orientation_3d[0, :] = orientation_3d[0, :] + tx
            orientation_3d[1, :] = orientation_3d[1, :] + ty
            orientation_3d[2, :] = orientation_3d[2, :] + tz
            low_row = np.vstack(
                [orientation_3d,
                 np.ones(orientation_3d.shape[1], dtype=np.float64)])
            orientation_3d = np.dot(rt_matrix, low_row)

            # Camera intrinsics: 3x3 part of P_rect_02.
            K = np.asarray(cam_to_cam['P_rect_02']).reshape(3, 4)
            K = K[:3, :3]
            corners_2d = projectToImage(corner_3d, K)
            orientation_2d = projectToImage(orientation_3d, K)

            # Axis-aligned 2D box from the projected corners.
            x1 = min(corners_2d[0, :])
            x2 = max(corners_2d[0, :])
            y1 = min(corners_2d[1, :])
            y2 = max(corners_2d[1, :])

            bbox_2d = ImageRect()
            bbox_2d.score = -10.0  # sentinel: invalid / filtered-out box
            # Keep only vehicles in front of the camera, inside the image,
            # and not heavily occluded.
            if ((label == 'Car' or label == 'Truck' or label == 'Van')
                    and np.any(corner_3d[2, :] >= 0.5)) and (
                        np.any(orientation_3d[2, :] >= 0.5) and x1 >= 0
                        and x2 >= 0 and y1 > 0 and y2 >= 0 and occlusion < 2):
                bbox_2d.x = x1
                bbox_2d.y = y1
                bbox_2d.width = x2 - x1
                bbox_2d.height = y2 - y1
                bbox_2d.score = 1.0

            # BUGFIX: dict.has_key() was removed in Python 3; use `in`.
            if (frame + j) in d:
                d[frame + j].append(b)
                boxes_2d[frame + j].append(bbox_2d)
                pictograms[frame + j].append(picto_text)
            else:
                d[frame + j] = [b]
                boxes_2d[frame + j] = [bbox_2d]
                pictograms[frame + j] = [picto_text]

    return d, boxes_2d, pictograms
#!/usr/bin/env python
"""Demo node: publish two BoundingBoxes with an animated pose (snippet)."""
import math

import rospy
from jsk_recognition_msgs.msg import BoundingBoxArray, BoundingBox
from tf.transformations import *

rospy.init_node("bbox_sample")
# BUGFIX: omitting queue_size is deprecated in rospy and falls back to the
# legacy synchronous publish; queue_size=10 matches the sibling sample
# elsewhere in this file.
pub = rospy.Publisher("bbox", BoundingBoxArray, queue_size=10)
r = rospy.Rate(24)
counter = 0
while not rospy.is_shutdown():
    box_a = BoundingBox()
    box_b = BoundingBox()
    box_a.label = 2
    box_b.label = 5
    box_arr = BoundingBoxArray()
    now = rospy.Time.now()
    # All three messages share one stamp and the fixed "map" frame.
    box_a.header.stamp = now
    box_b.header.stamp = now
    box_arr.header.stamp = now
    box_a.header.frame_id = "map"
    box_b.header.frame_id = "map"
    box_arr.header.frame_id = "map"
    # box_a spins about +z: one full revolution every 100 ticks.
    q = quaternion_about_axis((counter % 100) * math.pi * 2 / 100.0, [0, 0, 1])
    box_a.pose.orientation.x = q[0]
    box_a.pose.orientation.y = q[1]
    box_a.pose.orientation.z = q[2]
    box_a.pose.orientation.w = q[3]
    box_b.pose.orientation.w = 1
    box_b.pose.position.y = 2
def rslidar_callback(msg):
    """PointCloud2 callback: run the 3D detector and publish detections.

    Detections with score > 0.4 become jsk BoundingBoxes (published on
    pub_arr_bbox) and CUBE Markers (published on pubMarker). Uses module
    globals: proc_1, get_xyz_points, yaw2quaternion, pub_arr_bbox, pubMarker.
    """
    t_t = time.time()
    arr_bbox = BoundingBoxArray()

    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg)
    np_p = get_xyz_points(msg_cloud, True)
    print(" ")
    scores, dt_box_lidar, types = proc_1.run(np_p)

    MarkerArray_list = MarkerArray()  # one CUBE marker per kept detection

    if scores.size != 0:
        for i in range(scores.size):
            if scores[i] > 0.4:  # confidence threshold
                bbox = BoundingBox()
                bbox.header.frame_id = msg.header.frame_id
                bbox.header.stamp = rospy.Time.now()
                # yaw2quaternion returns (w, x, y, z); map accordingly.
                q = yaw2quaternion(float(dt_box_lidar[i][6]))
                bbox.pose.orientation.x = q[1]
                bbox.pose.orientation.y = q[2]
                bbox.pose.orientation.z = q[3]
                bbox.pose.orientation.w = q[0]
                # NOTE(review): the -69.12/2 x-offset looks like a detector
                # grid-origin shift -- confirm against the model config.
                bbox.pose.position.x = float(dt_box_lidar[i][0]) - 69.12 / 2
                bbox.pose.position.y = float(dt_box_lidar[i][1])
                bbox.pose.position.z = float(dt_box_lidar[i][2])
                bbox.dimensions.x = float(dt_box_lidar[i][3])
                bbox.dimensions.y = float(dt_box_lidar[i][4])
                bbox.dimensions.z = float(dt_box_lidar[i][5])
                bbox.value = scores[i]
                bbox.label = int(types[i])
                arr_bbox.boxes.append(bbox)

                # Matching RViz marker with the same pose and size.
                obj = Marker()
                obj.header.stamp = rospy.Time.now()
                obj.header.frame_id = msg.header.frame_id
                obj.type = Marker.CUBE
                obj.id = i
                obj.lifetime = rospy.Duration.from_sec(1)
                obj.pose.position.x = float(dt_box_lidar[i][0]) - 69.12 / 2
                obj.pose.position.y = float(dt_box_lidar[i][1])
                obj.pose.position.z = float(dt_box_lidar[i][2])
                q = yaw2quaternion(float(dt_box_lidar[i][6]))
                obj.pose.orientation.x = q[1]
                obj.pose.orientation.y = q[2]
                obj.pose.orientation.z = q[3]
                obj.pose.orientation.w = q[0]
                obj.scale.x = float(dt_box_lidar[i][3])
                obj.scale.y = float(dt_box_lidar[i][4])
                obj.scale.z = float(dt_box_lidar[i][5])
                # NOTE(review): ColorRGBA channels are expected in 0..1;
                # r=255 is out of range (renders like r=1.0) -- confirm intent.
                obj.color.r = 255
                obj.color.a = 0.5
                MarkerArray_list.markers.append(obj)

    print("total callback time: ", time.time() - t_t)
    arr_bbox.header.frame_id = msg.header.frame_id
    arr_bbox.header.stamp = msg.header.stamp
    # BUGFIX: original guarded with `if len(arr_bbox.boxes) is not 0:` --
    # an identity comparison with an int literal (SyntaxWarning on
    # Python >= 3.8). Both branches ended up publishing, so publish
    # unconditionally and then reset the list.
    pub_arr_bbox.publish(arr_bbox)
    arr_bbox.boxes = []
    pubMarker.publish(MarkerArray_list)