def generate_scans_pickle(trajectory_bag_path, localization_image, min_angle,
                          max_angle, samples_num, min_distance, max_distance,
                          resolution, r_primary_search_samples,
                          r_secondary_search_step, output_pickle_path):
    """Generate a synthetic contours scan per UGV pose in a bag and pickle them.

    Every message on the ``ugv_pose`` topic of the bag yields one scan,
    centered on the pose's (x, y) point; the result is pickled as a
    ``{message.header.stamp: numpy array of ranges}`` mapping.

    :param trajectory_bag_path: path of the rosbag holding ``ugv_pose`` messages
    :param localization_image: image the contours scan is generated against
    :param min_angle: scan start angle (passed through to contours_scan.generate)
    :param max_angle: scan end angle
    :param samples_num: number of rays per scan
    :param min_distance: minimal scan distance (pixels, presumably -- confirm)
    :param max_distance: maximal scan distance (pixels, presumably -- confirm)
    :param resolution: meters-per-pixel conversion factor
    :param r_primary_search_samples: coarse radial search sample count
    :param r_secondary_search_step: fine radial search step
    :param output_pickle_path: destination file for the pickled mapping
    """
    trajectory_bag = rosbag.Bag(trajectory_bag_path)
    # Fix: the bag handle was previously never closed (resource leak);
    # close it even if scan generation raises.
    try:
        timestamp_to_scan = {}
        for _, message, timestamp in trajectory_bag.read_messages(
                topics='ugv_pose'):
            scan_ranges = contours_scan.generate(
                localization_image,
                center_x=message.point.x,
                center_y=message.point.y,
                min_angle=min_angle,
                max_angle=max_angle,
                samples_num=samples_num,
                min_distance=min_distance,
                max_distance=max_distance,
                resolution=resolution,
                r_primary_search_samples=r_primary_search_samples,
                r_secondary_search_step=r_secondary_search_step)
            # Keyed by the message's own stamp, not the bag read timestamp.
            timestamp_to_scan[message.header.stamp] = np.asanyarray(scan_ranges)
    finally:
        trajectory_bag.close()
    with open(output_pickle_path, 'wb') as f:
        pickle.dump(timestamp_to_scan, f)
# Toggle for drawing the scan's max-range circle in the visualization output.
draw_range_circle = False
# Placeholder meters-per-pixel resolution used by the drawer script below
# (presumably measured from a specific map -- confirm before reuse).
dummy_resolution = 0.0259837881206
#################################################################################################

if __name__ == '__main__':
    # Synthetic-scan drawer script: generates one scan at a user-picked pixel
    # of a canopies map and converts it to image coordinates for visualization.
    # NOTE(review): `snapshots`, `image_key` and `noise_sigma` are not defined
    # in this view -- presumably module-level config; confirm before running.
    execution_dir = utils.create_new_execution_folder('synthetic_scan_drawer')
    image = cv2.imread(snapshots[image_key].path)
    map_image = maps_generation.generate_canopies_map(image)
    # Interactive: the user clicks the scan center on the map image.
    center_x, center_y = cv_utils.sample_pixel_coordinates(map_image)
    scan_ranges = contours_scan.generate(
        map_image,
        center_x=center_x,
        center_y=center_y,
        min_angle=config.synthetic_scan_min_angle,
        max_angle=config.synthetic_scan_max_angle,
        samples_num=config.synthetic_scan_samples_num,
        min_distance=config.synthetic_scan_min_distance,
        max_distance=config.synthetic_scan_max_distance,
        resolution=dummy_resolution,
        r_primary_search_samples=config.
        synthetic_scan_r_primary_search_samples,
        r_secondary_search_step=config.synthetic_scan_r_secondary_search_step)
    # Optionally corrupt the scan with zero-mean Gaussian noise.
    if noise_sigma is not None:
        noise = np.random.normal(loc=0,
                                 scale=noise_sigma,
                                 size=len(scan_ranges))
        scan_ranges += noise
    # Back-project the 1-D range readings to (x, y) pixel coordinates.
    trunks_scan_points_list = cv_utils.get_coordinates_list_from_scan_ranges(
        scan_ranges, center_x, center_y, config.synthetic_scan_min_angle,
        config.synthetic_scan_max_angle, dummy_resolution)
    viz_image = image.copy()
# --- Example 3 (snippet separator; original artifact read "Ejemplo n.º 3" / "0") ---
def generate_scans_pickle(video_path, output_pickle_path, resolution):
    """Replay a video, synthesize a laser scan per frame, and pickle the result.

    For every readable frame the vehicle is segmented out of the image, a
    synthetic contours scan is generated around its position, and the scan is
    packed into a ROS ``LaserScan`` stamped with the video position.  The
    collected ``OrderedDict {video_timestamp: (LaserScan, PointStamped)}`` is
    pickled to ``output_pickle_path``.

    NOTE(review): ``min_angle``, ``max_angle``, ``samples_num``,
    ``min_distance``, ``max_distance``, ``r_primary_search_samples``,
    ``r_secondary_search_step`` and ``frame_id`` are read from the enclosing
    module scope -- confirm they are defined before calling.

    :param video_path: path of the input video file
    :param output_pickle_path: destination path for the pickled mapping
    :param resolution: meters-per-pixel factor used to scale pixel distances
    :raises Exception: if the video stream cannot be opened
    """
    scans_and_ugv_poses = OrderedDict()
    prev_scan_time = None
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():  # idiom fix: was `cap.isOpened() == False`
        raise Exception('Error opening video stream')
    frame_idx = 0
    frames_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    # Fix: release the capture device even if an exception escapes the loop
    # (previously `cap.release()` was skipped on any error).
    try:
        while cap.isOpened():
            is_success, frame = cap.read()
            frame_idx += 1
            if frame_idx >= frames_count:
                break
            if not is_success:
                print('skipped!!!!!!')
                continue
            # The first usable frame only seeds the scan-time bookkeeping.
            if prev_scan_time is None:
                prev_scan_time = datetime.datetime.now()
                continue
            curr_scan_time = datetime.datetime.now()
            vehicle_pose = segmentation.extract_vehicle(frame)
            if vehicle_pose is None:
                continue
            # Guard-clause `continue` above replaces the old else-nesting.
            contours_image = maps_generation.generate_canopies_map(frame)
            scan_ranges = contours_scan.generate(
                contours_image,
                center_x=vehicle_pose[0],
                center_y=vehicle_pose[1],
                min_angle=min_angle,
                max_angle=max_angle,
                samples_num=samples_num,
                min_distance=min_distance,
                max_distance=max_distance,
                resolution=resolution,
                r_primary_search_samples=r_primary_search_samples,
                r_secondary_search_step=r_secondary_search_step)
            # CAP_PROP_POS_MSEC is milliseconds; convert to seconds.
            video_timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) * 1e-3

            laser_scan = LaserScan()
            laser_scan.header.stamp = rospy.rostime.Time.from_sec(
                video_timestamp)
            laser_scan.header.frame_id = frame_id
            laser_scan.header.seq = frame_idx
            laser_scan.angle_min = min_angle
            laser_scan.angle_max = max_angle
            laser_scan.angle_increment = (max_angle - min_angle) / samples_num
            laser_scan.scan_time = (curr_scan_time - prev_scan_time).seconds
            # Ranges are produced in pixels; scale bounds to meters.
            laser_scan.range_min = min_distance * resolution
            laser_scan.range_max = max_distance * resolution
            laser_scan.ranges = np.asanyarray(scan_ranges)
            prev_scan_time = curr_scan_time

            vehicle_pose_point = PointStamped()
            vehicle_pose_point.point.x = vehicle_pose[0]
            vehicle_pose_point.point.y = vehicle_pose[1]
            vehicle_pose_point.header.stamp = rospy.rostime.Time.from_sec(
                video_timestamp)
            vehicle_pose_point.header.seq = frame_idx

            scans_and_ugv_poses[video_timestamp] = (laser_scan,
                                                    vehicle_pose_point)
            print(video_timestamp)
    finally:
        cap.release()
    with open(output_pickle_path, 'wb') as f:
        pickle.dump(scans_and_ugv_poses, f)
 def _publish_scan_message(self, center_x, center_y, timestamp,
                           contours_image):
     """Generate (or look up) a synthetic laser scan and publish it as LaserScan.

     The first call only seeds ``self.prev_scan_time`` and returns without
     publishing.  Subsequent calls either generate a fresh contours scan
     around (center_x, center_y) or, when ``self.scans_pickle_path`` is set,
     fetch a pre-computed scan by ``timestamp``; optional Gaussian noise and
     diagnostics (timing / NaN / inf tracking) are applied before publishing
     on ``self.scan_pub``.
     """
     # First invocation: initialize scan-time bookkeeping, publish nothing.
     if self.prev_scan_time is None:
         self.prev_scan_time = datetime.datetime.now()
         return
     if PROFILE_SCAN_GENERATOR:
         ts = time.time()
     # Live generation vs. pickle playback: a pickle path means scans were
     # precomputed and are keyed by the caller-supplied timestamp.
     if self.scans_pickle_path is None:
         scan_ranges = contours_scan.generate(
             contours_image,
             center_x=center_x,
             center_y=center_y,
             min_angle=self.min_angle,
             max_angle=self.max_angle,
             samples_num=self.samples_num,
             min_distance=self.min_distance_pixels,
             max_distance=self.max_distance_pixels,
             resolution=self.resolution,
             r_primary_search_samples=self.r_primary_search_samples,
             r_secondary_search_step=self.r_secondary_search_step)
     else:
         scan_ranges = self.timestamp_to_scan[timestamp]
     # Optionally corrupt the ranges with zero-mean Gaussian noise.
     if self.noise_sigma != 0:
         noise = np.random.normal(loc=0,
                                  scale=self.noise_sigma,
                                  size=len(scan_ranges))
         scan_ranges = scan_ranges + noise
     curr_scan_time = datetime.datetime.now()
     if PROFILE_SCAN_GENERATOR:
         te = time.time()
         delta = (te - ts)
         # Running mean over scan generation time, keyed on scan_idx.
         if self.scan_idx == 0:
             self.mean_scan_time = delta
         else:
             self.mean_scan_time = float(self.mean_scan_time) * (
                 self.scan_idx - 1) / self.scan_idx + delta / self.scan_idx
         rospy.loginfo(
             '%s :: Synthetic scan generation time: %f[sec], mean: %f[sec]'
             % (self.namespace, delta, self.mean_scan_time))
     if TRACK_NAN_IN_SCANS:
         # Count scans containing any NaN and scans that are all NaN.
         if np.isnan(scan_ranges).any():
             self.any_nan += 1
         if np.isnan(scan_ranges).all():
             self.all_nans += 1
         rospy.loginfo('%s :: Any NaN occurrences: %d' %
                       (self.namespace, self.any_nan))
         rospy.loginfo('%s :: All NaN occurrences: %d' %
                       (self.namespace, self.all_nans))
     if TRACK_INF_IN_SCANS:
         # Fraction of rays that hit nothing (inf range), running-averaged.
         inf_rate = float(np.sum(np.isinf(scan_ranges))) / len(scan_ranges)
         if self.scan_idx == 0:
             self.mean_inf_rate = inf_rate
         else:
             self.mean_inf_rate = float(self.mean_inf_rate) * (
                 self.scan_idx -
                 1) / self.scan_idx + inf_rate / self.scan_idx
         rospy.loginfo('%s :: Mean inf rate: %f' %
                       (self.namespace, self.mean_inf_rate))
     self.scan_idx += 1
     # Pack and publish the ROS LaserScan message.
     laser_scan = LaserScan()
     laser_scan.header.stamp = rospy.rostime.Time.now()
     laser_scan.header.frame_id = self.frame_id
     laser_scan.angle_min = self.min_angle
     laser_scan.angle_max = self.max_angle
     laser_scan.angle_increment = (self.max_angle -
                                   self.min_angle) / self.samples_num
     laser_scan.scan_time = (curr_scan_time - self.prev_scan_time).seconds
     # Distance members are tracked in pixels; scale bounds to meters.
     laser_scan.range_min = self.min_distance_pixels * self.resolution
     laser_scan.range_max = self.max_distance_pixels * self.resolution
     laser_scan.ranges = scan_ranges
     self.scan_pub.publish(laser_scan)
     self.prev_scan_time = curr_scan_time
                change_thresholds=(20, 20))
            vehicle_x, vehicle_y = vehicle_pose[0], vehicle_pose[1]
        else:
            filtered_pose = tracker.update_and_get_estimation(vehicle_pose)
            # vehicle_x, vehicle_y = filtered_pose[0], filtered_pose[1]
            if vehicle_pose is None:  # TODO: temp
                continue  # TODO: temp
            vehicle_x, vehicle_y = vehicle_pose[0], vehicle_pose[
                1]  # TODO: temp
        contours_image = maps_generation.generate_canopies_map(frame)
        scan_ranges = contours_scan.generate(
            contours_image,
            center_x=vehicle_x,
            center_y=vehicle_y,
            min_angle=min_angle,
            max_angle=max_angle,
            samples_num=samples_num,
            min_distance=min_distance,
            max_distance=max_distance,
            resolution=resolution,
            r_primary_search_samples=r_primary_search_samples,
            r_secondary_search_step=r_secondary_search_step)

        video_timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) * 1e-3
        ros_timestamp = rospy.rostime.Time.from_sec(video_timestamp)
        print video_timestamp
        # if video_timestamp > 60:
        #     break

        # scan_points_list = cv_utils.get_coordinates_list_from_scan_ranges(scan_ranges, vehicle_x, vehicle_y, min_angle, max_angle, resolution)
        # visualization_image = cv_utils.draw_points_on_image(frame, scan_points_list, color=(0, 0, 255), radius=5)
        # visualization_image = cv2.circle(visualization_image, center=(int(vehicle_x), int(vehicle_y)), radius=25, color=(255, 0, 255), thickness=5)
    ## ros_utils.save_image_to_map(map_image, resolution=0.0125, map_name='purple_map', dir_name=r'/home/omer/Desktop')

    map_image = maps_generation.generate_canopies_map(frame)
    # map_image = cv2.cvtColor(cv2.imread(r'/home/omer/Desktop/purple_map.pgm'), cv2.COLOR_BGR2GRAY)
    # map_image = cv2.cvtColor(cv2.imread(r'/home/omer/Downloads/dji_15-53-1_map.pgm'), cv2.COLOR_BGR2GRAY)

    # vehicle_x = 1000
    # vehicle_y = 1000

    # _, scan_coordinates_list = contours_scan2.generate(map_image, vehicle_x, vehicle_y, 0, 2 * np.pi, 360, 3, 300, 0.0125)
    scan_ranges = contours_scan.generate(
        map_image,
        center_x=vehicle_x,
        center_y=vehicle_y,
        min_angle=0,
        max_angle=2 * np.pi,
        samples_num=360,
        min_distance=3,
        max_distance=300,
        resolution=0.0125,
        r_primary_search_samples=25,
        r_secondary_search_step=3)  # TODO: fine tune parameters!
    curr_scan_time = datetime.datetime.now()
    if prev_scan_time is None:
        prev_scan_time = datetime.datetime.now()
        continue
    laser_scan = LaserScan()
    laser_scan.header.stamp = rospy.rostime.Time.now()
    laser_scan.header.frame_id = 'contours_scan_link'
    laser_scan.angle_min = 0
    laser_scan.angle_max = 2 * np.pi
    laser_scan.angle_increment = (2 * np.pi - 0) / 360