Example #1
def create_visuals(robots, actual_map_name, out):
    db = get_easy_algo_db()
    actual_map = db.create_instance(FAMILY_SEGMAPS, actual_map_name)
    res = OrderedDict()
    res2 = OrderedDict()

    for i, robot_name in enumerate(sorted(robots)):
        dtu.logger.info('%d/%d: %s' % (i, len(robots), robot_name))
        try:
            gp = GroundProjection(robot_name)
        except (NoCameraInfoAvailable, NoHomographyInfoAvailable) as e:
            dtu.logger.warning('skipping %r: %s' % (robot_name, e))
            continue
        gpg = gp.get_ground_projection_geometry()
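        # Simulate what this robot's camera would see from the identity pose,
        # using its own calibration (ground projection geometry).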
        pose = np.eye(3)
        simulated_data = simulate_image(actual_map, pose, gpg, blur_sigma=1)
        res[robot_name] = simulated_data.rectified_synthetic_bgr
        res2[robot_name] = simulated_data.distorted_synthetic_bgr
    if not res:
        msg = 'No images to draw.'
        dtu.logger.error(msg)
    else:
        output = os.path.join(out, 'distorted')
        dtu.write_bgr_images_as_jpgs(res2, output)
        output = os.path.join(out, 'rectified')
        dtu.write_bgr_images_as_jpgs(res, output)
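
A minimal call sketch for create_visuals above; the robot names, the map name,
and the output directory are placeholders, not values from this listing, and
each robot needs camera and homography calibration files available locally.

    create_visuals(robots=['robot_a', 'robot_b'],    # hypothetical robot names
                   actual_map_name='some_map',       # hypothetical FAMILY_SEGMAPS entry
                   out='out-create-visuals')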
Example #2
def look_at(log, output, anti_instagram, line_detector, image_prep,
            lane_filter, all_details):
    filename = get_local_bag_file(log)

    bag = rosbag.Bag(filename)

    vehicle_name = dtu.which_robot(bag)

    dtu.logger.info('Vehicle name: %s' % vehicle_name)

    gp = GroundProjection(vehicle_name)

    topic = dtu.get_image_topic(bag)
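    # Pull just the first camera frame out of the bag; each entry exposes an
    # 'rgb' array.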
    res = dtu.d8n_read_all_images_from_bag(bag, topic, max_images=1)

    image_cv = res[0]['rgb']

    #     dtu.logger.debug(dtu.describe_value(image_cv))

    image_cv_bgr = dtu.bgr_from_rgb(image_cv)

    dtu.DuckietownConstants.show_timeit_benchmarks = True
    res, _stats = run_pipeline(image_cv_bgr,
                               gp=gp,
                               anti_instagram_name=anti_instagram,
                               line_detector_name=line_detector,
                               image_prep_name=image_prep,
                               lane_filter_name=lane_filter,
                               all_details=all_details)

    res = dtu.resize_small_images(res)

    dtu.write_bgr_images_as_jpgs(res, output)
def single_image_histograms():
    p = dtu.require_resource('frame0002.jpg')

    image_cv = dtu.image_cv_from_jpg_fn(p)

    res = go(image_cv)
    outd = dtu.get_output_dir_for_test()
    dtu.write_bgr_images_as_jpgs(res, outd)
Example #4
    def go(self):
        vehicle_name = (dtu.get_current_robot_name()
                        if dtu.on_duckiebot() else "default")

        output = self.options.output
        if output is None:
            output = 'out-pipeline'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.image is not None:
            image_filename = self.options.image
            if image_filename.startswith('http'):
                image_filename = dtu.get_file_from_url(image_filename)

            bgr = dtu.bgr_from_jpg_fn(image_filename)
        else:
            print("Validating using the ROS image stream...")
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', vehicle_name,
                                      'camera_node/image/compressed')

            print('Let\'s wait for an image. Say cheese!')

            # Dummy to get ROS message
            rospy.init_node('single_image')
            img_msg = None
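            # Wait up to 10 s for a single frame; img_msg stays None on timeout.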
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\n'
                    'Didn\'t get any message!: %s\n '
                    'MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!'
                    '\n\n\n' % (e, ))
                # Without an image there is nothing to process.
                return

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            self.info('Picture taken: %s ' % str(bgr.shape))

        gp = GroundProjection(vehicle_name)

        dtu.DuckietownConstants.show_timeit_benchmarks = True
        res, _stats = run_pipeline(
            bgr,
            gp,
            line_detector_name=self.options.line_detector,
            image_prep_name=self.options.image_prep,
            anti_instagram_name=self.options.anti_instagram,
            lane_filter_name=self.options.lane_filter)

        self.info('Resizing images..')
        res = dtu.resize_small_images(res)
        self.info('Writing images..')
        dtu.write_bgr_images_as_jpgs(res, output)
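
The file-input branch above reduces to the following standalone sketch; the
file name, robot name, and algorithm names are placeholders ('baseline'
mirrors the anti-Instagram choice used in the synthetic test below), and the
robot's calibration files must be available locally.

    bgr = dtu.bgr_from_jpg_fn('frame.jpg')       # placeholder input image
    gp = GroundProjection('some_robot')          # placeholder robot name
    res, _stats = run_pipeline(bgr,
                               gp,
                               line_detector_name='baseline',
                               image_prep_name='baseline',
                               anti_instagram_name='baseline',
                               lane_filter_name='baseline')
    dtu.write_bgr_images_as_jpgs(dtu.resize_small_images(res), 'out-sketch')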
Example #5
def test_synthetic(actual_map_name, template, robot_name, line_detector_name,
                   image_prep_name, lane_filter_name, pose_or_location, outd):

    np.random.seed(42)
    db = get_easy_algo_db()
    actual_map = db.create_instance(FAMILY_SEGMAPS, actual_map_name)

    # first, load calibration for robot
    easy_algo_db = get_easy_algo_db()
    dtu.logger.debug('looking for localization template %r' % template)
    localization_template = easy_algo_db.create_instance(
        FAMILY_LOC_TEMPLATES, template)

    gp = get_ground_projection(robot_name)
    # GroundProjectionGeometry
    gpg = gp.get_ground_projection_geometry()

    # pose_or_location is either an SE(2) pose (a 3x3 matrix) or a
    # template-specific location; the template converts between the two.
    if pose_or_location.shape == (3, 3):  # SE(2)
        pose = pose_or_location
        location = localization_template.coords_from_pose(pose)
    else:
        location = pose_or_location
        pose = localization_template.pose_from_coords(location)

    simulation_data = simulate_image(actual_map, pose, gpg, blur_sigma=0.3)

    image = simulation_data.distorted_synthetic_bgr

    #     anti_instagram_name='identity' # skip
    anti_instagram_name = 'baseline'

    all_details = False
    res, stats = run_pipeline(image,
                              gp,
                              line_detector_name=line_detector_name,
                              image_prep_name=image_prep_name,
                              lane_filter_name=lane_filter_name,
                              anti_instagram_name=anti_instagram_name,
                              all_details=all_details,
                              ground_truth=pose,
                              actual_map=actual_map)

    # 'location' is a structured array; compute the per-field estimation error.
    error = np.empty_like(location)
    for k in error.dtype.fields:
        error[k] = stats['estimate'][k] - location[k]
    stats['error'] = error

    res = dtu.resize_small_images(res)

    dtu.write_bgr_images_as_jpgs(res, outd, extra_string=outd.split('/')[-1])
    return res, stats
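
A hypothetical invocation of test_synthetic; the map, template, robot, and
algorithm names are placeholders, not values from this listing. Passing
np.eye(3) exercises the SE(2) branch, so the location is derived from the
pose via the localization template.

    res, stats = test_synthetic(actual_map_name='some_map',
                                template='some_template',
                                robot_name='some_robot',
                                line_detector_name='baseline',
                                image_prep_name='baseline',
                                lane_filter_name='baseline',
                                pose_or_location=np.eye(3),
                                outd='out-test-synthetic')
    print(stats['error'])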
Example #6
    def go(self):
        output = self.options.output
        if output is None:
            output = 'out-pipeline'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.image is not None:
            image_filename = self.options.image
            if image_filename.startswith('http'):
                image_filename = dtu.get_file_from_url(image_filename)

            bgr = dtu.bgr_from_jpg_fn(image_filename)
        else:
            self.info('Taking a picture. Say cheese!')
            out = os.path.join(output, 'frame.jpg')
            bgr = dtu.bgr_from_raspistill(out)
            self.info('Picture taken: %s ' % str(bgr.shape))

            bgr = dtu.d8_image_resize_fit(bgr, 640)
            self.info('Resized to: %s ' % str(bgr.shape))

        vehicle_name = dtu.get_current_robot_name()
        gp = GroundProjection(vehicle_name)

        dtu.DuckietownConstants.show_timeit_benchmarks = True
        res, _stats = run_pipeline(
            bgr,
            gp,
            line_detector_name=self.options.line_detector,
            image_prep_name=self.options.image_prep,
            anti_instagram_name=self.options.anti_instagram,
            lane_filter_name=self.options.lane_filter)

        self.info('resizing')
        res = dtu.resize_small_images(res)
        self.info('writing')
        dtu.write_bgr_images_as_jpgs(res, output)
Example #7
    def go(self):
        robot_name = dtu.get_current_robot_name()

        output = self.options.output
        if output is None:
            output = 'out-calibrate-extrinsics'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.input is None:

            print("{}\nCalibrating using the ROS image stream...\n".format(
                "*" * 20))
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', robot_name,
                                      'camera_node/image/compressed')
            print('Topic to listen to is: %s' % topic_name)

            print('Let\'s wait for an image. Say cheese!')

            # Dummy for getting a ROS message
            rospy.init_node('calibrate_extrinsics')
            img_msg = None
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\n'
                    'Didn\'t get any message!: %s\n '
                    'MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!'
                    '\n\n\n' % (e, ))
                # Without an image there is nothing to calibrate.
                return

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            print('Picture taken: %s ' % str(bgr.shape))

        else:
            print('Loading input image %s' % self.options.input)
            bgr = dtu.bgr_from_jpg_fn(self.options.input)

        if bgr.shape[1] != 640:
            interpolation = cv2.INTER_CUBIC
            bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
            print('Resized to: %s ' % str(bgr.shape))
        # Disable the old calibration file
        print("Disabling old homography")
        disable_old_homography(robot_name)
        print("Obtaining camera info")
        try:
            camera_info = get_camera_info_for_robot(robot_name)
        except Exception as E:
            print("Error on obtaining camera info!")
            print(E)
            # Cannot continue without camera info.
            return
        print("Get default homography")
        try:
            homography_dummy = get_homography_default()
        except Exception as E:
            print("Error on getting homography")
            print(E)
            # Cannot continue without a homography.
            return
        print("Rectify image")
        try:
            rect = Rectify(camera_info)
        except Exception as E:
            print("Error rectifying image!")
            print(E)
            # Cannot continue without a rectifier.
            return
        print("Calculate GPG")
        try:
            gpg = GroundProjectionGeometry(camera_info.width,
                                           camera_info.height,
                                           homography_dummy.reshape((3, 3)))
        except Exception as E:
            print("Error calculating GPG!")
            print(E)
            # Cannot continue without the ground projection geometry.
            return
        print("Ordered Dict")
        res = OrderedDict()
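        # Collect debug images as we go; the finally-clause below writes out
        # whatever was produced, even if a later step fails.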
        try:
            bgr_rectified = rect.rectify(bgr, interpolation=cv2.INTER_CUBIC)

            res['bgr'] = bgr
            res['bgr_rectified'] = bgr_rectified

            _new_matrix, res['rectified_full_ratio_auto'] = rect.rectify_full(
                bgr, ratio=1.65)

            (result_gpg, status) = gpg.estimate_homography(bgr_rectified)

            if status is not None:
                raise Exception(status)

            save_homography(result_gpg.H, robot_name)
            msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
            print(msg)
        except Exception as E:
            print(E)
        finally:
            dtu.write_bgr_images_as_jpgs(res, output)
    def go(self):
        robot_name = dtu.get_current_robot_name()

        output = self.options.output
        if output is None:
            output = 'out-calibrate-extrinsics'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.input is None:

            print("{}\nCalibrating using the ROS image stream...\n".format(
                "*" * 20))
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', robot_name,
                                      'camera_node/image/compressed')
            print('Topic to listen to is: %s' % topic_name)

            print('Let\'s wait for an image. Say cheese!')

            # Dummy for getting a ROS message
            rospy.init_node('calibrate_extrinsics')
            img_msg = None
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\n'
                    'Didn\'t get any message!: %s\n '
                    'MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!'
                    '\n\n\n' % (e, ))
                # Without an image there is nothing to calibrate.
                return

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            self.info('Picture taken: %s ' % str(bgr.shape))

        else:
            self.info('Loading input image %s' % self.options.input)
            bgr = dtu.bgr_from_jpg_fn(self.options.input)

        if bgr.shape[1] != 640:
            interpolation = cv2.INTER_CUBIC
            bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
            self.info('Resized to: %s ' % str(bgr.shape))

        # Disable the old calibration file
        disable_old_homography(robot_name)

        camera_info = get_camera_info_for_robot(robot_name)
        homography_dummy = get_homography_default()
        gpg = GroundProjectionGeometry(camera_info, homography_dummy)

        res = OrderedDict()
        try:
            bgr_rectified = gpg.rectify(bgr, interpolation=cv2.INTER_CUBIC)

            res['bgr'] = bgr
            res['bgr_rectified'] = bgr_rectified

            _new_matrix, res['rectified_full_ratio_auto'] = gpg.rectify_full(
                bgr, ratio=1.65)
            result = estimate_homography(bgr_rectified)
            dtu.check_isinstance(result, HomographyEstimationResult)

            if result.bgr_detected_refined is not None:
                res['bgr_detected_refined'] = result.bgr_detected_refined

            if not result.success:
                raise Exception(result.error)

            save_homography(result.H, robot_name)

            msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
            self.info(msg)
        finally:
            dtu.write_bgr_images_as_jpgs(res, output)
    def process_log(self, bag_in, prefix_in, bag_out, prefix_out,
                    utils):  #@UnusedVariable
        log_name = utils.get_log().log_name

        vehicle_name = dtu.which_robot(bag_in)

        dtu.logger.info('Vehicle name: %s' % vehicle_name)

        gp = GroundProjection(vehicle_name)

        topic = dtu.get_image_topic(bag_in)

        bgcolor = dtu.ColorConstants.BGR_DUCKIETOWN_YELLOW

        sequence = bag_in.read_messages_plus(topics=[topic])
        for _i, mp in enumerate(sequence):

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(mp.msg))

            res, stats = run_pipeline(bgr,
                                      gp=gp,
                                      line_detector_name=self.line_detector,
                                      image_prep_name=self.image_prep,
                                      lane_filter_name=self.lane_filter,
                                      anti_instagram_name=self.anti_instagram,
                                      all_details=self.all_details)

            # Fit the debug images into a fixed canvas; use smaller tiles when
            # all intermediate details are rendered.
            rect = (480, 640) if not self.all_details else (240, 320)
            res = dtu.resize_images_to_fit_in_rect(res, rect, bgcolor=bgcolor)

            print('abs: %s  window: %s  from log: %s' %
                  (mp.time_absolute, mp.time_window,
                   mp.time_from_physical_log_start))
            headers = [
                "Robot: %s log: %s time: %.2f s" %
                (vehicle_name, log_name, mp.time_from_physical_log_start),
                "Algorithms | color correction: %s | preparation: %s | detector: %s | filter: %s"
                % (
                    self.anti_instagram,
                    self.image_prep,
                    self.line_detector,
                    self.lane_filter,
                )
            ]

            res = dtu.write_bgr_images_as_jpgs(res,
                                               dirname=None,
                                               bgcolor=bgcolor)

            cv_image = res['all']

            for head in reversed(headers):
                max_height = 35
                cv_image = dtu.add_header_to_bgr(cv_image,
                                                 head,
                                                 max_height=max_height)

            otopic = "all"

            omsg = dtu.d8_compressed_image_from_cv_image(
                cv_image, same_timestamp_as=mp.msg)
            t = rospy.Time.from_sec(mp.time_absolute)  # @UndefinedVariable
            print('written %r at t = %s' % (otopic, t.to_sec()))
            bag_out.write(prefix_out + '/' + otopic, omsg, t=t)

            # Record each pipeline statistic alongside the image, at the same timestamp.
            for name, value in stats.items():
                utils.write_stat(prefix_out + '/' + name, value, t=t)