Example 1
0
    def __init__(self):
        """Load the robot's camera calibration.

        Reads the extrinsic (ground-projection homography) and intrinsic
        calibration YAML files from the duckietown ROS package, falling
        back to ``default.yaml`` when no robot-specific file exists.

        Raises:
            IOError: if neither a robot-specific nor a default
                calibration file can be found.
        """
        # defaults overwritten by param
        self.robot_name = "shamrock"
        self.rectified_input = False

        # --- extrinsic calibration (homography) ---
        self.extrinsics_filename = (get_ros_package_path('duckietown') +
                                    "/config/baseline/calibration/camera_extrinsic/" +
                                    self.robot_name + ".yaml")
        # BUG FIX: original tested bare `extrinsics_filename` (NameError).
        if not os.path.isfile(self.extrinsics_filename):
            logger.warn("no robot specific extrinsic calibration, trying default")
            alternate_extrinsics_filename = (get_ros_package_path('duckietown') +
                                             "/config/baseline/calibration/camera_extrinsic/default.yaml")
            if not os.path.isfile(alternate_extrinsics_filename):
                # BUG FIX: original only warned here, leaving self.H unset
                # and crashing later at np.linalg.inv; fail fast instead.
                msg = "can't find default either, something's wrong"
                logger.error(msg)
                raise IOError(msg)
            self.H = self.load_homography(alternate_extrinsics_filename)
        else:
            self.H = self.load_homography(self.extrinsics_filename)
        self.H_inv = np.linalg.inv(self.H)

        # --- intrinsic calibration ---
        intrinsics_filename = (get_ros_package_path('duckietown') +
                               "/config/baseline/calibration/camera_intrinsics/" +
                               self.robot_name + ".yaml")
        if not os.path.isfile(intrinsics_filename):
            logger.warn("no robot specific  calibration, trying default")
            # BUG FIX: the default used to point at the camera_extrinsic
            # directory; use the intrinsics directory like the line above.
            intrinsics_filename = (get_ros_package_path('duckietown') +
                                   "/config/baseline/calibration/camera_intrinsics/default.yaml")
            # BUG FIX: original re-tested the *extrinsics* filename here.
            if not os.path.isfile(intrinsics_filename):
                msg = "can't find default either, something's wrong"
                logger.error(msg)
                raise IOError(msg)

        self.ci_ = self.load_camera_info(intrinsics_filename)
        self.pcm_ = PinholeCameraModel()
        # BUG FIX: the attribute assigned above is self.ci_, not self.ci.
        self.pcm_.fromCameraInfo(self.ci_)
Example 2
0
def get_unique_filename(rt_name, rdbe):
    """Return the per-commit yaml database path for a regression test.

    The basename encodes the date (dashes stripped), branch, and the
    last 8 characters of the commit hash.
    """
    short_commit = rdbe.commit[-8:]
    compact_date = rdbe.date.replace('-', '')
    basename = rt_name + '_%s_%s_%s.rdbe.yaml' % (compact_date,
                                                  rdbe.branch,
                                                  short_commit)
    db_root = get_ros_package_path('easy_regression')
    return os.path.join(db_root, 'db', rt_name, basename)
Example 3
0
def single_image_histograms():
    """Download a sample frame (if not cached) and write histogram jpegs."""
    url = 'https://www.dropbox.com/s/bzezpw8ivlfu4b0/frame0002.jpg?dl=0'
    local_path = os.path.join(get_ros_package_path('line_detector2'),
                              'include', 'line_detector2_tests',
                              'frame0002.jpg')
    download_if_not_exist(url, local_path)

    frame = image_cv_from_jpg_fn(local_path)
    results = go(frame)
    write_images_as_jpegs('single_image_histograms', results)
Example 4
0
 def load_board_info(self, filename=''):
     """Load calibration checkerboard parameters from a yaml file.

     Falls back to the ground_projection default config when `filename`
     does not point at an existing file.
     """
     if not os.path.isfile(filename):
         filename = get_ros_package_path('ground_projection') + '/config/ground_projection_node/default.yaml'
     board = yaml_load_file(filename)
     board_w = board['board_w']
     board_h = board['board_h']
     target_info = {
         'width': board_w,
         'height': board_h,
         'square_size': board['square_size'],
         'x_offset': board['x_offset'],
         'y_offset': board['y_offset'],
         # y offset is negated: board frame y points opposite to yaml's.
         'offset': np.array([board['x_offset'], -board['y_offset']]),
         'size': (board_w, board_h),
     }
     logger.info("Loaded checkerboard parameters")
     return target_info
Example 5
0
def examine_dataset(dirname, out):
    """Run every line-detector configuration over a dataset directory.

    Learns a scale-and-shift transform from the first jpg, then for each
    jpg renders a comparison grid of all configurations into `out`.
    Finally evaluates each .mat ground-truth file against its sibling
    jpg and merges the per-frame results.

    Returns the merged comparison results (list).
    Raises ValueError if no jpgs are found or a config is malformed.
    """
    logger.info(dirname)
    dirname = expand_all(dirname)

    jpgs = locate_files(dirname, '*.jpg')
    mats = locate_files(dirname, '*.mat')

    logger.debug('I found %d JPGs and %d .mat.' % (len(jpgs), len(mats)))

    if len(jpgs) == 0:
        # BUG FIX: message read "Not JPGs found".
        msg = 'No JPGs found in %r.' % dirname
        raise ValueError(msg)

    # Learn the color transform from the first image (sorted for determinism).
    first_jpg = sorted(jpgs)[0]
    logger.debug('Using jpg %r to learn transformation.' % first_jpg)

    first_jpg_image = image_cv_from_jpg_fn(first_jpg)

    success, health, parameters = calculate_transform(first_jpg_image)

    # Persist the learned transform summary for later inspection.
    s = ""
    s += 'success: %s\n' % str(success)
    s += 'health: %s\n' % str(health)
    s += 'parameters: %s\n' % str(parameters)
    w = os.path.join(out, 'learned_transform.txt')
    with open(w, 'w') as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    duckietown_package_dir = get_ros_package_path('duckietown')
    config_dir = os.path.join(
        duckietown_package_dir,
        'config/baseline/line_detector/line_detector_node')

    if not os.path.exists(config_dir):
        msg = 'Could not find configuration dir %s' % config_dir
        raise Exception(msg)

    config_dir = expand_all(config_dir)
    configurations = locate_files(config_dir, '*.yaml')

    if not configurations:
        msg = 'Could not find any configuration file in %s.' % config_dir
        raise Exception(msg)

    for j in jpgs:
        summaries = []

        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST

        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, '%s.all.png' % (bn))

        if os.path.exists(fn):
            logger.debug('Skipping because file exists: %r' % fn)
        else:
            for c in configurations:
                logger.info('Trying %r' % c)
                name = os.path.splitext(os.path.basename(c))[0]
                # Known-problematic configurations; skip them.
                if name in ['oreo', 'myrtle', 'bad_lighting', '226-night']:
                    continue
                with open(c) as f:
                    # NOTE(review): yaml.load without a Loader; these are
                    # local trusted configs, but prefer yaml.safe_load if
                    # they never need custom tags.
                    stuff = yaml.load(f)

                if not 'detector' in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)

                detector = stuff['detector']
                logger.info(detector)
                # BUG FIX: original was `not isinstance(...) and len(...) == 2`,
                # which (by precedence) almost never fired; the intent is to
                # reject anything that is not a 2-element [class, params] list.
                if not (isinstance(detector, list) and len(detector) == 2):
                    raise ValueError(detector)

                # Bind as default args so the factory does not late-bind
                # the loop variable `detector`.
                def LineDetectorClass(spec=detector[0], params=detector[1]):
                    return instantiate(spec, params)

                s = run_detection(transform,
                                  j,
                                  out,
                                  shape=shape,
                                  interpolation=interpolation,
                                  name=name,
                                  LineDetectorClass=LineDetectorClass)
                summaries.append(s)

            together = make_images_grid(summaries,
                                        cols=1,
                                        pad=10,
                                        bgcolor=[.5, .5, .5])
            cv2.imwrite(fn, zoom_image(together, 4))

    # Evaluate ground-truth .mat files against their sibling jpgs.
    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + '.jpg'
        if not os.path.exists(jpg):
            msg = 'JPG %r for mat %r does not exist' % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m],
                                                       overall_results)
            # Parenthesized print: same output, also valid on Python 3.
            print("comparison_results[m]=frame_results")

    print("finished mats: " + dirname)
    return overall_results
Example 6
0
def run_pipeline(image, line_detector_name, image_prep_name):
    """ 
        Runs the full line-detection / color-correction / rectification
        pipeline on a single image.

        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res, mapping stage names to images,
        starting with:
            
            res['image_input']
    """
    res = OrderedDict()
    res['image_input'] = image
    # Instantiate the configured detector and image-prep algorithms by name.
    algo_db = get_easy_algo_db()
    line_detector = algo_db.create_instance('line_detector',
                                            line_detector_name)
    image_prep = algo_db.create_instance('image_prep', image_prep_name)

    context = FakeContext()

    # First pass: segments detected with no color transform.
    segment_list = image_prep.process(context,
                                      image,
                                      line_detector,
                                      transform=None)

    res['segments_on_image_input'] = vs_fancy_display(image_prep.image_cv,
                                                      segment_list)
    res['segments_on_image_resized'] = vs_fancy_display(
        image_prep.image_resized, segment_list)

    # Learn a color-correction transform from the input image itself.
    ai = AntiInstagram()
    ai.calculateTransform(res['image_input'])

    transform = ai.applyTransform

    res['image_input_transformed'] = transform(res['image_input'])
    res['image_input_transformed_then_convertScaleAbs'] = cv2.convertScaleAbs(
        res['image_input_transformed'])

    # Second pass: segments detected on the color-corrected image.
    segment_list2 = image_prep.process(context,
                                       image,
                                       line_detector,
                                       transform=transform)

    res['segments2_on_image_input_transformed'] = \
        vs_fancy_display(image_prep.image_cv, segment_list2)
    res['segments2_on_image_input_transformed_resized'] = \
        vs_fancy_display(image_prep.image_resized, segment_list2)

    # Default camera intrinsics shipped with the duckietown package.
    filename = (get_ros_package_path('duckietown') +
                "/config/baseline/calibration/camera_intrinsic/default.yaml")

    ci = load_camera_info(filename)
    #     (h,w) = image.shape[:2]
    ci_W = ci.width
    ci_H = ci.height
    # Undistortion/rectification maps for the calibrated camera.
    mapx, mapy = cv2.initUndistortRectifyMap(ci.K, ci.D, ci.R, ci.P,
                                             (ci_W, ci_H), cv2.CV_32FC1)
    # mapx and mapy are (h, w) matrices that tell you
    # the x coordinate and the y coordinate for each point
    # in the first image
    #     print mapx.shape(),mapy.shape()
    res['grid'] = get_grid(image.shape[:2])
    res['grid_remap'] = cv2.remap(res['grid'], mapx, mapy, cv2.INTER_LINEAR)

    # Rectify the input image and the first-pass segments.
    res['image_input_rect'] = cv2.remap(res['image_input'], mapx, mapy,
                                        cv2.INTER_LINEAR)
    segment_list_rect = apply_transform_to_segment(segment_list, mapx, mapy)
    res['segments2_on_image_input_rect'] = \
        vs_fancy_display(res['image_input_rect'], segment_list_rect)

    res['grid_undistort'] = cv2.undistort(res['grid'], ci.K, ci.D)
    res['image_input_undistort'] = cv2.undistort(res['image_input'], ci.K,
                                                 ci.D)

    # Hard-coded 3x3 homography.
    # NOTE(review): presumably a robot-specific ground-projection
    # calibration baked in for this experiment — confirm before reuse.
    homography = np.array([
        -4.89775e-05, -0.0002150858, -0.1818273, 0.00099274, 1.202336e-06,
        -0.3280241, -0.0004281805, -0.007185673, 1
    ]).reshape((3, 3))

    segment_list_undistort = undistort_segments(segment_list,
                                                ci_W,
                                                ci_H,
                                                ci.K,
                                                ci.D,
                                                ci.P,
                                                ci.R,
                                                homography=homography)
    res['segments_und_image_input_und'] = vs_fancy_display(
        res['image_input_undistort'], segment_list_undistort)

    #     homography_inv = np.linalg.inv(homography)
    res['image_input_undistort_warp'] = cv2.warpPerspective(
        res['image_input'],
        homography, (ci_W, ci_H),
        flags=cv2.WARP_INVERSE_MAP)
    # add flags=cv2.WARP_INVERSE_MAP to do the inverse

    return res