Example #1
def run_detection(transform, jpg, out, shape, interpolation, name,
                  LineDetectorClass):
    """Run line detection on a JPG before and after a color transform.

    Loads the image at *jpg*, resizes it to *shape*, runs line detection on
    the raw image and on the clipped result of ``transform(image)``, and
    returns a single grid image comparing the two, labelled with *name*.

    Parameters:
        transform: callable applied to the resized image.
        jpg: path to the input JPG file.
        out: output directory; created here if missing.
        shape: (width, height) tuple passed to cv2.resize.
        interpolation: cv2 interpolation flag for the resize.
        name: label drawn in the top-left corner of the result.
        LineDetectorClass: zero-argument factory returning a line detector.

    Returns:
        The composite image (8 panels in a 4-column grid).
    """
    image = image_cv_from_jpg_fn(jpg)

    image = cv2.resize(image, shape, interpolation=interpolation)

    image_detections = line_detection(LineDetectorClass, image)
    transformed = transform(image)

    # Clip the transformed image to the valid pixel range before detecting.
    transformed_clipped = image_clip_255(transformed)
    transformed_detections = line_detection(LineDetectorClass,
                                            transformed_clipped)

    # Ensure the output directory exists; callers write result files into it.
    if not os.path.exists(out):
        os.makedirs(out)

    # NOTE: removed a dead inner helper ('write') and its 'bn' variable;
    # neither was ever used in this function.

    # Top row: original image + its detections; bottom row: transformed.
    together = make_images_grid(
        [
            image,
            merge_masks_res(image_detections),
            gray2rgb(image_detections['edges']),
            image_detections['annotated'],
            transformed_clipped,
            merge_masks_res(transformed_detections),
            gray2rgb(transformed_detections['edges']),
            transformed_detections['annotated'],
        ],
        cols=4,
        pad=35,
        bgcolor=[1, 1, 1])

    # Label the composite with the configuration name in the upper-left.
    cv2.putText(together, name, (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (0, 0, 255), 2)

    return together
Example #2
def run_detection(transform, jpg, out, shape, interpolation, name, LineDetectorClass):
    """Run line detection on a JPG before and after a color transform.

    Loads the image at *jpg*, resizes it to *shape*, runs line detection on
    the raw image and on the clipped result of ``transform(image)``, and
    returns a single grid image comparing the two, labelled with *name*.

    Parameters:
        transform: callable applied to the resized image.
        jpg: path to the input JPG file.
        out: output directory; created here if missing.
        shape: (width, height) tuple passed to cv2.resize.
        interpolation: cv2 interpolation flag for the resize.
        name: label drawn in the top-left corner of the result.
        LineDetectorClass: zero-argument factory returning a line detector.

    Returns:
        The composite image (8 panels in a 4-column grid).
    """
    image = image_cv_from_jpg_fn(jpg)

    image = cv2.resize(image, shape, interpolation=interpolation)

    image_detections = line_detection(LineDetectorClass, image)
    transformed = transform(image)

    # Clip the transformed image to the valid pixel range before detecting.
    transformed_clipped = image_clip_255(transformed)
    transformed_detections = line_detection(LineDetectorClass, transformed_clipped)

    # Ensure the output directory exists; callers write result files into it.
    if not os.path.exists(out):
        os.makedirs(out)

    # NOTE: removed a dead inner helper ('write') and its 'bn' variable;
    # neither was ever used in this function.

    # Top row: original image + its detections; bottom row: transformed.
    together = make_images_grid(
        [
            image,
            merge_masks_res(image_detections),
            gray2rgb(image_detections["edges"]),
            image_detections["annotated"],
            transformed_clipped,
            merge_masks_res(transformed_detections),
            gray2rgb(transformed_detections["edges"]),
            transformed_detections["annotated"],
        ],
        cols=4,
        pad=35,
        bgcolor=[1, 1, 1],
    )

    # Label the composite with the configuration name in the upper-left.
    cv2.putText(together, name, (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    return together
Example #3
def examine_dataset(dirname, out):
    logger.info(dirname)
    dirname = expand_environment(dirname)

    jpgs = locate_files(dirname, "*.jpg")
    mats = locate_files(dirname, "*.mat")

    logger.debug("I found %d jpgs and %d mats" % (len(jpgs), len(mats)))

    if len(jpgs) == 0:
        msg = "Not enough jpgs."
        raise ValueError(msg)

    #     if len(mats) == 0:
    #         msg = 'Not enough mats.'
    #         raise ValueError(msg)

    first_jpg = sorted(jpgs)[0]
    logger.debug("Using jpg %r to learn transformation." % first_jpg)

    first_jpg_image = image_cv_from_jpg_fn(first_jpg)

    success, health, parameters = calculate_transform(first_jpg_image)

    s = ""
    s += "success: %s\n" % str(success)
    s += "health: %s\n" % str(health)
    s += "parameters: %s\n" % str(parameters)
    w = os.path.join(out, "learned_transform.txt")
    with open(w, "w") as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    config_dir = "${DUCKIETOWN_ROOT}/catkin_ws/src/duckietown/config/baseline/line_detector/line_detector_node/"
    config_dir = expand_environment(config_dir)
    configurations = locate_files(config_dir, "*.yaml")
    # logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []

        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST

        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, "%s.all.png" % (bn))

        if os.path.exists(fn):
            logger.debug("Skipping because file exists: %r" % fn)
        else:
            for c in configurations:
                logger.info("Trying %r" % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ["oreo", "myrtle", "bad_lighting", "226-night"]:
                    continue
                with open(c) as f:
                    stuff = yaml.load(f)

                if not "detector" in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)

                detector = stuff["detector"]
                logger.info(detector)
                if not isinstance(detector, list) and len(detector) == 2:
                    raise ValueError(detector)

                from duckietown_utils.instantiate_utils import instantiate

                def LineDetectorClass():
                    return instantiate(detector[0], detector[1])

                s = run_detection(
                    transform,
                    j,
                    out,
                    shape=shape,
                    interpolation=interpolation,
                    name=name,
                    LineDetectorClass=LineDetectorClass,
                )
                summaries.append(s)

            together = make_images_grid(summaries, cols=1, pad=10, bgcolor=[0.5, 0.5, 0.5])
            cv2.imwrite(fn, zoom_image(together, 4))
    # ipython_if_guy()
    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + ".jpg"
        if not os.path.exists(jpg):
            msg = "JPG %r for mat %r does not exist" % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m], overall_results)
            print "comparison_results[m]=frame_results"
            # ipython_if_guy()
    print "finished mats: " + dirname
    return overall_results
Example #4
def examine_dataset(dirname, out):
    logger.info(dirname)
    dirname = expand_all(dirname)

    jpgs = locate_files(dirname, '*.jpg')
    mats = locate_files(dirname, '*.mat')

    logger.debug('I found %d JPGs and %d .mat.' % (len(jpgs), len(mats)))

    if len(jpgs) == 0:
        msg = 'Not JPGs found in %r.' % dirname
        raise ValueError(msg)


#     if len(mats) == 0:
#         msg = 'Not enough mats.'
#         raise ValueError(msg)

    first_jpg = sorted(jpgs)[0]
    logger.debug('Using jpg %r to learn transformation.' % first_jpg)

    first_jpg_image = image_cv_from_jpg_fn(first_jpg)

    success, health, parameters = calculate_transform(first_jpg_image)

    s = ""
    s += 'success: %s\n' % str(success)
    s += 'health: %s\n' % str(health)
    s += 'parameters: %s\n' % str(parameters)
    w = os.path.join(out, 'learned_transform.txt')
    with open(w, 'w') as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    duckietown_package_dir = get_ros_package_path('duckietown')
    config_dir = os.path.join(
        duckietown_package_dir,
        'config/baseline/line_detector/line_detector_node')

    if not os.path.exists(config_dir):
        msg = 'Could not find configuration dir %s' % config_dir
        raise Exception(msg)

    config_dir = expand_all(config_dir)
    configurations = locate_files(config_dir, '*.yaml')

    if not configurations:
        msg = 'Could not find any configuration file in %s.' % config_dir
        raise Exception(msg)
    #logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []

        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST

        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, '%s.all.png' % (bn))

        if os.path.exists(fn):
            logger.debug('Skipping because file exists: %r' % fn)
        else:
            for c in configurations:
                logger.info('Trying %r' % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ['oreo', 'myrtle', 'bad_lighting', '226-night']:
                    continue
                with open(c) as f:
                    stuff = yaml.load(f)

                if not 'detector' in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)

                detector = stuff['detector']
                logger.info(detector)
                if not isinstance(detector, list) and len(detector) == 2:
                    raise ValueError(detector)

                def LineDetectorClass():
                    return instantiate(detector[0], detector[1])

                s = run_detection(transform,
                                  j,
                                  out,
                                  shape=shape,
                                  interpolation=interpolation,
                                  name=name,
                                  LineDetectorClass=LineDetectorClass)
                summaries.append(s)

            together = make_images_grid(summaries,
                                        cols=1,
                                        pad=10,
                                        bgcolor=[.5, .5, .5])
            cv2.imwrite(fn, zoom_image(together, 4))

    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + '.jpg'
        if not os.path.exists(jpg):
            msg = 'JPG %r for mat %r does not exist' % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m],
                                                       overall_results)
            print "comparison_results[m]=frame_results"

    print "finished mats: " + dirname
    return overall_results