Example #1
    def define_jobs_context(self, context):

        dirname = self.options.dirname
        filenames = dtu.locate_files(dirname, '*.jpg')
        if not filenames:
            msg = 'Could not find any JPG files in %r.' % dirname
            raise Exception(msg)
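        # L1 and L2 here are distance functions assumed to be defined
        # elsewhere in this module.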
        options = {
            'L1': dict(phi=make_smaller, distance=L1),
            'L2': dict(phi=make_smaller, distance=L2),
        }

        filenames = [f for f in filenames if '-0' in f]
        filenames = sorted(filenames)
        print("\n".join(filenames))

        images = [dtu.image_cv_from_jpg_fn(f) for f in filenames]
        for id_option, params in options.items():
            c = context.child(id_option)
            out = os.path.join(dirname, 'similarity', id_option, 'similarity')
            A = c.comp(get_similarity_matrix, images, out=out, **params)
            c.comp(write_similarity_matrix,
                   A,
                   out + '_final',
                   more=True,
                   images=images)
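The c.comp(...) calls above do not run get_similarity_matrix right away: they
register jobs on the (compmake-style) context so that a scheduler can execute
and cache them later. Below is a minimal sketch of that deferred-call pattern;
the class and method names are illustrative, not the real API.

import functools

class ToyContext(object):
    # Toy stand-in for the job context used above (an assumed sketch, not
    # the real compmake/duckietown API). comp() records a call instead of
    # executing it. The real API also returns a promise object that later
    # jobs can consume as an argument; that part is elided here.

    def __init__(self):
        self.jobs = []

    def comp(self, f, *args, **kwargs):
        # Record the call instead of executing it.
        self.jobs.append(functools.partial(f, *args, **kwargs))

    def run_all(self):
        # A real scheduler could run these in parallel and cache results.
        return [job() for job in self.jobs]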
Example #2
def single_image_histograms():
    p = dtu.require_resource('frame0002.jpg')

    image_cv = dtu.image_cv_from_jpg_fn(p)

    res = go(image_cv)
    outd = dtu.get_output_dir_for_test()
    dtu.write_bgr_images_as_jpgs(res, outd)
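dtu.image_cv_from_jpg_fn, used throughout these examples, presumably reads a
JPG from disk into an OpenCV BGR array. A plausible stand-alone equivalent
(an assumption, not the actual implementation):

import cv2

def image_cv_from_jpg_fn_sketch(path):
    # Load a JPG as an OpenCV BGR uint8 array; fail loudly if decoding fails.
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    if image is None:
        raise ValueError('Could not read image %r' % path)
    return image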
Example #3
def run_detection(transform, jpg, out, shape, interpolation, name,
                  LineDetectorClass):
    image = dtu.image_cv_from_jpg_fn(jpg)

    image = cv2.resize(image, shape, interpolation=interpolation)

    #     bgr = bgr[bgr.shape[0] / 2:, :, :]

    image_detections = line_detection(LineDetectorClass, image)
    transformed = transform(image)

    transformed_clipped = dtu.image_clip_255(transformed)
    transformed_detections = line_detection(LineDetectorClass,
                                            transformed_clipped)

    if not os.path.exists(out):
        os.makedirs(out)
    bn = os.path.splitext(os.path.basename(jpg))[0]

    def write(postfix, im):
        fn = os.path.join(out, '%s.%s.%s.png' % (bn, name, postfix))
        cv2.imwrite(fn, dtu.zoom_image(im, 4))

    together = dtu.make_images_grid(
        [
            image,  # transformed,
            merge_masks_res(image_detections),
            dtu.gray2rgb(image_detections['edges']),
            image_detections['annotated'],
            transformed_clipped,
            merge_masks_res(transformed_detections),
            dtu.gray2rgb(transformed_detections['edges']),
            transformed_detections['annotated'],
        ],
        cols=4,
        pad=35)

    # Write the string 'name' in the upper-left corner of the combined image.
    cv2.putText(together, name, (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (0, 0, 255), 2)

    return together
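dtu.make_images_grid lays the eight panels out as a padded 2x4 grid. A rough
numpy re-implementation of that layout, assuming all panels are same-sized
3-channel uint8 images (illustrative only, not the real function):

import numpy as np

def make_images_grid_sketch(images, cols, pad):
    # Tile same-shaped BGR images into a grid with 'pad' pixels of black
    # padding around each cell.
    h, w = images[0].shape[:2]
    rows = (len(images) + cols - 1) // cols
    grid = np.zeros((pad + rows * (h + pad), pad + cols * (w + pad), 3),
                    dtype=np.uint8)
    for i, im in enumerate(images):
        r, c = divmod(i, cols)
        y, x = pad + r * (h + pad), pad + c * (w + pad)
        grid[y:y + h, x:x + w] = im
    return grid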
Example #4
def single_image1():
    p = dtu.require_resource('frame0002.jpg')
    image_cv = dtu.image_cv_from_jpg_fn(p)

    line_detector_name = 'baseline'
    image_prep_name = 'baseline'
    lane_filter_name = 'baseline'
    anti_instagram_name = 'baseline'
    robot_name = dtu.DuckietownConstants.ROBOT_NAME_FOR_TESTS
    gp = GroundProjection(robot_name)

    res, _stats = run_pipeline(image_cv, gp,
                               line_detector_name=line_detector_name,
                               image_prep_name=image_prep_name,
                               lane_filter_name=lane_filter_name,
                               anti_instagram_name=anti_instagram_name,
                               all_details=False, ground_truth=None)

    outd = dtu.get_output_dir_for_test()
    dtu.write_jpgs_to_dir(res, outd)
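dtu.write_jpgs_to_dir appears to persist the pipeline results as one JPG per
entry. A plausible sketch, assuming res maps names to BGR images (an
assumption, not the real implementation):

import os
import cv2

def write_jpgs_to_dir_sketch(name2image, dirname):
    # Write each BGR image in the dict as '<key>.jpg' inside dirname.
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    for name, image in sorted(name2image.items()):
        cv2.imwrite(os.path.join(dirname, '%s.jpg' % name), image)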
Example #5
def examine_dataset(dirname, out):
    logger.info(dirname)
    dirname = dtu.expand_all(dirname)

    jpgs = dtu.locate_files(dirname, '*.jpg')
    mats = dtu.locate_files(dirname, '*.mat')

    logger.debug('Found %d JPGs and %d .mat files.' % (len(jpgs), len(mats)))

    if len(jpgs) == 0:
        msg = 'No JPGs found in %r.' % dirname
        raise ValueError(msg)


#     if len(mats) == 0:
#         msg = 'Not enough mats.'
#         raise ValueError(msg)

    first_jpg = sorted(jpgs)[0]
    logger.debug('Using jpg %r to learn transformation.' % first_jpg)

    first_jpg_image = dtu.image_cv_from_jpg_fn(first_jpg)

    success, health, parameters = calculate_transform(first_jpg_image)

    s = ""
    s += 'success: %s\n' % str(success)
    s += 'health: %s\n' % str(health)
    s += 'parameters: %s\n' % str(parameters)
    w = os.path.join(out, 'learned_transform.txt')
    with open(w, 'w') as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    duckietown_package_dir = dtu.get_ros_package_path('duckietown')
    config_dir = os.path.join(
        duckietown_package_dir,
        'config/baseline/line_detector/line_detector_node')

    if not os.path.exists(config_dir):
        msg = 'Could not find configuration dir %s' % config_dir
        raise Exception(msg)

    config_dir = dtu.expand_all(config_dir)
    configurations = dtu.locate_files(config_dir, '*.yaml')

    if not configurations:
        msg = 'Could not find any configuration file in %s.' % config_dir
        raise Exception(msg)
    #logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []

        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST

        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, '%s.all.png' % (bn))

        if os.path.exists(fn):
            logger.debug('Skipping because file exists: %r' % fn)
        else:
            for c in configurations:
                logger.info('Trying %r' % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ['oreo', 'myrtle', 'bad_lighting', '226-night']:
                    continue
                with open(c) as f:
                    stuff = yaml.safe_load(f)

                if 'detector' not in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)

                detector = stuff['detector']
                logger.info(detector)
                # Expect the detector entry to be a [class_name, params] pair.
                if not (isinstance(detector, list) and len(detector) == 2):
                    raise ValueError(detector)

                def LineDetectorClass():
                    return dtu.instantiate(detector[0], detector[1])

                s = run_detection(transform,
                                  j,
                                  out,
                                  shape=shape,
                                  interpolation=interpolation,
                                  name=name,
                                  LineDetectorClass=LineDetectorClass)
                summaries.append(s)

            together = dtu.make_images_grid(summaries, cols=1, pad=10)
            cv2.imwrite(fn, dtu.zoom_image(together, 4))

    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + '.jpg'
        if not os.path.exists(jpg):
            msg = 'JPG %r for mat %r does not exist' % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m],
                                                       overall_results)
            print("comparison_results[m]=frame_results")

    print("finished mats: " + dirname)
    return overall_results
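Each configuration file stores the detector as a [class_name, params] pair,
and dtu.instantiate builds the object from it. A minimal sketch of that kind
of name-based instantiation (the real dtu.instantiate may differ):

import importlib

def instantiate_sketch(qualified_name, params):
    # Resolve 'package.module.ClassName' and construct it with the given
    # keyword parameters.
    module_name, _, class_name = qualified_name.rpartition('.')
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(**params)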
Example #6
def test_pair(transform, jpg, mat, out):
    """
        jpg: filename of the camera frame
        mat: filename of the corresponding .mat annotation file
    """

    data = scipy.io.loadmat(mat)
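    # scipy.io.loadmat returns MATLAB structs as nested object arrays,
    # which is why the fields below need chains of [0] indexing.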
    regions = data['regions'].flatten()
    max_type = 0
    for r in regions:
        max_type = max(max_type, r['type'][0][0][0][0])
    r_vals = {}

    for t in np.arange(max_type):
        r_vals[t + 1] = np.array([], 'float32')

    g_vals = copy.deepcopy(r_vals)
    b_vals = copy.deepcopy(r_vals)
    h_vals = copy.deepcopy(r_vals)
    s_vals = copy.deepcopy(r_vals)
    v_vals = copy.deepcopy(r_vals)

    result_stats = {
        'average_abs_err': [],
        'total_pixels': 0,
        'total_error': 0,
        'total_regions': 0,
        'r_vals': r_vals,
        'g_vals': g_vals,
        'b_vals': b_vals,
        'h_vals': h_vals,
        's_vals': s_vals,
        'v_vals': v_vals
    }
    for r in regions:
        logger.info('region')
        x = r['x'][0][0].flatten()
        y = r['y'][0][0].flatten()
        mask = r['mask'][0][0]
        mask3 = cv2.merge([mask, mask, mask])
        print('x', x)
        print('y', y)
        print('mask shape', mask.shape)
        # type is a 1-based / MATLAB-style index into the list of region
        # types (i.e. road, white, yellow, red, or whatever types were
        # annotated)
        print('type', r['type'][0][0][0][0])
        # color is [r, g, b] with each component between 0 and 1
        print('color', r['color'][0])
        t = r['type'][0][0][0][0]
        region_color = r['color'][0][0][0]
        rval = region_color[0] * 255.
        gval = region_color[1] * 255.
        bval = region_color[2] * 255.
        image = dtu.image_cv_from_jpg_fn(jpg)
        transformed = transform(image)
        [b2, g2, r2] = cv2.split(transformed)
        thsv = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        [h2, s2, v2] = cv2.split(thsv)
        r2_ = r2[mask.nonzero()]
        g2_ = g2[mask.nonzero()]
        b2_ = b2[mask.nonzero()]
        h2_ = h2[mask.nonzero()]
        s2_ = s2[mask.nonzero()]
        v2_ = v2[mask.nonzero()]

        result_stats['r_vals'][t] = np.concatenate(
            (result_stats['r_vals'][t], r2_), 0)
        result_stats['g_vals'][t] = np.concatenate(
            (result_stats['g_vals'][t], g2_), 0)
        result_stats['b_vals'][t] = np.concatenate(
            (result_stats['b_vals'][t], b2_), 0)
        result_stats['h_vals'][t] = np.concatenate(
            (result_stats['h_vals'][t], h2_), 0)
        result_stats['s_vals'][t] = np.concatenate(
            (result_stats['s_vals'][t], s2_), 0)
        result_stats['v_vals'][t] = np.concatenate(
            (result_stats['v_vals'][t], v2_), 0)
        absdiff_img = cv2.absdiff(transformed, np.array([bval, gval, rval,
                                                         0.]))
        masked_diff = cv2.multiply(np.array(absdiff_img, 'float32'),
                                   np.array(mask3, 'float32'))
        num_pixels = cv2.sumElems(mask)[0]
        region_error = cv2.sumElems(cv2.sumElems(masked_diff))[0]
        avg_abs_err = region_error / (num_pixels + 1.)
        print('Average abs. error', avg_abs_err)
        result_stats['average_abs_err'].append(avg_abs_err)
        result_stats['total_pixels'] += num_pixels
        result_stats['total_error'] += region_error
        result_stats['total_regions'] += 1
        # XXX: to finish
    return result_stats
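The per-region statistics above amount to a masked mean absolute difference
between the transformed image and the annotated region color. The same
quantity in plain numpy, assuming a 0/1 mask as in the code above
(illustrative helper, not part of the original module):

import numpy as np

def masked_mean_abs_err_sketch(image_bgr, mask, target_bgr):
    # Sum of per-channel absolute differences over the masked pixels,
    # divided by (pixel count + 1), matching the computation above.
    sel = mask.astype(bool)
    diff = np.abs(image_bgr.astype('float32') -
                  np.asarray(target_bgr, dtype='float32'))
    return diff[sel].sum() / (sel.sum() + 1.0)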