Exemple #1
0
    def test_anti_instagram(self):
        """Correct known test images and check the per-image error against
        the ground-truth images stays below a fixed threshold."""
        ai = AntiInstagram()

        #### TEST SINGLE TRANSFORM ####
        # Maximum acceptable L2 distance between corrected image and ground truth.
        error_threshold = 500
        # load test images
        imagesetf = [
            "inputimage0.jpg",
            "inputimage1.jpg",
        ]
        gtimagesetf = [
            "groundtruthimage0.jpg",
            "groundtruthimage1.jpg",
        ]

        package_root = get_rospkg_root('anti_instagram')

        def add_dir(x):
            # Resolve a bare filename against the package's scripts directory.
            return os.path.join(package_root, 'scripts', x)

        # List comprehensions instead of map(): under Python 3, map() returns
        # a lazy iterator, and correctImages() indexes gtimageset[i].
        imagesetf = [add_dir(f) for f in imagesetf]
        gtimagesetf = [add_dir(f) for f in gtimagesetf]

        imageset = [load_image(f) for f in imagesetf]
        gtimageset = [load_image(f) for f in gtimagesetf]
        errors = self.correctImages(ai, imageset, gtimageset, error_threshold)
        logger.info("Test Image Errors: %s" % errors)

        self.assertLess(max(errors), error_threshold)
    def calcuateTransformOnRandomImg(self):
        """Benchmark calculateTransform() via timeit and return the average
        wall time per call, in seconds."""
        repetitions = 10
        total = timeit.timeit(
            stmt='ai.calculateTransform(img,True)',
            setup='from __main__ import setup; ai,img=setup()',
            number=repetitions)

        average = total / repetitions
        logger.info("Average Calculate Transform Took: %.1f ms" % (average * 1000))
        return average
 def applyTransformOnRandomImg(self):
     """Benchmark applyTransform() via timeit and return the average wall
     time per call, in seconds."""
     runs = 50
     elapsed = timeit.timeit(
         stmt='ai.applyTransform(img)',
         setup='from __main__ import setup; ai,img=setup()',
         number=runs)
     per_call = elapsed / runs
     logger.info("Average Apply Transform Took: %.1f ms " % (per_call * 1000))
     return per_call
    def test_anti_instagram_performance(self):
        """Time applyTransform for both algorithms over several image shapes,
        then time calculateTransform over the same shapes."""
        logger.info('This is going to test the performance of algorithm 1 and 2')

        shapes = [(480, 640), (240, 320), (120, 160)]

        for algorithm in (1, 2):
            SASParams.algorithm = algorithm
            for Params.shape in shapes:
                res = self.applyTransformOnRandomImg()
                #logger.info('algo: %d Shape: %s   -> %1.f ms' % (algorithm, str(Params.shape), 1000*res))
                # self.assertLess(res, 0.05)  # Calculate in less than 0.05

        for Params.shape in shapes:
            res = self.calcuateTransformOnRandomImg()
Exemple #5
0
    def test_anti_instagram_performance(self):
        """Benchmark both scale-and-shift algorithms across image shapes."""
        logger.info(
            'This is going to test the performance of algorithm 1 and 2')

        shapes = [(480, 640), (240, 320), (120, 160)]
        for algo in [1, 2]:
            SASParams.algorithm = algo
            for Params.shape in shapes:
                res = self.applyTransformOnRandomImg()
                # self.assertLess(res, 0.05)  # Calculate in less than 0.05

        # Time the transform estimation itself, over the same shapes.
        for Params.shape in shapes:
            res = self.calcuateTransformOnRandomImg()
Exemple #6
0
 def correctImages(self, ai, imageset, gtimageset, error_threshold):
     """Correct every image and measure its distance to ground truth.

     For each input: estimate the transform, apply it, round-trip the result
     through a JPEG file on disk, then compute the L2 distance to the
     corresponding ground-truth image.  Returns the list of distances.
     """
     errors = []
     for idx, image in enumerate(imageset):
         ai.calculateTransform(image, True)
         logger.info('health: %s' % ai.health)
         corrected = ai.applyTransform(image)
         # Write and re-read as JPEG so compression effects are included
         # in the comparison.
         testimgf = "testimage%d.jpg" % idx
         cv2.imwrite(testimgf, corrected)
         testimg = cv2.imread(testimgf)
         # uncorrected is > 500
         distance = L2_image_distance(testimg, gtimageset[idx])
         if distance > error_threshold:
             logger.error("Correction seemed to fail for image %s" % idx)
         errors.append(distance)
     return errors
Exemple #7
0
def anti_instagram_annotations_test(dirname, out_base):
    """Run examine_dataset() on every *.iids1 dataset under dirname, persist
    per-directory and merged results to a shelve DB, and log a summary.

    Raises:
        Exception: if dirname does not exist.
        ValueError: if no *.iids1 entries are found.
    """
    base = expand_all(dirname)

    if not os.path.exists(base):
        msg = 'Could not find directory %s' % base
        raise Exception(msg)

    dirs = locate_files(base, '*.iids1', alsodirs=True)
    directory_results = {}
    overall_results = []

    if not dirs:
        raise ValueError('No IIDS1 directories found')

    for d in dirs:
        # One output subdirectory per dataset, named <dataset>.v.
        out = os.path.join(out_base, os.path.basename(d) + '.v')

        if not os.path.exists(out):
            os.makedirs(out)
        results = examine_dataset(d, out)
        overall_results = merge_comparison_results(results, overall_results)
        directory_results[d] = results

    # try/finally so the shelve file is closed even if a write raises.
    db = shelve.open('tests_results', flag='n')
    try:
        db['directory_results'] = directory_results
        db['overall_results'] = overall_results
    finally:
        db.close()

    # After merging, overall_results is indexed like a dict of aggregate
    # stats -- presumably produced by merge_comparison_results; confirm there.
    logger.info(
        "overall average error: %f" %
        (overall_results['total_error'] / overall_results['total_pixels']))
    logger.info("overall regions checked: %f" %
                (overall_results['total_regions']))
    for t in overall_results['v_vals'].keys():
        logger.info("region %f: RGB %f,%f,%f, HSV %f,%f,%f" %
                    (t, np.mean(overall_results['r_vals'][t]),
                     np.mean(overall_results['g_vals'][t]),
                     np.mean(overall_results['b_vals'][t]),
                     np.mean(overall_results['h_vals'][t]),
                     np.mean(overall_results['s_vals'][t]),
                     np.mean(overall_results['v_vals'][t])))
def decode3(data):
    """Thin wrapper around rgb_from_jpg_by_JPEG_library for the benchmark."""
    decoded = rgb_from_jpg_by_JPEG_library(data)
    return decoded

import numpy as np
def create_empty_image(data, shape=(480, 640, 3)):
    """Allocate a zeroed uint8 image; `data` is ignored (it exists only to
    match the `wrap(method, data)` timing-harness signature).

    The default shape keeps existing callers unchanged; pass `shape` to
    allocate other sizes.
    """
    return np.zeros(shape, np.uint8)
#     cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1).
    
def wrap(method, data):
    """Call `method(data)` and return its result.

    Previously the result was computed and silently dropped; returning it
    lets callers inspect the decoded output (existing timing callers that
    ignore the return value are unaffected).
    """
    res = method(data)
    # print('%s returned %s' % (method.__name__, res.shape))
    return res

if __name__ == '__main__':
    # Benchmark each JPEG-decoding strategy and log the mean wall time.
    n = 10  # repetitions per method
    import platform
    proc = platform.processor()

    # Names of module-level decode functions, looked up from __main__
    # inside the timeit statement string.
    methods = ['decode_cv_orig',  # 'decode_cv_buf',

                'decode2', 'decode3', 'create_empty_image']
    for m in methods:
        # `setup` runs once per timeit() call; `stmt` runs n times.
        tn = timeit.timeit(stmt='from __main__ import %s, wrap; wrap(%s, data)' % (m, m),
                       setup='from __main__ import setup; data=setup()',
                       number=n
                       )
        t = tn / n
        logger.info("%s: method %s, avg over %d tries: %.1f ms " % (proc, m, n, t * 1000))

 def assert_L1_small(self, img1, img2, threshold=0.1):
     """Assert the L1 distance between two images is at most `threshold`.

     Both L1 and L2 distances are logged; only L1 is checked.
     """
     l1 = L1_image_distance(img1, img2)
     l2 = L2_image_distance(img1, img2)
     logger.info('diff_L2: %f' % l2)
     logger.info('diff_L1: %f' % l1)
     self.assertLessEqual(l1, threshold)
    def test_anti_instagram_correctness(self):
        """Check that scaleandshift1 and scaleandshift2 agree: both respect
        the identity transform and match each other on random inputs."""
        logger.info('This is going to test that algorithm 1 and 2 give same results')

        id_scale, id_shift = [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]
        img = random_image(480, 640)

        logger.info('algo 1 respects the identity')
        self.assert_L1_small(img, scaleandshift1(img, id_scale, id_shift))

        logger.info('algo 2 respects the identity')
        self.assert_L1_small(img, scaleandshift2(img, id_scale, id_shift))

        # Each case: (log message, randomize scale?, randomize shift?).
        # Random draws happen scale-first, then shift, matching the original
        # draw order so results are reproducible under a fixed seed.
        cases = [
            ('algo 1 and 2 give the same output with random shift', False, True),
            ('algo 1 and 2 give the same output with random scale', True, False),
            ('algo 1 and 2 give the same output with random inputs', True, True),
        ]
        for message, random_scale, random_shift in cases:
            logger.info(message)
            scale = np.random.rand(3) if random_scale else id_scale
            shift = np.random.rand(3) if random_shift else id_shift
            self.assert_L1_small(scaleandshift1(img, scale, shift),
                                 scaleandshift2(img, scale, shift))
Exemple #11
0
 def assert_L1_small(self, img1, img2, threshold=0.1):
     """Fail unless the L1 distance between img1 and img2 is <= threshold;
     logs both the L2 and L1 distances for diagnostics."""
     distance_l1 = L1_image_distance(img1, img2)
     distance_l2 = L2_image_distance(img1, img2)
     logger.info('diff_L2: %f' % distance_l2)
     logger.info('diff_L1: %f' % distance_l1)
     self.assertLessEqual(distance_l1, threshold)
Exemple #12
0
    def test_anti_instagram_correctness(self):
        """Verify the two scale-and-shift implementations produce matching
        output on identity and randomized scale/shift parameters."""
        logger.info(
            'This is going to test that algorithm 1 and 2 give same results')

        id_scale, id_shift = [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]
        img = random_image(480, 640)

        logger.info('algo 1 respects the identity')
        out1 = scaleandshift1(img, id_scale, id_shift)
        self.assert_L1_small(img, out1)

        logger.info('algo 2 respects the identity')
        out2 = scaleandshift2(img, id_scale, id_shift)
        self.assert_L1_small(img, out2)

        logger.info('algo 1 and 2 give the same output with random shift')
        scale, shift = id_scale, np.random.rand(3)
        self.assert_L1_small(scaleandshift1(img, scale, shift),
                             scaleandshift2(img, scale, shift))

        logger.info('algo 1 and 2 give the same output with random scale')
        scale, shift = np.random.rand(3), id_shift
        self.assert_L1_small(scaleandshift1(img, scale, shift),
                             scaleandshift2(img, scale, shift))

        logger.info('algo 1 and 2 give the same output with random inputs')
        scale, shift = np.random.rand(3), np.random.rand(3)
        self.assert_L1_small(scaleandshift1(img, scale, shift),
                             scaleandshift2(img, scale, shift))
def examine_dataset(dirname, out):
    logger.info(dirname)
    dirname = expand_environment(dirname)

    jpgs = locate_files(dirname, "*.jpg")
    mats = locate_files(dirname, "*.mat")

    logger.debug("I found %d jpgs and %d mats" % (len(jpgs), len(mats)))

    if len(jpgs) == 0:
        msg = "Not enough jpgs."
        raise ValueError(msg)

    #     if len(mats) == 0:
    #         msg = 'Not enough mats.'
    #         raise ValueError(msg)

    first_jpg = sorted(jpgs)[0]
    logger.debug("Using jpg %r to learn transformation." % first_jpg)

    first_jpg_image = image_cv_from_jpg_fn(first_jpg)

    success, health, parameters = calculate_transform(first_jpg_image)

    s = ""
    s += "success: %s\n" % str(success)
    s += "health: %s\n" % str(health)
    s += "parameters: %s\n" % str(parameters)
    w = os.path.join(out, "learned_transform.txt")
    with open(w, "w") as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    config_dir = "${DUCKIETOWN_ROOT}/catkin_ws/src/duckietown/config/baseline/line_detector/line_detector_node/"
    config_dir = expand_environment(config_dir)
    configurations = locate_files(config_dir, "*.yaml")
    # logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []

        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST

        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, "%s.all.png" % (bn))

        if os.path.exists(fn):
            logger.debug("Skipping because file exists: %r" % fn)
        else:
            for c in configurations:
                logger.info("Trying %r" % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ["oreo", "myrtle", "bad_lighting", "226-night"]:
                    continue
                with open(c) as f:
                    stuff = yaml.load(f)

                if not "detector" in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)

                detector = stuff["detector"]
                logger.info(detector)
                if not isinstance(detector, list) and len(detector) == 2:
                    raise ValueError(detector)

                from duckietown_utils.instantiate_utils import instantiate

                def LineDetectorClass():
                    return instantiate(detector[0], detector[1])

                s = run_detection(
                    transform,
                    j,
                    out,
                    shape=shape,
                    interpolation=interpolation,
                    name=name,
                    LineDetectorClass=LineDetectorClass,
                )
                summaries.append(s)

            together = make_images_grid(summaries, cols=1, pad=10, bgcolor=[0.5, 0.5, 0.5])
            cv2.imwrite(fn, zoom_image(together, 4))
    # ipython_if_guy()
    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + ".jpg"
        if not os.path.exists(jpg):
            msg = "JPG %r for mat %r does not exist" % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m], overall_results)
            print "comparison_results[m]=frame_results"
            # ipython_if_guy()
    print "finished mats: " + dirname
    return overall_results
def test_pair(transform, jpg, mat, out):
    """ 
        jpg = filename
        mat = filename
    """

    data = scipy.io.loadmat(mat)
    regions = data["regions"].flatten()
    max_type = 0
    for r in regions:
        max_type = max(max_type, r["type"][0][0][0][0])
    r_vals = {}

    for t in np.arange(max_type):
        r_vals[t + 1] = np.array([], "float32")

    g_vals = copy.deepcopy(r_vals)
    b_vals = copy.deepcopy(r_vals)
    h_vals = copy.deepcopy(r_vals)
    s_vals = copy.deepcopy(r_vals)
    v_vals = copy.deepcopy(r_vals)

    result_stats = {
        "average_abs_err": [],
        "total_pixels": 0,
        "total_error": 0,
        "total_regions": 0,
        "r_vals": r_vals,
        "g_vals": g_vals,
        "b_vals": b_vals,
        "h_vals": h_vals,
        "s_vals": s_vals,
        "v_vals": v_vals,
    }
    for r in regions:
        logger.info("region")
        x = r["x"][0][0].flatten()
        y = r["y"][0][0].flatten()
        mask = r["mask"][0][0]
        mask3 = cv2.merge([mask, mask, mask])
        print "x", x
        print "y", y
        print "mask shape", mask.shape
        print "type", r["type"][0][0][0][
            0
        ]  # type in 1- based / matlab-based indices from the list of region types (i.e road, white, yellow, red, or what ever types were annotated)
        print "color", r["color"][0]  # color in [r,g,b] where [r,g,b]are between 0 and 1
        t = r["type"][0][0][0][0]
        # print 'guy look here'
        region_color = r["color"][0]
        region_color = region_color[0][0]
        rval = region_color[0] * 255.0
        gval = region_color[1] * 255.0
        bval = region_color[2] * 255.0
        image = image_cv_from_jpg_fn(jpg)
        transformed = transform(image)
        [b2, g2, r2] = cv2.split(transformed)
        thsv = cv2.cvtColor(transformed, cv2.cv.CV_BGR2HSV)
        [h2, s2, v2] = cv2.split(thsv)
        r2_ = r2[mask.nonzero()]
        g2_ = g2[mask.nonzero()]
        b2_ = b2[mask.nonzero()]
        h2_ = h2[mask.nonzero()]
        s2_ = s2[mask.nonzero()]
        v2_ = v2[mask.nonzero()]
        # ipython_if_guy()
        result_stats["r_vals"][t] = np.concatenate((result_stats["r_vals"][t], r2_), 0)
        result_stats["g_vals"][t] = np.concatenate((result_stats["g_vals"][t], g2_), 0)
        result_stats["b_vals"][t] = np.concatenate((result_stats["b_vals"][t], b2_), 0)
        result_stats["h_vals"][t] = np.concatenate((result_stats["h_vals"][t], h2_), 0)
        result_stats["s_vals"][t] = np.concatenate((result_stats["s_vals"][t], s2_), 0)
        result_stats["v_vals"][t] = np.concatenate((result_stats["v_vals"][t], v2_), 0)
        absdiff_img = cv2.absdiff(transformed, np.array([bval, gval, rval, 0.0]))
        masked_diff = cv2.multiply(np.array(absdiff_img, "float32"), np.array(mask3, "float32"))
        num_pixels = cv2.sumElems(mask)[0]
        region_error = cv2.sumElems(cv2.sumElems(masked_diff))[0]
        avg_abs_err = region_error / (num_pixels + 1.0)
        print "Average abs. error", avg_abs_err
        result_stats["average_abs_err"].append(avg_abs_err)
        result_stats["total_pixels"] = result_stats["total_pixels"] + num_pixels
        result_stats["total_error"] = result_stats["total_error"] + region_error
        result_stats["total_regions"] = result_stats["total_regions"] + 1
        # XXX: to finish
    return result_stats
Exemple #15
0
def examine_dataset(dirname, out):
    logger.info(dirname)
    dirname = expand_all(dirname)

    jpgs = locate_files(dirname, '*.jpg')
    mats = locate_files(dirname, '*.mat')

    logger.debug('I found %d JPGs and %d .mat.' % (len(jpgs), len(mats)))

    if len(jpgs) == 0:
        msg = 'Not JPGs found in %r.' % dirname
        raise ValueError(msg)


#     if len(mats) == 0:
#         msg = 'Not enough mats.'
#         raise ValueError(msg)

    first_jpg = sorted(jpgs)[0]
    logger.debug('Using jpg %r to learn transformation.' % first_jpg)

    first_jpg_image = image_cv_from_jpg_fn(first_jpg)

    success, health, parameters = calculate_transform(first_jpg_image)

    s = ""
    s += 'success: %s\n' % str(success)
    s += 'health: %s\n' % str(health)
    s += 'parameters: %s\n' % str(parameters)
    w = os.path.join(out, 'learned_transform.txt')
    with open(w, 'w') as f:
        f.write(s)
    logger.info(s)

    transform = ScaleAndShift(**parameters)

    duckietown_package_dir = get_ros_package_path('duckietown')
    config_dir = os.path.join(
        duckietown_package_dir,
        'config/baseline/line_detector/line_detector_node')

    if not os.path.exists(config_dir):
        msg = 'Could not find configuration dir %s' % config_dir
        raise Exception(msg)

    config_dir = expand_all(config_dir)
    configurations = locate_files(config_dir, '*.yaml')

    if not configurations:
        msg = 'Could not find any configuration file in %s.' % config_dir
        raise Exception(msg)
    #logger.info('configurations: %r' % configurations)

    for j in jpgs:
        summaries = []

        shape = (200, 160)
        interpolation = cv2.INTER_NEAREST

        bn = os.path.splitext(os.path.basename(j))[0]
        fn = os.path.join(out, '%s.all.png' % (bn))

        if os.path.exists(fn):
            logger.debug('Skipping because file exists: %r' % fn)
        else:
            for c in configurations:
                logger.info('Trying %r' % c)
                name = os.path.splitext(os.path.basename(c))[0]
                if name in ['oreo', 'myrtle', 'bad_lighting', '226-night']:
                    continue
                with open(c) as f:
                    stuff = yaml.load(f)

                if not 'detector' in stuff:
                    msg = 'Cannot find "detector" section in %r' % c
                    raise ValueError(msg)

                detector = stuff['detector']
                logger.info(detector)
                if not isinstance(detector, list) and len(detector) == 2:
                    raise ValueError(detector)

                def LineDetectorClass():
                    return instantiate(detector[0], detector[1])

                s = run_detection(transform,
                                  j,
                                  out,
                                  shape=shape,
                                  interpolation=interpolation,
                                  name=name,
                                  LineDetectorClass=LineDetectorClass)
                summaries.append(s)

            together = make_images_grid(summaries,
                                        cols=1,
                                        pad=10,
                                        bgcolor=[.5, .5, .5])
            cv2.imwrite(fn, zoom_image(together, 4))

    overall_results = []
    comparison_results = {}
    for m in mats:
        logger.debug(m)
        jpg = os.path.splitext(m)[0] + '.jpg'
        if not os.path.exists(jpg):
            msg = 'JPG %r for mat %r does not exist' % (jpg, m)
            logger.error(msg)
        else:
            frame_results = test_pair(transform, jpg, m, out)
            comparison_results[m] = frame_results
            overall_results = merge_comparison_results(comparison_results[m],
                                                       overall_results)
            print "comparison_results[m]=frame_results"

    print "finished mats: " + dirname
    return overall_results
Exemple #16
0
def test_pair(transform, jpg, mat, out):
    """Compare a color-transformed jpg against its .mat region annotations.

    Args:
        transform: callable applied to the loaded image (e.g. ScaleAndShift).
        jpg: filename of the image.
        mat: filename of the matlab annotation file (has a 'regions' array).
        out: output directory (not written to in this function).

    Returns a dict accumulating, per region type, the transformed R/G/B and
    H/S/V pixel values inside each region mask, plus total pixel count,
    total absolute error against the annotated color, and region count.
    """

    data = scipy.io.loadmat(mat)
    regions = data['regions'].flatten()
    # Region types are 1-based matlab indices; find the largest type used.
    max_type = 0
    for r in regions:
        max_type = max(max_type, r['type'][0][0][0][0])
    r_vals = {}

    # One empty float32 accumulator per type (keys 1..max_type).
    for t in np.arange(max_type):
        r_vals[t + 1] = np.array([], 'float32')

    g_vals = copy.deepcopy(r_vals)
    b_vals = copy.deepcopy(r_vals)
    h_vals = copy.deepcopy(r_vals)
    s_vals = copy.deepcopy(r_vals)
    v_vals = copy.deepcopy(r_vals)

    result_stats = {
        'average_abs_err': [],
        'total_pixels': 0,
        'total_error': 0,
        'total_regions': 0,
        'r_vals': r_vals,
        'g_vals': g_vals,
        'b_vals': b_vals,
        'h_vals': h_vals,
        's_vals': s_vals,
        'v_vals': v_vals
    }
    for r in regions:
        logger.info('region')
        x = r['x'][0][0].flatten()
        y = r['y'][0][0].flatten()
        mask = r['mask'][0][0]
        # 3-channel mask so it can multiply a BGR difference image.
        mask3 = cv2.merge([mask, mask, mask])
        print 'x', x
        print 'y', y
        print 'mask shape', mask.shape
        # type in 1- based / matlab-based indices from the list of region types (i.e road, white,
        # yellow, red, or what ever types were annotated)
        print 'type', r['type'][0][0][0][0]
        # color in [r,g,b] where [r,g,b]are between 0 and 1
        print 'color', r['color'][0]
        t = r['type'][0][0][0][0]
        # print 'guy look here'
        region_color = r['color'][0]
        region_color = region_color[0][0]
        # Annotated color is normalized [0,1]; scale to the 8-bit range.
        rval = region_color[0] * 255.
        gval = region_color[1] * 255.
        bval = region_color[2] * 255.
        # NOTE(review): re-loading/re-transforming per region is
        # loop-invariant (jpg does not change here) and could be hoisted.
        image = image_cv_from_jpg_fn(jpg)
        transformed = transform(image)
        [b2, g2, r2] = cv2.split(transformed)
        thsv = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        [h2, s2, v2] = cv2.split(thsv)
        # Pixel values of the transformed image inside the region mask.
        r2_ = r2[mask.nonzero()]
        g2_ = g2[mask.nonzero()]
        b2_ = b2[mask.nonzero()]
        h2_ = h2[mask.nonzero()]
        s2_ = s2[mask.nonzero()]
        v2_ = v2[mask.nonzero()]

        result_stats['r_vals'][t] = np.concatenate(
            (result_stats['r_vals'][t], r2_), 0)
        result_stats['g_vals'][t] = np.concatenate(
            (result_stats['g_vals'][t], g2_), 0)
        result_stats['b_vals'][t] = np.concatenate(
            (result_stats['b_vals'][t], b2_), 0)
        result_stats['h_vals'][t] = np.concatenate(
            (result_stats['h_vals'][t], h2_), 0)
        result_stats['s_vals'][t] = np.concatenate(
            (result_stats['s_vals'][t], s2_), 0)
        result_stats['v_vals'][t] = np.concatenate(
            (result_stats['v_vals'][t], v2_), 0)
        # Per-pixel |transformed - annotated color|, restricted to the mask.
        absdiff_img = cv2.absdiff(transformed, np.array([bval, gval, rval,
                                                         0.]))
        masked_diff = cv2.multiply(np.array(absdiff_img, 'float32'),
                                   np.array(mask3, 'float32'))
        num_pixels = cv2.sumElems(mask)[0]
        region_error = cv2.sumElems(cv2.sumElems(masked_diff))[0]
        # +1. avoids division by zero for empty masks.
        avg_abs_err = region_error / (num_pixels + 1.)
        print 'Average abs. error', avg_abs_err
        result_stats['average_abs_err'].append(avg_abs_err)
        result_stats[
            'total_pixels'] = result_stats['total_pixels'] + num_pixels
        result_stats[
            'total_error'] = result_stats['total_error'] + region_error
        result_stats['total_regions'] = result_stats['total_regions'] + 1
        # XXX: to finish
    return result_stats