Example #1
def job_analyze(log, analyzer):
    easy_algo_db = get_easy_algo_db()
    analyzer_instance = easy_algo_db.create_instance('analyzer', analyzer)
    in_bag = rosbag.Bag(log)
    results = OrderedDict()
    logger.info('Running %s on %s' % (analyzer, log))
    analyzer_instance.analyze_log(in_bag, results)
    in_bag.close()
    return results
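
A minimal usage sketch (not from the source): the bag path and the analyzer name below are hypothetical placeholders for a recorded log file and an analyzer registered in the easy_algo database.

# Hypothetical call: 'my_log.bag' and 'lane_analyzer' are placeholders.
results = job_analyze('my_log.bag', 'lane_analyzer')
for key, value in results.items():
    print('%s: %s' % (key, value))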
Example #2
    def on_parameters_changed(self, _first_time, updated):

        if 'verbose' in updated:
            self.info('Verbose is now %r' % self.config.verbose)

        if 'line_detector' in updated:
            db = get_easy_algo_db()
            self.detector = db.create_instance('line_detector',
                                               self.config.line_detector)
Example #3
def job_merge(results, analyzer):
    """
        results: log name -> results dict
    """
    easy_algo_db = get_easy_algo_db()
    analyzer_instance = easy_algo_db.create_instance('analyzer', analyzer)

    results = list(results.values())

    total = merge_n(analyzer_instance, results)
    return total
Example #4
def call_summary():
    db = get_easy_algo_db()
    s = format_db(db)
    logger.info(s)

    errors = []
    for f in db.family_name2config.values():
        if not f.valid:
            errors.append('Family %s: %s' %
                          (f.family_name, f.error_if_invalid))
        for i in f.instances.values():
            if not i.valid:
                errors.append('Family %s / instance %r: %s' %
                              (f.family_name, i.instance_name, i.error_if_invalid))

    if errors:
        msg = '\n' + '\n'.join(errors)
        raise Exception(msg)
Example #5
    def define_jobs_context(self, context):
        easy_algo_db = get_easy_algo_db()

        expect = self.options.expect

        if expect not in RTCheck.CHECK_RESULTS:
            msg = ('Invalid expect status %s; must be one of %s.' %
                   (expect, RTCheck.CHECK_RESULTS))
            raise DTUserError(msg)

        query = self.options.tests
        regression_tests = easy_algo_db.query('regression_test', query,
                                              raise_if_no_matches=True)

        for rt_name in regression_tests:
            rt = easy_algo_db.create_instance('regression_test', rt_name)

            easy_logs_db = self.get_easy_logs_db()
            c = context.child(rt_name)

            outd = os.path.join(self.options.output, 'regression_tests',
                                rt_name)
            jobs_rt(c, rt_name, rt, easy_logs_db, outd, expect)
Example #6
def compute_check_results(rt_name, rt, results_all):
    current = make_entry(rt_name, results_all)

    algo_db = get_easy_algo_db()
    entries_names = algo_db.query('rdbe', 'parameters:regression_test_name:%s' % rt_name)
    dtu.logger.info('entries: %s' % list(entries_names))
    entries = []
    for name in entries_names:
        e = algo_db.create_instance('rdbe', name)
        entries.append(e)

    rdb = ResultDB(current=current, entries=entries)

    res = []
    for cwc in rt.get_checks():
        for check in cwc.checks:
            r = check.check(rdb)
            assert isinstance(r, CheckResult)
            res.append(r)
    return res
Example #7
    def go(self):
        args = self.options.get_extra()
        db = get_easy_algo_db()
        colorize = True
        verbose = self.options.verbose
        if len(args) == 0:
            s = format_db(db, verbose=verbose, colorize=colorize)

        elif len(args) == 1:
            family = db.get_family(args[0])

            s = format_instances(family,
                                 colorize=colorize,
                                 verbose=self.options.verbose)
        elif len(args) == 2:
            family_name = args[0]
            family = db.get_family(family_name)
            instance_name = args[1]
            instance = db.create_instance(family_name, instance_name)

            s = instance
        else:
            msg = 'Expected at most two arguments, got %d.' % len(args)
            raise ValueError(msg)
        print(s)
Example #8
    def process_log(self, bag_in, bag_out):
        algo_db = get_easy_algo_db()
        line_detector = algo_db.create_instance(FAMILY_LINE_DETECTOR,
                                                self.line_detector)
        image_prep = algo_db.create_instance('image_prep', self.image_prep)

        vehicle = which_robot(bag_in)
        topic = '/%s/camera_node/image/compressed' % vehicle
        context = FakeContext()
        transform = None
        frame = 0
        for compressed_img_msg in d8n_bag_read_with_progress(bag_in, topic):

            with context.phase('decoding'):
                try:
                    image_cv = image_cv_from_jpg(compressed_img_msg.data)
                except ValueError as e:
                    msg = 'Could not decode image: %s' % e
                    raise_wrapped(ValueError, e, msg)

            segment_list = image_prep.process(context, image_cv, line_detector,
                                              transform)

            rendered = vs_fancy_display(image_prep.image_cv, segment_list)
            rendered = d8_image_zoom_linear(rendered, 2)
            log_name = 'log_name'
            time = 12
            rendered = add_duckietown_header(rendered, log_name, time, frame)
            out = d8n_image_msg_from_cv_image(
                rendered, "bgr8", same_timestamp_as=compressed_img_msg)

            # Write to the bag
            bag_out.write('processed', out)

            #             out = d8n_image_msg_from_cv_image(image_cv, "bgr8", same_timestamp_as=compressed_img_msg)
            bag_out.write('image', compressed_img_msg)

            frame += 1
Example #9
def process_one(bag_filename, t0, t1, processors, log_out):
    logger.info('process_one()')
    logger.info('   input: %s' % bag_filename)
    logger.info('   processors: %s' % processors)
    logger.info('   out: %s' % log_out)

    d8n_make_sure_dir_exists(log_out)

    tmpdir = create_tmpdir()
    tmpfiles = []

    def get_tmp_bag():
        i = len(tmpfiles)
        f = os.path.join(tmpdir, 'tmp%d.bag' % i)
        tmpfiles.append(f)
        return f

    easy_algo_db = get_easy_algo_db()
    # instantiate processors
    processors_instances = [
        easy_algo_db.create_instance('processor', _) for _ in processors
    ]


    try:
        for i, p in enumerate(processors_instances):
            next_bag_filename = get_tmp_bag()

            in_bag = rosbag.Bag(bag_filename)

            if i == 0:
                in_bag = BagReadProxy(in_bag, t0, t1)

            out_bag = rosbag.Bag(next_bag_filename, 'w')

            logger.info('Processing:\n  in = %s\n out = %s' %
                        (bag_filename, next_bag_filename))
            p.process_log(in_bag, out_bag)

            in_bag.close()
            out_bag.close()

            bag_filename = next_bag_filename

        logger.info('Creating output file %s' % log_out)
        if not processors:
            # just create symlink
            logger.info('(Just creating symlink, because there '
                        'was no processing done.)')
            os.symlink(os.path.realpath(bag_filename), log_out)
        else:
            try:
                shutil.copy(bag_filename, log_out)
            except Exception:
                logger.error('Could not create %s' % log_out)
        logger.info('I created %s' % log_out)

    finally:
        for f in tmpfiles:
            logger.info(' deleting %s' % f)
            os.unlink(f)
    return log_out
Example #10
def run_pipeline(image, line_detector_name, image_prep_name):
    """ 
        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res with the following fields:
            
            res['input_image']
    """
    res = OrderedDict()
    res['image_input'] = image
    algo_db = get_easy_algo_db()
    line_detector = algo_db.create_instance('line_detector',
                                            line_detector_name)
    image_prep = algo_db.create_instance('image_prep', image_prep_name)

    context = FakeContext()

    segment_list = image_prep.process(context,
                                      image,
                                      line_detector,
                                      transform=None)

    res['segments_on_image_input'] = vs_fancy_display(image_prep.image_cv,
                                                      segment_list)
    res['segments_on_image_resized'] = vs_fancy_display(
        image_prep.image_resized, segment_list)

    ai = AntiInstagram()
    ai.calculateTransform(res['image_input'])

    transform = ai.applyTransform

    res['image_input_transformed'] = transform(res['image_input'])
    res['image_input_transformed_then_convertScaleAbs'] = cv2.convertScaleAbs(
        res['image_input_transformed'])

    segment_list2 = image_prep.process(context,
                                       image,
                                       line_detector,
                                       transform=transform)

    res['segments2_on_image_input_transformed'] = \
        vs_fancy_display(image_prep.image_cv, segment_list2)
    res['segments2_on_image_input_transformed_resized'] = \
        vs_fancy_display(image_prep.image_resized, segment_list2)

    filename = (get_ros_package_path('duckietown') +
                "/config/baseline/calibration/camera_intrinsic/default.yaml")

    ci = load_camera_info(filename)
    #     (h,w) = image.shape[:2]
    ci_W = ci.width
    ci_H = ci.height
    mapx, mapy = cv2.initUndistortRectifyMap(ci.K, ci.D, ci.R, ci.P,
                                             (ci_W, ci_H), cv2.CV_32FC1)
    # mapx and mapy are (h, w) matrices that tell you
    # the x coordinate and the y coordinate for each point
    # in the first image
    #     print mapx.shape(),mapy.shape()
    res['grid'] = get_grid(image.shape[:2])
    res['grid_remap'] = cv2.remap(res['grid'], mapx, mapy, cv2.INTER_LINEAR)

    res['image_input_rect'] = cv2.remap(res['image_input'], mapx, mapy,
                                        cv2.INTER_LINEAR)
    segment_list_rect = apply_transform_to_segment(segment_list, mapx, mapy)
    res['segments2_on_image_input_rect'] = \
        vs_fancy_display(res['image_input_rect'], segment_list_rect)

    res['grid_undistort'] = cv2.undistort(res['grid'], ci.K, ci.D)
    res['image_input_undistort'] = cv2.undistort(res['image_input'], ci.K,
                                                 ci.D)

    homography = np.array([
        -4.89775e-05, -0.0002150858, -0.1818273, 0.00099274, 1.202336e-06,
        -0.3280241, -0.0004281805, -0.007185673, 1
    ]).reshape((3, 3))

    segment_list_undistort = undistort_segments(segment_list,
                                                ci_W,
                                                ci_H,
                                                ci.K,
                                                ci.D,
                                                ci.P,
                                                ci.R,
                                                homography=homography)
    res['segments_und_image_input_und'] = vs_fancy_display(
        res['image_input_undistort'], segment_list_undistort)

    #     homography_inv = np.linalg.inv(homography)
    res['image_input_undistort_warp'] = cv2.warpPerspective(
        res['image_input'],
        homography, (ci_W, ci_H),
        flags=cv2.WARP_INVERSE_MAP)
    # flags=cv2.WARP_INVERSE_MAP applies the inverse of the homography

    return res
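
A minimal usage sketch (not from the source), assuming cv2 is available; the image path and the two instance names are hypothetical placeholders for entries registered in the easy_algo database.

import cv2

# Hypothetical example: 'frame.jpg', 'baseline', and 'prep_120_40' are placeholders.
image = cv2.imread('frame.jpg')  # BGR image of shape (H, W, 3)
res = run_pipeline(image,
                   line_detector_name='baseline',
                   image_prep_name='prep_120_40')
for name in res:
    print(name)  # e.g. 'image_input', 'image_input_rect', ...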