示例#1
0
def get_ipfs_hash(filename):
    """ Returns the IPFS hash (a 'Qm...' string) for `filename` without
        actually adding it to the local node.

        Equivalent to the shell command:

            $ ipfs add --only-hash LICENSE
            added QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC LICENSE

        Raises Exception if the ipfs output does not have the expected
        "added Qm... <name>" shape.
    """
    dtu.logger.debug('Computing IPFS hash for %s' % filename)
    # `base` is the ipfs command prefix defined at module level (not
    # visible in this chunk) -- presumably ['ipfs'] or similar.
    cmd = base + ['add', '--only-hash', filename]
    cwd = '.'
    res = dtu.system_cmd_result(cwd,
                                cmd,
                                display_stdout=False,
                                display_stderr=False,
                                raise_on_error=True)

    # Expected stdout: "added <Qm-hash> <filename>"
    out = res.stdout.strip().split(' ')
    if (len(out) < 3 or out[0] != 'added' or not out[1].startswith('Qm')):
        # Fixed typo in the error message: 'ipds' -> 'ipfs'.
        msg = 'Invalid output for ipfs:\n%s' % dtu.indent(res.stdout, ' > ')
        raise Exception(msg)
    hashed = out[1]
    return hashed
示例#2
0
def call_summary():
    """ Logs a formatted summary of the easy-algo DB and raises an
        Exception enumerating every invalid family or instance. """
    db = get_easy_algo_db()
    dtu.logger.info(format_db(db))

    problems = []
    for family in db.family_name2config.values():
        if not family.valid:
            problems.append('Family %s: %s' %
                            (family.family_name, family.error_if_invalid))
        for instance in family.instances.values():
            if not instance.valid:
                problems.append('Family %s / instance %r:\n\n%s' %
                                (family.family_name, instance.instance_name,
                                 dtu.indent(instance.error_if_invalid, '  ')))

    if problems:
        raise Exception('\n' + '\n'.join(problems))
示例#3
0
def create_roster(people, outd):
    """ Builds hidden HTML markup for the roster, writing a small JPG
        thumbnail for each person into <outd>/roster-images.

        Returns the generated HTML as a string.
    """
    sep = '\n\n'
    chunks = ['<div style="display:none" id="autogenerated-roster">']
    for key, person in people.items():
        thumb = get_image_for_person(key, 128)
        basename = key + '.small.jpg'
        write_data_to_file(thumb,
                           os.path.join(outd, 'roster-images', basename))
        chunks.append('<div id="%s-roster" class="roster-person">' % key)
        chunks.append('\n <span class="name">%s</span>' % person.get_name())
        chunks.append('\n <img src="%s"/>' % basename)
        # Inline CSS is repeated per entry, matching the original output.
        chunks.append('\n' + indent(roster_css, '  '))
        chunks.append('\n</div>' + sep + sep)

    chunks.append(sep + '</div>')
    return ''.join(chunks)
示例#4
0
def reduce_stats(a, b, a_plus_b):
    """ Combines the statistics of two disjoint samples `a` and `b` into
        the dict `a_plus_b` (modified in place).

        Counts and lengths add; the mean is the exact sample-weighted
        mean. The median cannot be pooled exactly from two medians, so
        the average of the two is used as an approximation.
    """
    a_plus_b[
        'num_log_segments'] = a['num_log_segments'] + b['num_log_segments']
    a_plus_b['length'] = a['length'] + b['length']
    a_plus_b['nsamples'] = a['nsamples'] + b['nsamples']
    # Exact pooled mean, weighted by the number of samples on each side.
    a_plus_b['mean'] = (a['mean'] * a['nsamples'] +
                        b['mean'] * b['nsamples']) / a_plus_b['nsamples']
    a_plus_b['min'] = min(a['min'], b['min'])
    a_plus_b['max'] = max(a['max'], b['max'])
    # Note: it is not possible to compute the median in an efficient manner
    a_plus_b['median'] = (a['median'] + b['median']) / 2.0
    # Fixed: Python 2 `print x` statements -> print() calls, which are
    # valid (and equivalent, single-argument) on both Python 2 and 3.
    print(dtu.indent(a, 'a: '))
    print(dtu.indent(b, 'b: '))
    print(dtu.indent(a_plus_b, 'a_plus_b: '))
示例#5
0
def camera_info_from_yaml(calib_data):
    """ Builds a CameraInfo message from a calibration dictionary.

        Raises InvalidCameraInfo (wrapping the original error) if the
        dictionary does not have the expected structure.
    """
    try:
        info = CameraInfo()
        info.width = calib_data['image_width']
        info.height = calib_data['image_height']
        # The matrices are stored as flat 'data' lists in the YAML;
        # they are assigned as-is, without reshaping.
        info.K = calib_data['camera_matrix']['data']
        info.D = calib_data['distortion_coefficients']['data']
        info.R = calib_data['rectification_matrix']['data']
        info.P = calib_data['projection_matrix']['data']

        info.distortion_model = calib_data['distortion_model']
        return info
    except Exception as e:
        msg = 'Could not interpret data:'
        msg += '\n\n' + dtu.indent(yaml.dump(calib_data), '   ')
        dtu.raise_wrapped(InvalidCameraInfo, e, msg)
示例#6
0
    def check(self):
        """ Verifies /etc/hosts: no hard-wired private IPs, no .local
            host names, and an entry for this machine's hostname.

            Returns the file contents on success; raises CheckFailed
            with the full contents as the long explanation otherwise.
        """
        fn = '/etc/hosts'
        contents = open(fn).read()

        l = 'Entire contents of %s:\n' % fn + dtu.indent(contents, '  > ')

        # Hard-coded private-network prefixes make the file non-portable.
        if '10.' in contents or '192.' in contents:
            raise CheckFailed('The %s file contains hard-wired IPs.' % fn, l)

        if '.local' in contents:
            raise CheckFailed('The %s file contains hard-wired host names.' % fn, l)

        hostname = socket.gethostname()

        if hostname not in contents:
            msg = 'The %s file does not contain an entry for your hostname %r.' % (fn, hostname)
            raise CheckFailed(msg, l)

        return contents
        
示例#7
0
    def create_instance(self, family_name, instance_name):
        """ Instantiates `instance_name` from `family_name`.

            Checks that the family and the instance are valid, and that
            the constructed object implements the family's declared
            interface; raises DTConfigException otherwise.
        """
        family = self.get_family(family_name)
        if not family.valid:
            raise DTConfigException(
                'Cannot instantiate %r because its family %r is invalid.' %
                (instance_name, family_name))

        check_is_in('instance', instance_name, family.instances)
        instance = family.instances[instance_name]

        if not instance.valid:
            raise DTConfigException(
                'Cannot instantiate because it is invalid:\n%s' %
                indent(instance.error_if_invalid, '> '))

        obj = instantiate(instance.constructor, instance.parameters)

        # The result must implement the interface declared by the family.
        interface = import_name(family.interface)
        if not isinstance(obj, interface):
            raise DTConfigException(
                'I expected that %r would be a %s but it is a %s.' %
                (instance_name, interface.__name__, type(obj).__name__))

        return obj
示例#8
0
    def query_results_one(self, branch, date, commit):
        """ Returns the single ResultDBEntry matching the query.

            Raises DataNotFound if nothing matches, AmbiguousQuery if
            more than one entry does.
        """
        from easy_regression.conditions.eval import DataNotFound
        matches = self.query_results(branch, date, commit)

        if not matches:
            msg = 'Could not find any match for the query.'
            msg += '\n branch: %s' % branch
            msg += '\n   date: %s' % date
            msg += '\n commit: %s' % commit
            raise DataNotFound(msg)

        if len(matches) > 1:
            n = len(matches)
            msg = 'Found %d matches for this query.' % n
            msg += '\n   branch: %s' % branch
            msg += '\n     date: %s' % date
            msg += '\n   commit: %s' % commit
            msg += '\nThese are the matches:'
            for i, entry in enumerate(matches):
                dtu.check_isinstance(entry, ResultDBEntry)
                msg += '\n' + dtu.indent(str(entry), ' %2d of %d: ' % (i + 1, n))
            raise AmbiguousQuery(msg)

        return matches[0]
示例#9
0
    def __init__(self,
                 logs,
                 processors=None,
                 analyzers=None,
                 checks=None,
                 topic_videos=None,
                 topic_images=None):
        """ Configures a regression-test entry.

            `processors` is a list of dicts with a mandatory 'processor'
            key and optional 'prefix_in'/'prefix_out'; any other key
            raises ValueError. `checks` is parsed with
            parse_list_of_checks; a parse failure is re-raised as
            RTParseError with the offending YAML attached.
        """
        # Fixed: the defaults were mutable [] literals, which are shared
        # across every instance of the class; None + normalization keeps
        # the call signature backward-compatible without the aliasing.
        processors = [] if processors is None else processors
        analyzers = [] if analyzers is None else analyzers
        checks = [] if checks is None else checks
        topic_videos = [] if topic_videos is None else topic_videos
        topic_images = [] if topic_images is None else topic_images

        self.logs = logs

        self.processors = []
        for p in processors:
            # Deep-copy so the pops below do not mutate the caller's dict.
            p = copy.deepcopy(p)
            processor = p.pop('processor')
            prefix_in = p.pop('prefix_in', '')
            prefix_out = p.pop('prefix_out', '')
            if p:
                msg = 'Extra keys: %s' % p
                raise ValueError(msg)
            p2 = ProcessorEntry(prefix_in=prefix_in,
                                processor=processor,
                                prefix_out=prefix_out)
            self.processors.append(p2)

        self.analyzers = analyzers
        self.topic_videos = topic_videos
        self.topic_images = topic_images

        check_isinstance(checks, list)

        try:
            self.cwcs = parse_list_of_checks(checks)
        except RTParseError as e:
            msg = 'Cannot parse list of checks.'
            msg += '\n' + dtu.indent(dtu.yaml_dump_pretty(checks), '',
                                     'parsing: ')
            dtu.raise_wrapped(RTParseError, e, msg, compact=True)
示例#10
0
 def __str__(self):
     """ Multi-line rendering: status, summary, then details. """
     lines = ['CheckResult:',
              dtu.indent(self.status, '   status: '),
              dtu.indent(self.summary, '  summary: '),
              dtu.indent(self.details, '', '  details: ')]
     return '\n'.join(lines)
示例#11
0
def get_logs_local():
    """ Scans all local resources for .bag files and returns an
        OrderedDict mapping log name -> physical log.

        Censored, invalidly-named, and exact-duplicate files are
        skipped with a warning. Two different files with the same log
        name but different SHA1s are reported (error log, or Exception
        if raise_if_duplicated is enabled).
    """
    raise_if_duplicated = False
    all_resources = get_all_resources()

    logs = OrderedDict()
    ignored = []
    # Datasets we never want to pick up from the local tree.
    censor = ['ii-datasets', 'RCDP', '160122_3cars_dark-mercedes']

    for basename, filename in all_resources.basename2filename.items():
        if not basename.endswith('.bag'):
            continue

        if any(c in filename for c in censor):
            ignored.append(filename)
            dtu.logger.warn('Ignoring %s' % filename)
            continue

        if not is_valid_name(basename):
            msg = 'Ignoring Bag file with invalid file name "%r".' % (basename)
            msg += '\n Full path: %s' % filename
            dtu.logger.warn(msg)
            continue

        # Only consider the "plain" bag, not derived/processed variants
        # whose basename differs from the base name.
        base = _get_base_base(basename)
        if basename != base + '.bag':
            continue

        log = physical_log_from_filename(filename,
                                         all_resources.base2basename2filename)

        if log.log_name in logs:
            previous = logs[log.log_name]

            prev_sha1 = DTR.from_yaml(previous.resources['bag']).hash['sha1']
            curr_sha1 = DTR.from_yaml(log.resources['bag']).hash['sha1']

            if prev_sha1 == curr_sha1:
                # just a duplicate
                dtu.logger.warn('File is a duplicate: %s ' % filename)
                continue

            # Same log name, different content: actually a different log.
            msg = 'Found twice this log: %s' % log.log_name
            msg += '\nProbably it is a processed version.'
            msg += "\n\nVersion 1:"

            msg += '\n\n' + dtu.indent(str(previous), '  ')
            msg += "\n\n\nVersion 2:"
            msg += '\n\ncurrent: %s' % filename
            msg += '\n\ncurrent: %s' % ("RCDP" in filename)
            msg += '\n\n' + dtu.indent(str(log), '  ')
            if raise_if_duplicated:
                raise Exception(msg)
            else:
                dtu.logger.error(msg)

        logs[log.log_name] = log

    return logs
示例#12
0
 def __str__(self):
     """ Header with the time interval, then the first child indented. """
     header = 'MakeTimeSlice  { %s : %s }' % (self.t0, self.t1)
     body = dtu.indent(str(self.children[0]), '  ')
     return header + '\n' + body
示例#13
0
def compare_faster():
    """ Regression test: GridHelper.add_vote_faster (array form) must
        produce the same grid as GridHelper.add_vote (scalar form) for
        a single weighted vote.

        Writes comparison plots to the test output directory, and dumps
        both grids if they differ by more than 1e-8 before asserting
        near-equality.
    """
    # Two grid dimensions: angle (deg) and distance (m, displayed in cm).
    variables = collections.OrderedDict()
    variables['alpha'] = dict(min=-180,
                              max=180,
                              description="angle",
                              resolution=5,
                              units='deg',
                              units_display='deg')
    variables['r'] = dict(min=3,
                          max=5,
                          description="distance",
                          resolution=0.1,
                          units='m',
                          units_display='cm')
    # this will fail if precision is float32
    gh = GridHelper(variables, precision='float64')
    val_fast = gh.create_new()
    val_fast.fill(0)
    val_slow = gh.create_new()
    val_slow.fill(0)

    od = dtu.get_output_dir_for_test()

    F = 1

    alpha0 = 7
    # r0 = 4
    r0 = 4.1
    w0 = 1.
    # One vote via the scalar (slow) path.
    value = dict(alpha=alpha0, r=r0)
    gh.add_vote(val_slow, value, w0, F)

    # The single vote touches exactly 9 cells -- presumably a 3x3
    # neighborhood controlled by F; TODO confirm against GridHelper.
    assert_equal(np.sum(val_slow > 0), 9)

    # Same vote expressed in the (2, N) column layout that
    # add_vote_faster expects: row 0 = alpha, row 1 = r.
    values = np.zeros((2, 1))
    values[0, 0] = alpha0
    values[1, 0] = r0
    weights = np.zeros(1)
    weights[0] = w0
    gh.add_vote_faster(val_fast, values, weights, F)

    assert_equal(np.sum(val_fast > 0), 9)

    # Save diagnostic plots of both grids.
    d = grid_helper_plot(gh, val_slow)
    fn = os.path.join(od, 'compare_faster_slow.jpg')
    dtu.write_data_to_file(d.get_png(), fn)

    d = grid_helper_plot(gh, val_fast)
    fn = os.path.join(od, 'compare_faster_fast.jpg')
    dtu.write_data_to_file(d.get_png(), fn)

    # Element-wise comparison; print the sign patterns on mismatch to
    # make the differing cells visible.
    D = val_fast - val_slow
    diff = np.max(np.abs(D))
    print('diff: %r' % diff)
    if diff > 1e-8:
        print(dtu.indent(array_as_string_sign(val_fast), 'val_fast '))
        print(dtu.indent(array_as_string_sign(val_slow), 'val_slow '))
        print(dtu.indent(array_as_string_sign(D), 'Diff '))
        print('non zero val_fast: %s' % val_fast[val_fast > 0])
        print('non zero val_slow: %s' % val_slow[val_slow > 0])

    assert_almost_equal(val_fast, val_slow)
def interpret_config_file(filename):
    """
        Parses a configuration file into a ConfigInfo.

        The basename (after stripping SUFFIX) must look like:

            package-node.config_name            (date defaults to 20170101)
            package-node.config_name.date

        The file itself must be a YAML dictionary with at least
        'description' (str) and 'values' (dict); 'extends' (list) is
        optional. Raises dtu.DTConfigException on any malformed name or
        contents.
    """
    try:
        basename = os.path.basename(filename)
        base = basename.replace(SUFFIX, '')
        # now we have something like
        #   package-node.config_name.date
        # or
        #   package-node.config_name
        if not '.' in base:
            msg = 'Invalid filename %r.' % filename
            raise dtu.DTConfigException(msg)

        tokens = base.split('.')
        if len(tokens) > 3:
            msg = 'Too many periods/tokens (tokens=%s)' % tokens
            raise dtu.DTConfigException(msg)

        # BUG FIX: the package-node token must be parsed for BOTH the
        # two-token and three-token forms. Previously this block was
        # guarded by `if len(tokens) <= 2`, so for the
        # package-node.config_name.date form package_name/node_name were
        # never assigned and the function crashed later with NameError.
        package_node = tokens[0]
        if not '-' in package_node:
            msg = 'Expected a "-" in "%s".' % package_node
            raise dtu.DTConfigException(msg)
        i = package_node.index('-')
        package_name = package_node[:i]
        node_name = package_node[i + 1:]

        config_name = tokens[1]

        if len(tokens) == 3:
            # package-node.config_name.date
            date_effective = tokens[2]
        else:
            date_effective = '20170101'
        from dateutil.parser import parse

        try:
            date_effective = parse(date_effective)
        except (ValueError, OverflowError):
            # Narrowed from a bare `except:`; dateutil signals bad input
            # with ValueError (or OverflowError for absurd values).
            msg = 'Cannot interpret "%s" as a date.' % date_effective
            raise dtu.DTConfigException(msg)

        # now read file

        contents = open(filename).read()
        try:
            try:
                # NOTE(review): yaml.load without an explicit Loader is
                # unsafe on untrusted input; consider yaml.safe_load if
                # these config files can come from outside the project.
                data = yaml.load(contents)
            except YAMLError as e:
                dtu.raise_wrapped(dtu.DTConfigException,
                                  e,
                                  'Invalid YAML',
                                  compact=True)

            if not isinstance(data, dict):
                msg = 'Expected a dictionary inside.'
                raise dtu.DTConfigException(msg)

            for field in ['description', 'values']:
                if not field in data:
                    msg = 'Missing field "%s".' % field
                    raise dtu.DTConfigException(msg)

            description = data.pop('description')
            if not isinstance(description, str):
                msg = 'I expected that "description" is a string, obtained %r.' % description
                raise dtu.DTConfigException(msg)

            extends = data.pop('extends', [])
            if not isinstance(extends, list):
                msg = 'I expected that "extends" is a list, obtained %r.' % extends
                raise dtu.DTConfigException(msg)

            values = data.pop('values')
            if not isinstance(values, dict):
                msg = 'I expected that "values" is a dictionary, obtained %s.' % type(
                    values)
                raise dtu.DTConfigException(msg)

            # Freeze the data
            extends = tuple(extends)
            values = frozendict(values)

        except dtu.DTConfigException as e:
            msg = 'Could not interpret the contents of the file\n'
            msg += '   %s\n' % dtu.friendly_path(filename)
            msg += 'Contents:\n' + dtu.indent(contents, ' > ')
            dtu.raise_wrapped(dtu.DTConfigException, e, msg, compact=True)

        return ConfigInfo(
            filename=filename,
            package_name=package_name,
            node_name=node_name,
            config_name=config_name,
            date_effective=date_effective,
            extends=extends,
            description=description,
            values=values,
            # not decided
            valid=None,
            error_if_invalid=None)

    except dtu.DTConfigException as e:
        msg = 'Invalid file %s' % dtu.friendly_path(filename)
        dtu.raise_wrapped(dtu.DTConfigException, e, msg, compact=True)
示例#15
0
def run_checks(entries):
    """ Runs each entry's check and returns the list of Result objects.

        An entry with `only_run_if` set is skipped (SKIP) if its
        dependency failed/errored/skipped, or errored (ERROR) if the
        dependency has not run yet. Checks may raise CheckError or
        CheckFailed; any other exception marks the entry as an invalid
        test (ERROR) with the traceback attached.
    """
    results = []

    def record_result(r):
        results.append(r)

    # Raised by get_previous_result_status when the dependency has no
    # recorded result yet.
    class NotRun(Exception):
        pass

    def get_previous_result_status(e):
        # Linear scan is fine: the number of entries is small.
        for r in results:
            if e == r.entry:
                return r.status

        logger.error('Could not find %s' % e)
        logger.error(results)
        raise NotRun()

    for entry in entries:

        # check dependencies
        only_run_if = entry.only_run_if
        if only_run_if is None:
            pass
        else:
            try:
                dep_status = get_previous_result_status(only_run_if)

                if dep_status in [ChecksConstants.FAIL, ChecksConstants.ERROR]:
                    msg = "Skipped because the previous test %r failed." % (only_run_if.desc)
                    r = Result(entry=entry, status=ChecksConstants.SKIP, out_short=msg, out_long='')
                    record_result(r)
                    continue

                elif dep_status in [ChecksConstants.SKIP]:
                    msg = "Skipped because the previous test %r skipped." % (only_run_if.desc)
                    r = Result(entry=entry, status=ChecksConstants.SKIP, out_short=msg, out_long='')
                    record_result(r)
                    continue

            except NotRun:
                msg = 'Dependency did not run yet.'
                r = Result(entry=entry, status=ChecksConstants.ERROR, out_short=msg, out_long='', )
                record_result(r)
                continue

        # at this point, either it's None or passed
        assert only_run_if is None or (get_previous_result_status(only_run_if) == ChecksConstants.OK)

        try:
            res = entry.check.check() or ''
            r = Result(entry=entry, status=ChecksConstants.OK, out_short='', out_long=res)
            record_result(r)

        except CheckError as e:
            r = Result(entry=entry, status=ChecksConstants.ERROR,
                       out_short='Could not run test.',
                       out_long=e.long_explanation)
            record_result(r)

        except CheckFailed as e:
            r = Result(entry=entry, status=ChecksConstants.FAIL,
                       out_short=e.compact,
                       out_long=e.long_explanation)
            record_result(r)

        except Exception as e:
            msg = 'Invalid test: it raised the exception %s.' % type(e).__name__
            l = 'I expect the tests to only raise CheckError or CheckFailed.'
            l += '\n\nEntire exception:\n\n'
            # BUG FIX: traceback.format_exc() takes no exception argument
            # (the positional parameter is `limit`); passing `e` raised
            # TypeError on Python 3 and silently misbehaved on Python 2.
            l += indent(traceback.format_exc(), '  ')
            r = Result(entry=entry, status=ChecksConstants.ERROR,
                       out_short=msg,
                       out_long=l)
            record_result(r)

    return results
示例#16
0
def raise_error(rdb, t, res, s):
    """ Raises an Exception carrying `s` plus the obtained result, the
        test, and the result DB, each rendered with an aligned label. """
    parts = [s,
             dtu.indent(str(res), 'obtained: '),
             dtu.indent(str(t), '', 'test: '),
             dtu.indent(str(rdb), '', 'rdb: ')]
    raise Exception('\n'.join(parts))
示例#17
0
 def __str__(self):
     """ Renders the two operands and the operator on aligned lines. """
     parts = ["Binary operation",
              dtu.indent(self.a, '', '   a: '),
              dtu.indent(self.op, '', '  op: '),
              dtu.indent(self.b, '', '   b: ')]
     return '\n'.join(parts)
示例#18
0
文件: ldn.py 项目: sosorry/Software
    def on_received_image(self, context, image_msg):
        """ Full line-detection pipeline for one incoming camera frame.

            Decodes the compressed image, resizes/crops it, applies the
            anti-instagram color correction, detects white/yellow/red
            line segments, publishes the normalized SegmentList, and
            (in verbose mode) publishes several debug images.

            Each stage runs inside a context.phase(...) block so timing
            stats can be reported via intermittent_log.
        """
        if not self.active:
            return

        self.intermittent_counter += 1

        with context.phase('decoding'):
            # Decode from compressed image with OpenCV
            try:
                image_cv = dtu.bgr_from_jpg(image_msg.data)
            except ValueError as e:
                # A corrupt frame is logged and dropped, not fatal.
                self.loginfo('Could not decode image: %s' % e)
                return

        with context.phase('resizing'):
            # Resize and crop image
            hei_original, wid_original = image_cv.shape[0:2]

            # Only resize when the frame differs from the configured size.
            if self.config.img_size[0] != hei_original or self.config.img_size[
                    1] != wid_original:
                # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
                image_cv = cv2.resize(
                    image_cv,
                    (self.config.img_size[1], self.config.img_size[0]),
                    interpolation=cv2.INTER_NEAREST)
            # Drop the top rows (above the horizon, presumably) before
            # detection.
            image_cv = image_cv[self.config.top_cutoff:, :, :]

        with context.phase('correcting'):
            # apply color correction
            image_cv_corr = self.ai.applyTransform(image_cv)


#             image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        with context.phase('detection'):
            # Set the image to be detected
            self.detector.setImage(image_cv_corr)

            # Detect lines and normals
            white = self.detector.detectLines('white')
            yellow = self.detector.detectLines('yellow')
            red = self.detector.detectLines('red')

        with context.phase('preparing-images'):
            # SegmentList constructor
            segmentList = SegmentList()
            segmentList.header.stamp = image_msg.header.stamp

            # Convert to normalized pixel coordinates, and add segments to segmentList
            top_cutoff = self.config.top_cutoff
            s0, s1 = self.config.img_size[0], self.config.img_size[1]

            # Undo the crop offset, then scale pixel coords into [0, 1];
            # lines are stored as (x1, y1, x2, y2), hence the 4-vectors.
            arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
            arr_ratio = np.array((1. / s1, 1. / s0, 1. / s1, 1. / s0))
            if len(white.lines) > 0:
                lines_normalized_white = ((white.lines + arr_cutoff) *
                                          arr_ratio)
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_white, white.normals,
                                 Segment.WHITE))
            if len(yellow.lines) > 0:
                lines_normalized_yellow = ((yellow.lines + arr_cutoff) *
                                           arr_ratio)
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                 Segment.YELLOW))
            if len(red.lines) > 0:
                lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_red, red.normals,
                                 Segment.RED))

            self.intermittent_log(
                '# segments: white %3d yellow %3d red %3d' %
                (len(white.lines), len(yellow.lines), len(red.lines)))

        # Publish segmentList
        with context.phase('publishing'):
            self.publishers.segment_list.publish(segmentList)

        # VISUALIZATION only below

        if self.config.verbose:

            with context.phase('draw-lines'):
                # Draw lines and normals
                image_with_lines = np.copy(image_cv_corr)
                drawLines(image_with_lines, white.lines, (0, 0, 0))
                drawLines(image_with_lines, yellow.lines, (255, 0, 0))
                drawLines(image_with_lines, red.lines, (0, 255, 0))

            with context.phase('published-images'):
                # Publish the frame with lines
                out = dtu.d8n_image_msg_from_cv_image(
                    image_with_lines, "bgr8", same_timestamp_as=image_msg)
                self.publishers.image_with_lines.publish(out)

            with context.phase('pub_edge/pub_segment'):
                # Publish the raw edge map and the colored segment areas.
                out = dtu.d8n_image_msg_from_cv_image(
                    self.detector.edges, "mono8", same_timestamp_as=image_msg)
                self.publishers.edge.publish(out)

                colorSegment = color_segment(white.area, red.area, yellow.area)
                out = dtu.d8n_image_msg_from_cv_image(
                    colorSegment, "bgr8", same_timestamp_as=image_msg)
                self.publishers.color_segment.publish(out)

        if self.intermittent_log_now():
            self.info('stats from easy_node\n' +
                      dtu.indent(context.get_stats(), '> '))