Пример #1
0
 def test_select_attribute(self):
     """select('y') projects the y attribute of every element, in order."""
     source = [new(x=5, y=3, z=10),
               new(x=1, y=7, z=13),
               new(x=9, y=3, z=10),
               new(x=8, y=4, z=12)]
     expected = [3, 7, 3, 4]
     actual = Queryable(source).select('y').to_list()
     self.assertEqual(actual, expected)
Пример #2
0
 def test_select_attribute(self):
     """Selecting by attribute name yields that attribute from each item."""
     items = [
         new(x=5, y=3, z=10),
         new(x=1, y=7, z=13),
         new(x=9, y=3, z=10),
         new(x=8, y=4, z=12),
     ]
     selected = Queryable(items).select('y').to_list()
     self.assertEqual(selected, [3, 7, 3, 4])
Пример #3
0
def make_html_suites(results):
    """Group results by suite name into per-suite HTML records, sorted by name."""
    grouped = query(results).group_by(
        lambda x: x['info'].suite_name,
        result_selector=lambda k, g: new(name=k, results=make_html_results(g)))
    return grouped.order_by(lambda x: x.name).to_list()
Пример #4
0
def make_html_summary(results):
    """Tally result statuses and build the overall summary record."""
    counts = (query(results)
              .group_by(lambda x: x['status'])
              .to_dictionary(lambda x: x.key, len))

    errors = counts.get('ERROR', 0)
    failures = counts.get('FAIL', 0)
    skipped = counts.get('SKIP', 0)
    passed = counts.get('PASS', 0)

    # Overall status: any error wins, then any failure, otherwise pass.
    if errors:
        status = 'ERROR'
    elif failures:
        status = 'FAIL'
    else:
        status = 'PASS'

    return new(
        status=status,
        css=CSS_MAP.get(status),
        errors_count=errors,
        failures_count=failures,
        skipped_count=skipped,
        passed_count=passed,
        total_count=errors + failures + skipped + passed,
    )
Пример #5
0
 def make_result(key, group):
     """Aggregate one kernel group into count/total/mean runtime figures."""
     elapsed = query(group).select(lambda item: item.act.elapsed_time).to_list()
     total_time = sum(elapsed)
     n = len(elapsed)
     return new(
         kname=key,
         comp=group[0].comp,
         count=n,
         total_runtime=total_time,
         mean_runtime=total_time / n,
     )
Пример #6
0
def make_html_failures(results, status):
    """Collect results whose status matches *status* for the HTML report.

    Returns a dict with 'count' and 'items' (records sorted by name), or
    None when no result has the requested status.
    """
    failures = query(results) \
        .where(lambda x: x['status'] == status) \
        .select(lambda x: new(
            name=x['info'].label(),
            body=x['reason'],
            job_url=x['build_url'])) \
        .order_by(lambda x: x.name) \
        .to_list()
    # Truthiness is the idiomatic form of the len(...) != 0 check.
    if failures:
        return {'count': len(failures), 'items': failures}
    return None
Пример #7
0
def make_junit_context(results):
    """Translate raw results into the testcase context for a JUnit report."""
    def to_testcase(x):
        # One JUnit <testcase> record per result.
        info = x['info']
        return new(
            classname=info.platform.full,
            name='{}-{}'.format(info.workload_name, info.batch_size),
            time=x['cur.execution_duration'],
            skipped=is_skipped(x),
            failure=make_junit_failure(x),
            error=make_junit_error(x),
            stdout=make_junit_stdout(x))

    return dict(testcases=query(results).select(to_testcase).to_list())
Пример #8
0
def generate_efficiency_chart(results, report_dir):
    """Render the efficiency chart into *report_dir*.

    Returns an Image for the plotted file, or None when no comparable
    result carries an efficiency value.
    """
    data = query(results) \
        .where(lambda x: x['compare']) \
        .where(lambda x: x['efficiency']) \
        .order_by(lambda x: x['info'].label()) \
        .select(lambda x: new(label=x['info'].label(), value=x['efficiency'])) \
        .to_list()
    labels = query(data).select(lambda x: x.label).to_list()
    values = query(data).select(lambda x: x.value).to_list()
    # Idiomatic emptiness check instead of len(values).
    if values:
        filename = report_dir / 'efficiency.png'
        ratio_plot(filename, labels, values, 'Efficiency relative to TF/GP100')
        return Image(filename)
    return None
Пример #9
0
def generate_ratio_chart(results, report_dir):
    """Render the throughput-ratio chart into *report_dir*.

    Returns an Image for the plotted file, or None when no comparable
    result carries a ratio value.
    """
    data = query(results) \
        .where(lambda x: x['compare']) \
        .where(lambda x: x['ratio']) \
        .order_by(lambda x: x['info'].label()) \
        .select(lambda x: new(label=x['info'].label(), value=x['ratio'])) \
        .to_list()
    labels = query(data).select(lambda x: x.label).to_list()
    values = query(data).select(lambda x: x.value).to_list()
    # Test the plotted values (same length as labels); idiomatic truthiness
    # check, consistent with generate_efficiency_chart.
    if values:
        filename = report_dir / 'ratios.png'
        ratio_plot(filename, labels, values, 'Throughput compared to golden')
        return Image(filename)
    return None
Пример #10
0
 def _make_one_result(x):
     """Flatten one raw result record into the row record used by the report."""
     info = x['info']
     status = x['status']
     return new(
         label=info.label(),
         status_css=CSS_MAP.get(status),
         status=status,
         gpu=info.platform.gpu,
         engine=info.platform.engine,
         workload=info.instance_name,
         batch_size=info.batch_size,
         cur_com=render_float(x['compile_duration']),
         cur_run=render_float(x['cur.execution_duration']),
         ref_run=render_float(x['ref.execution_duration']),
         ratio=render_float(x['ratio']),
         log=x['build_url'],
         reason=x['reason'],
     )
Пример #11
0
def generate_engines_chart(results, report_dir, filename, compare, caption):
    """Plot plaid/stripe duration ratios for workloads measured on both engines.

    *compare* names the result field holding the duration to compare.
    Returns an Image for the rendered chart, or None when no workload has
    results from more than one engine.
    """
    printf(caption)

    def get_by_engine(items, engine):
        # Linear scan for the first item whose engine name contains *engine*.
        for item in items:
            if engine in item.engine:
                return item
        # Include the missing engine name for a usable diagnostic.
        raise KeyError(engine)

    def compare_durations(group):
        plaid = get_by_engine(group, 'plaid')
        stripe = get_by_engine(group, 'stripe')
        printf('{:25} {:25} {:25}'.format(group.key, plaid.dur, stripe.dur))
        return new(label=group.key, value=plaid.dur / stripe.dur)

    # Backslashes are redundant inside the parenthesized new(...) call.
    data = query(results) \
        .where(lambda x: x['compare']) \
        .where(lambda x: x[compare]) \
        .select(lambda x: new(
            key=x['info'].label(inc_engine=False),
            label=x['info'].label(),
            engine=x['info'].platform.engine,
            dur=x[compare],
        )) \
        .group_by(lambda x: x.key) \
        .where(lambda x: len(x) > 1) \
        .select(compare_durations) \
        .order_by(lambda x: x.label) \
        .to_list()
    labels = query(data).select(lambda x: x.label).to_list()
    values = query(data).select(lambda x: x.value).to_list()
    if values:
        # Use a distinct name; the original reassigned the *filename* parameter.
        out_path = report_dir / filename
        ratio_plot(out_path, labels, values, caption)
        return Image(out_path)
    return None
Пример #12
0
def main():
    """Read an OpenCL event log and print per-kernel timing statistics.

    Loads a binary profile (--file), aggregates kernel execution times into
    a summary table, optionally prints each kernel's ops (--ops), dumps
    kernel sources to a directory (--dump), and finally prints details for
    one kernel in focus (--kernel by name, by default the slowest one).
    """
    parser = argparse.ArgumentParser(description='Display kernel times')
    parser.add_argument('--file',
                        help='a binary profile data file',
                        type=valid_path,
                        default=os.path.expanduser('~/eventlog.gz'))
    parser.add_argument('--program',
                        help='show program source for kernel in focus',
                        action='store_true')
    parser.add_argument('--ops',
                        help='show ops for each kernel',
                        action='store_true')
    parser.add_argument('--kernel',
                        help='focus on specified kernel, by default slowest')
    parser.add_argument('--dump',
                        help='dump kernels to specified directory',
                        type=valid_path)
    args = parser.parse_args()
    scope = ta.Scope()
    scope.read_eventlog(args.file)

    # Collect one record per OpenCL kernel execution, joined with the
    # compilation activity that carries its name (kname) and source.
    activities = []
    for act in [
            x for x in scope.activities
            if x.verb == 'tile::hal::opencl::Executing'
    ]:
        kid = act.ocl_runinfo.kernel_id
        comp = scope.get_activity(act, kid)
        kname = comp.ocl_kernelinfo.kname
        activities.append(new(
            act=act,
            kname=kname,
            comp=comp,
        ))

    def make_result(key, group):
        # Fold all executions of one kernel into count/total/mean figures.
        ktimes = query(group).select(lambda x: x.act.elapsed_time).to_list()
        total = sum(ktimes)
        count = len(ktimes)
        return new(
            kname=key,
            comp=group[0].comp,
            count=count,
            total_runtime=total,
            mean_runtime=(total / count),
        )

    # Ordered by mean runtime; by_runtime[-1] is treated below as the
    # slowest kernel (matches the --kernel help text).
    by_runtime = query(activities) \
        .group_by(lambda x: x.kname, result_selector=make_result) \
        .order_by(lambda x: x.mean_runtime) \
        .to_list()

    hdr = '{:24}  {:>6}  {:>12}  {:>12}  {:>7}  {:>7}  {:11}  {:15}'
    fmt = '{:24}  {:>6}  {:12.3f}  {:12.6f}  {:>7}  {:>7}  {:11}  {:15}'
    print(
        hdr.format('kernel', 'count', 'cumulative', 'self', 'flops/s',
                   'bytes/s', 'type', 'vec'))
    for item in by_runtime:
        kinfo = item.comp.ocl_kernelinfo.kinfo
        # NOTE(review): 'type' shadows the builtin; left unchanged in this
        # documentation-only pass.
        type = kinfo.WhichOneof('kernel_type')
        ops = []
        vec = 'N/A'
        # Per-kind details: contraction/element kernels carry ops and a vec
        # field; a 'zero' kernel with the copy flag set is reported as
        # 'copy'; a 'special' kernel is reported by its function name.
        if type == 'contraction':
            ops = kinfo.contraction.ops
            vec = kinfo.contraction.vec
        elif type == 'element':
            ops = kinfo.element.ops
            vec = kinfo.element.vec
        elif type == 'zero':
            if kinfo.zero.copy:
                type = 'copy'
        elif type == 'special':
            type = kinfo.special.fn
        elif type is None:
            type = 'unknown'
        print(
            fmt.format(
                item.kname,
                item.count,
                item.total_runtime,
                item.mean_runtime,
                human_size(kinfo.flops / item.mean_runtime),
                human_size(kinfo.bytes / item.mean_runtime),
                type,
                str(vec),
            ))
        if args.ops:
            for op in ops:
                print(op)
        if args.dump:
            # Write both the OpenCL source (.cl) and the compiled program
            # code (.tile) for each kernel into the dump directory.
            with open(os.path.join(args.dump, item.kname + '.cl'),
                      'w') as file_:
                src = item.comp.ocl_kernelinfo.src
                file_.write(src)
            with open(os.path.join(args.dump, item.kname + '.tile'),
                      'w') as file_:
                src = item.comp.hal_compilationinfo.program.code
                file_.write(src)
    print()

    # Pick the kernel to inspect in detail: exact name match when --kernel
    # is given (single() presumably raises if absent — verify), otherwise
    # the slowest one.
    if args.kernel:
        focus = query(by_runtime).single(lambda x: x.kname == args.kernel)
    else:
        focus = by_runtime[-1]

    if args.program:
        print('Program for kernel: {}'.format(focus.kname))
        print(focus.comp.hal_compilationinfo.program.code)
        print()

    print('Source code for kernel: {}'.format(focus.kname))
    print(focus.comp.ocl_kernelinfo.src)
    print()

    # Memory report: tmp_sizes/alloc_sizes map a size to how many buffers
    # of that size the compiled kernel uses.
    print('Memory used by kernel: {}'.format(focus.kname))
    for size, count in focus.comp.hal_compilationinfo.tmp_sizes.items():
        print('  {} tmp(s) of size {}'.format(count, size))
    for size, count in focus.comp.hal_compilationinfo.alloc_sizes.items():
        print('  {} alloc(s) of size {}'.format(count, size))
    print()
Пример #13
0
def make_junit_error(record):
    """Return a JUnit error payload for ERROR records, otherwise None."""
    if record['status'] != 'ERROR':
        return None
    return new(message='; '.join(record['errors']))
Пример #14
0
def make_junit_stdout(record):
    """Wrap a record's reason text for JUnit stdout, or None when empty."""
    reason = record['reason']
    return new(text=reason) if reason else None
Пример #15
0
def make_junit_failure(record):
    """Return a JUnit failure payload for FAIL records, otherwise None."""
    if record['status'] != 'FAIL':
        return None
    return new(message='; '.join(record['failures']))
Пример #16
0
 def test_new_create(self):
     """Keyword arguments become attributes on the created record."""
     record = new(x=10, y=20, z=30)
     for attr, expected in (('x', 10), ('y', 20), ('z', 30)):
         self.assertEqual(getattr(record, attr), expected)
Пример #17
0
 def test_new_create_positional_error(self):
     """Positional arguments are rejected with TypeError."""
     with self.assertRaises(TypeError):
         new(10, 20, 30)
Пример #18
0
 def test_new_create_empty(self):
     """new() with no arguments must construct without raising."""
     new()  # smoke test; the original bound an unused local 'r'
Пример #19
0
 def compare_durations(group):
     """Log and return the plaid/stripe duration ratio record for one group."""
     plaid_item = get_by_engine(group, 'plaid')
     stripe_item = get_by_engine(group, 'stripe')
     printf('{:25} {:25} {:25}'.format(group.key, plaid_item.dur, stripe_item.dur))
     return new(label=group.key, value=plaid_item.dur / stripe_item.dur)