Example #1
        def generate_reports(reports, experiments=None):
            if not reports:
                print("No reports found. Sorry.")
                return

            for rcls in reports:
                if experiments:
                    for exp in experiments:
                        report = rcls(exp, self._experiment_ids, self._outfile,
                                      schema.Session())
                else:
                    report = rcls(None, self._experiment_ids, self._outfile,
                                  schema.Session())
                report.generate()
Example #2
def create_run(cmd, project, exp, grp):
    """
    Create a new 'run' in the database.

    This creates a new transaction in the database and creates a new
    run in this transaction. Afterwards we return both the transaction as
    well as the run itself. The user is responsible for committing it when
    the time comes.

    Args:
        cmd: The command that has been executed.
        project: The project this run belongs to.
        exp: The experiment this run belongs to.
        grp: The run_group (uuid) we belong to.

    Returns:
        The inserted tuple representing the run and the session opened with
        the new run. Don't forget to commit it at some point.
    """
    from benchbuild.utils import schema as s

    session = s.Session()
    run = s.Run(command=str(cmd),
                project_name=project.name,
                experiment_name=exp,
                run_group=str(grp),
                experiment_group=str(CFG["experiment_id"]))
    session.add(run)
    session.flush()

    return (run, session)
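A minimal usage sketch of the contract described in the docstring: create_run hands back the still-open session, and the caller is the one who commits or rolls it back. All names below (my_cmd, my_project, group_id) are placeholders, not benchbuild API.

# Hypothetical caller; my_cmd, my_project and group_id are placeholders.
run, session = create_run(my_cmd, my_project, "my-experiment", group_id)
try:
    # ... attach measurements to `run` here ...
    session.commit()   # the caller owns the transaction
except Exception:
    session.rollback()
    raise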
Example #3
def create_run_group(prj):
    """
    Create a new 'run_group' in the database.

    This creates a new transaction in the database and creates a new run_group
    within this transaction. Afterwards we return both the transaction as well
    as the run_group itself. The user is responsible for committing it when the
    time comes.

    Args:
        prj: The project for which we open the run_group.

    Returns:
        A tuple (group, session) containing both the newly created run_group and
        the transaction object.
    """
    from benchbuild.utils import schema

    session = schema.Session()
    group = schema.RunGroup(id=prj.run_uuid,
                            project=prj.name,
                            experiment=str(CFG["experiment_id"]))
    session.add(group)
    session.flush()

    return (group, session)
Example #4
    def __call__(self, cc, *args, project=None, **kwargs):
        if project:
            self.project = project

        original_command = cc[args]
        clang = cc["-Qunused-arguments"]
        clang = clang[args]
        clang = clang[self.project.cflags]
        clang = clang[self.project.ldflags]
        clang = clang["-mllvm", "-stats"]

        run_config = self.config
        session = schema.Session()
        with u_run.track_execution(clang, self.project, self.experiment) as _run:
            run_info = _run()
            if run_config is not None:
                db.persist_config(run_info.db_run, session, run_config)

            if not run_info.has_failed:
                stats = []
                cls = ExtractCompileStats
                for stat in cls.get_compilestats(run_info.stderr):
                    compile_s = CompileStat()
                    compile_s.name = stat["desc"].rstrip()
                    compile_s.component = stat["component"].rstrip()
                    compile_s.value = stat["value"]
                    stats.append(compile_s)

                components = settings.CFG["cs"]["components"].value
                names = settings.CFG["cs"]["names"].value

                stats = [s for s in stats if str(s.component) in components] \
                    if components is not None else stats
                stats = [s for s in stats if str(s.name) in names] \
                    if names is not None else stats

                if stats:
                    for stat in stats:
                        LOG.info(" [%s] %s = %s", stat.component, stat.name,
                                 stat.value)
                    db.persist_compilestats(run_info.db_run, run_info.session,
                                            stats)
                else:
                    LOG.info("No compilestats left, after filtering.")
                    LOG.warning("  Components: %s", components)
                    LOG.warning("  Names:      %s", names)
            else:
                with u_run.track_execution(original_command, self.project,
                                           self.experiment, **kwargs) as _run:
                    LOG.warning("Fallback to: %s", str(original_command))
                    run_info = _run()

        ret = self.call_next(cc, *args, **kwargs)
        ret.append(run_info)
        session.commit()
        return ret
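The component/name filtering above treats a missing setting (None) as "keep everything". Below is a standalone sketch of just that step, using invented data; the Stat namedtuple merely stands in for CompileStat and is not benchbuild's type.

from collections import namedtuple

# Illustrative stand-in for CompileStat; fields mirror the attributes used above.
Stat = namedtuple("Stat", ["component", "name", "value"])

stats = [Stat("licm", "Number of loads hoisted", 4),
         Stat("gvn", "Number of instructions deleted", 2)]
components = ["licm"]   # e.g. settings.CFG["cs"]["components"].value
names = None            # None disables name filtering

stats = [s for s in stats if str(s.component) in components] \
    if components is not None else stats
stats = [s for s in stats if str(s.name) in names] \
    if names is not None else stats
# Only the 'licm' entry survives the component filter.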
Example #5
def render_experiment(experiment):
    template = get_template()
    sess = schema.Session()

    return template.render(name=experiment.name,
                           description=experiment.description,
                           start_date=experiment.begin,
                           end_date=experiment.end,
                           id=experiment.id,
                           num_completed_runs=get_completed_runs(
                               sess, experiment),
                           num_failed_runs=get_failed_runs(sess, experiment))
Example #6
    def __init__(self, exp_name, exp_ids, out_path):
        import benchbuild.utils.schema as schema
        import uuid
        self.out_path = out_path
        self.session = schema.Session()
        if not exp_ids:
            exp_ids = load_experiment_ids_from_names(
                self.session,
                [exp for exp in self.SUPPORTED_EXPERIMENTS if exp == exp_name])
            exp_ids = [v[0] for v in exp_ids]
        else:
            exp_ids = [uuid.UUID(v) for v in exp_ids]

        self.experiment_ids = exp_ids
Example #7
def refresh_root_window(root):
    session = schema.Session()
    all_db_exps = experiments_from_db(session)
    menu_top = SubMenu(
        'Experiments in database',
        [
            SubMenu(
                "{name} - {desc}".format(name=elem.name, desc=elem.description),
                [
                    urwid.Text(render_experiment(elem)),
                    urwid.Divider(),
                    Choice("Delete", top=root, payload=elem)
                ],
                top=root) for elem in all_db_exps
        ],
        top=root)
    return menu_top.menu
Example #8
        def handle_timing(run_infos):
            """Takes care of the formating for the timing statistics."""
            from benchbuild.utils import schema as s

            session = s.Session()
            for run_info in run_infos:
                if may_wrap:
                    timings = fetch_time_output(time_tag,
                                                time_tag + "{:g}-{:g}-{:g}",
                                                run_info.stderr.split("\n"))
                    if timings:
                        persist_time(run_info.db_run, session, timings)
                    else:
                        LOG.warning("No timing information found.")
            session.commit()
            return run_infos
Example #9
def handle_timing(run_infos):
    """Takes care of the formatting for the timing statistics."""
    session = schema.Session()
    for run_info in run_infos:
        LOG.debug("Persisting time for '%s'", run_info)
        if may_wrap:
            timings = time.fetch_time_output(
                time_tag, time_tag + "{:g}-{:g}-{:g}-{:g}",
                run_info.stderr.split("\n"))
            if timings:
                mse_persist_time_and_memory(run_info.db_run, session, timings)
            else:
                LOG.warning("No timing information found.")
    session.commit()
    return run_infos
Example #10
    def item_chosen(self, _):
        session = schema.Session()
        session.delete(self.payload)

        def confirm(_):
            session.commit()
            self.top.clear()
            self.top.open_box(refresh_root_window(self.top))

        def cancel(_):
            session.rollback()
            self.top.clear()
            self.top.open_box(refresh_root_window(self.top))

        response = urwid.Text([
            'Really delete: {name} {desc}?'.format(
                name=self.payload.name, desc=self.payload.description), '\n'
        ])
        done = MenuButton('Ok', confirm)
        cancel = MenuButton('Cancel', cancel)
        response_box = urwid.Filler(urwid.Pile([response, done, cancel]))
        self.top.open_box(urwid.AttrMap(response_box, 'options'))
Example #11
        def handle_profileScopDetection(run_infos):
            """
            Takes care of writing the information of profileScopDetection into
            the database.
            """
            from benchbuild.utils import schema as s
            import parse

            instrumentedScopPattern \
                = parse.compile("{} [info] Instrumented SCoPs: {:d}")
            nonInstrumentedScopPattern \
                = parse.compile("{} [info] Not instrumented SCoPs: {:d}")
            invalidReasonPattern \
                = parse.compile("{} [info] {} is invalid because of: {}")
            instrumentedParentPattern \
                = parse.compile("{} [info] Instrumented parents: {:d}")
            nonInstrumentedParentPattern \
                = parse.compile("{} [info] Not instrumented parents: {:d}")

            instrumentedScopCounter = 0
            nonInstrumentedScopCounter = 0
            invalidReasons = {}
            instrumentedParentCounter = 0
            nonInstrumentedParentCounter = 0

            paths = glob.glob(
                os.path.join(os.path.realpath(os.path.curdir),
                             "profileScops.log"))

            def handle_data(line):
                nonlocal instrumentedScopCounter
                nonlocal nonInstrumentedScopCounter
                nonlocal invalidReasons
                nonlocal instrumentedParentCounter
                nonlocal nonInstrumentedParentCounter

                data = instrumentedScopPattern.parse(line)
                if data is not None:
                    instrumentedScopCounter += data[1]
                    return

                data = nonInstrumentedScopPattern.parse(line)
                if data is not None:
                    nonInstrumentedScopCounter += data[1]
                    return

                data = invalidReasonPattern.parse(line)
                if data is not None:
                    reason = data[2]
                    if reason not in invalidReasons:
                        invalidReasons[reason] = 0
                    invalidReasons[reason] += 1
                    return

                data = instrumentedParentPattern.parse(line)
                if data is not None:
                    instrumentedParentCounter += data[1]
                    return

                data = nonInstrumentedParentPattern.parse(line)
                if data is not None:
                    nonInstrumentedParentCounter += data[1]
                    return

            for path in paths:
                with open(path, 'r') as file_hdl:
                    for line in file_hdl:
                        handle_data(line)

            session = s.Session()
            for reason in invalidReasons:
                persist_scopinfos(run_infos[0], reason, invalidReasons[reason])

            session.commit()
            return run_infos
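For reference, the parse patterns above expect log lines shaped like the sample below; this self-contained snippet uses the same parse library, with a made-up input line.

import parse

pattern = parse.compile("{} [info] Instrumented SCoPs: {:d}")
sample = "2020-01-01 12:00:00 [info] Instrumented SCoPs: 3"
data = pattern.parse(sample)
assert data is not None and data[1] == 3   # data[1] is the parsed count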
Example #12
    def evaluate(self, run_info: run.RunInfo):
        payload = run_info.payload
        run_id = run_info.db_run.id
        config = payload['config']
        merged = self.merge_results(payload)

        def yield_in_region(regions, merged_metrics):
            for value in merged_metrics.values():
                if value['region'] in regions:
                    yield value['event']

        def yield_not_in_region(regions, merged_metrics):
            for value in merged_metrics.values():
                if value['region'] not in regions:
                    yield value['event']

        def yield_not_in_region_rw(regions, merged_metrics):
            for value in merged_metrics.values():
                if value['region'] not in regions:
                    yield (value['region'], value['event'])

        def create_results(session,
                           merged_metrics,
                           name,
                           *regions,
                           subset_fn=yield_in_region,
                           aggr_fn=sum):
            cfg = config.get('name', None)
            value = aggr_fn(subset_fn(regions, merged_metrics))
            session.add(
                PJ_Result(config=cfg, name=name, run_id=run_id, value=value))

        def create_rw_results(session,
                              merged_metrics,
                              name,
                              *regions,
                              subset_fn=yield_not_in_region_rw):
            cfg = config.get('name', None)
            for region, value in subset_fn(regions, merged_metrics):
                session.add(
                    PJ_Result_Region(config=cfg,
                                     name=name,
                                     run_id=run_id,
                                     value=value,
                                     region_name=region))

        meta_regions = [
            'START', 'CODEGEN', 'CACHE_HIT', 'VARIANTS', 'BLOCKED', 'REQUESTS'
        ]
        session = schema.Session()
        create_results(session, merged, 't_all', 'START')
        create_results(session, merged, 't_codegen', 'CODEGEN')
        create_results(session, merged, 'n_cachehits', 'CACHE_HIT')
        create_results(session, merged, 'n_variants', 'VARIANTS')
        create_results(session, merged, 'n_blocked', 'BLOCKED')
        create_results(session, merged, 'n_requests', 'REQUESTS')
        create_results(session,
                       merged,
                       't_scops',
                       *meta_regions,
                       subset_fn=yield_not_in_region)
        create_rw_results(session,
                          merged,
                          't_region',
                          *meta_regions,
                          subset_fn=yield_not_in_region_rw)

        session.commit()
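The generator helpers above imply that merge_results produces a mapping whose values carry a 'region' label and an 'event' measurement. A small illustration with invented data of what create_results(session, merged, 't_all', 'START') aggregates:

# Invented sample in the shape the generators above expect.
merged = {
    "k1": {"region": "START",   "event": 10.0},
    "k2": {"region": "CODEGEN", "event": 2.5},
    "k3": {"region": "compute", "event": 1.5},
}

t_all = sum(v['event'] for v in merged.values() if v['region'] in ('START',))
assert t_all == 10.0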