Code Example #1
import os
import ubjson

def read_dbson(x):
    """Read a dbson file from META_DIR; return None on failure."""
    # META_DIR is a module-level constant defined elsewhere.
    m = None
    try:
        with open(os.path.join(META_DIR, x), "rb") as f:
            m = ubjson.load(f)
    except Exception:
        print("recording exception with dbson=", x)
    return m
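A minimal usage sketch, assuming META_DIR points at a directory that actually contains the file; the name below is illustrative:

meta = read_dbson("scan_001.dbson")  # hypothetical file name
if meta is not None:
    print(sorted(meta.keys()))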
Code Example #2
File: game.py Project: vladfi1/py-slippi
    def _parse_file(self, path):
        """Parses the .slp file at `path`. Called automatically by our constructor."""

        with open(path, 'rb') as f:
            json = ubjson.load(f)

        self.metadata = self.Metadata._parse(json['metadata'])

        stream = io.BytesIO(json['raw'])
        payload_sizes = self._parse_event_payloads(stream)

        try:
            while True:
                event = self._parse_event(stream, payload_sizes)
                if isinstance(event, evt.Frame.Event):
                    frame_index = event.id.frame - FIRST_FRAME_INDEX

                    if (not self._out_of_order
                            and len(self.frames) != frame_index
                            and len(self.frames) != frame_index + 1):
                        self._out_of_order = True
                        print('out-of-order frame: %d' % event.id.frame,
                              file=sys.stderr)

                    while len(self.frames) <= frame_index:
                        if self.frames:
                            self.frames[-1]._finalize()
                        self.frames.append(evt.Frame(event.id.frame))

                    port = self.frames[frame_index].ports[event.id.port]
                    if not port:
                        port = evt.Frame.Port()
                        self.frames[frame_index].ports[event.id.port] = port

                    if event.id.is_follower:
                        if port.follower is None:
                            port.follower = evt.Frame.Port.Data()
                        data = port.follower
                    else:
                        data = port.leader

                    if isinstance(event.data, evt.Frame.Port.Data.Pre):
                        data.pre = event.data
                    elif isinstance(event.data, evt.Frame.Port.Data.Post):
                        data.post = event.data
                    else:
                        raise Exception('unknown frame data type: %s' %
                                        event.data)
                elif isinstance(event, evt.Start):
                    self.start = event
                elif isinstance(event, evt.End):
                    self.end = event
                else:
                    raise Exception('unexpected event: %s' % event)
        except EofException:
            pass
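The FIRST_FRAME_INDEX constant used above is defined elsewhere in py-slippi; Slippi replays number their first frame -123, so a definition along these lines is assumed:

FIRST_FRAME_INDEX = -123  # Slippi frame numbering starts at -123 (game start)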
Code Example #3
File: log.py Project: mfkiwl/basalt-mirror
    def _load(self, path):

        if path.endswith("ubjson"):
            with open(path, 'rb') as f:
                data = ubjson.load(f)
        else:
            with open(path, 'r') as f:
                data = json.load(f)

        if isinstance(data, Mapping):
            data = self._convert(data)

        return munchify(data)
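For context, munch.munchify (used above) wraps nested mappings so keys are also attribute-accessible; a small illustration with made-up data:

from munch import munchify

data = munchify({"calib": {"cam0": {"fx": 400.0}}})
print(data.calib.cam0.fx)  # 400.0, same as data["calib"]["cam0"]["fx"]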
Code Example #4
import json

import numpy as np
import xgboost as xgb

def json_model(model_path: str, parameters: dict) -> dict:
    X = np.random.random((10, 3))
    y = np.random.randint(2, size=(10, ))

    dm1 = xgb.DMatrix(X, y)

    bst = xgb.train(parameters, dm1)
    bst.save_model(model_path)
    if model_path.endswith("ubj"):
        import ubjson
        with open(model_path, "rb") as ubjfd:
            model = ubjson.load(ubjfd)
    else:
        with open(model_path, 'r') as fd:
            model = json.load(fd)

    return model
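A usage sketch; the parameter values and file name are illustrative:

params = {"objective": "binary:logistic", "tree_method": "hist"}
model = json_model("model.ubj", params)  # round-trips through UBJSON
print(sorted(model["learner"].keys()))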
Code Example #5
    def run_model_json_io(self, parameters: dict, ext: str) -> None:
        if ext == "ubj" and tm.no_ubjson()["condition"]:
            pytest.skip(tm.no_ubjson()["reason"])

        loc = locale.getpreferredencoding(False)
        model_path = 'test_model_json_io.' + ext
        j_model = json_model(model_path, parameters)
        assert isinstance(j_model['learner'], dict)

        bst = xgb.Booster(model_file=model_path)

        bst.save_model(fname=model_path)
        if ext == "ubj":
            import ubjson
            with open(model_path, "rb") as ubjfd:
                j_model = ubjson.load(ubjfd)
        else:
            with open(model_path, 'r') as fd:
                j_model = json.load(fd)

        assert isinstance(j_model['learner'], dict)

        os.remove(model_path)
        assert locale.getpreferredencoding(False) == loc

        json_raw = bst.save_raw(raw_format="json")
        from_jraw = xgb.Booster()
        from_jraw.load_model(json_raw)

        ubj_raw = bst.save_raw(raw_format="ubj")
        from_ubjraw = xgb.Booster()
        from_ubjraw.load_model(ubj_raw)

        old_from_json = from_jraw.save_raw(raw_format="deprecated")
        old_from_ubj = from_ubjraw.save_raw(raw_format="deprecated")

        assert old_from_json == old_from_ubj

        raw_json = bst.save_raw(raw_format="json")
        pretty = json.dumps(json.loads(raw_json), indent=2) + "\n\n"
        bst.load_model(bytearray(pretty, encoding="ascii"))

        old_from_json = from_jraw.save_raw(raw_format="deprecated")
        old_from_ubj = from_ubjraw.save_raw(raw_format="deprecated")

        assert old_from_json == old_from_ubj
Code Example #6
File: jfile.py Project: Biophotonics/pyjdata
def loadb(fname, opt=None, **kwargs):
    """@brief Load a binary (UBJSON) JData file and decode it to native Python data

    @param[in] fname: a binary (UBJSON based) JData file name
    @param[in] opt: options; if opt['decode'] is True or 1 (default), call jdata.decode() after loading
    """
    if opt is None:
        opt = {}
    opt.setdefault('decode', True)

    try:
        import ubjson
    except ImportError:
        raise ImportError('To read/write binary JData files, you must install the py-ubjson module by "pip install py-ubjson"')
    else:
        # UBJSON is a binary format, so the file must be opened in binary mode.
        with open(fname, "rb") as fid:
            data = ubjson.load(fid, **kwargs)
        if opt['decode']:
            data = jd.decode(data, opt)  # jd is the jdata module, imported elsewhere in jfile.py
        return data
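A usage sketch; the file name is illustrative:

mesh = loadb("mesh.bmsh")                     # decode JData annotations (default)
raw = loadb("mesh.bmsh", {"decode": False})   # keep the raw UBJSON structure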
Code Example #7
def _parse(stream, handlers):
    expect_bytes(b'{U\x03raw[$U#l', stream)
    (length, ) = unpack('l', stream)  # currently unused

    payload_sizes = _parse_event_payloads(stream)
    _parse_events(stream, payload_sizes, handlers)

    expect_bytes(b'U\x08metadata', stream)

    json = ubjson.load(stream)
    raw_handler = handlers.get(ParseEvent.METADATA_RAW)
    if raw_handler:
        raw_handler(json)

    metadata = Metadata._parse(json)
    handler = handlers.get(ParseEvent.METADATA)
    if handler:
        handler(metadata)

    expect_bytes(b'}', stream)
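expect_bytes is not shown here; a plausible sketch (not the project's actual code) that reads and verifies a fixed prefix:

def expect_bytes(expected, stream):
    # Read exactly len(expected) bytes and fail loudly on a mismatch,
    # so any drift in the .slp container format is caught early.
    actual = stream.read(len(expected))
    if actual != expected:
        raise ValueError('expected %r, got %r' % (expected, actual))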
Code Example #8
File: io.py Project: felixmusil/rascal_benchmarks
import os
import json

import ase
import numpy as np
import ubjson

def fromfile(fn):
    _, extension = os.path.splitext(fn)
    if extension == '.json':
        with open(fn, 'r') as f:
            data = json.load(f)
    elif extension == '.ubjson':
        with open(fn, 'rb') as f:
            data = ubjson.load(f)
    else:
        raise ValueError('unsupported file extension: %s' % extension)

    frames = []
    for idx in data['ids']:
        ff = data['{}'.format(idx)]
        frame = ase.Atoms(positions=ff['positions'],
                          cell=ff['cell'],
                          numbers=ff['numbers'],
                          pbc=ff['pbc'])
        if 'info' in ff:
            frame.info = ff['info']
        if 'arrays' in ff:
            for k, v in ff['arrays'].items():
                frame.set_array(k, np.array(v))
        frames.append(frame)
    return frames
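A usage sketch; the file name is illustrative:

frames = fromfile('structures.ubjson')
print(len(frames), frames[0].get_positions().shape)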
Code Example #9
def _parse(stream, handlers):
    # For efficiency, don't send the whole file through ubjson.
    # Instead, assume `raw` is the first element. This is brittle and
    # ugly, but it's what the official parser does so it should be OK.
    expect_bytes(b'{U\x03raw[$U#l', stream)
    (length,) = unpack('l', stream)

    (bytes_read, payload_sizes) = _parse_event_payloads(stream)
    _parse_events(stream, payload_sizes, length - bytes_read, handlers)

    expect_bytes(b'U\x08metadata', stream)

    json = ubjson.load(stream)
    raw_handler = handlers.get(ParseEvent.METADATA_RAW)
    if raw_handler:
        raw_handler(json)

    metadata = Metadata._parse(json)
    handler = handlers.get(ParseEvent.METADATA)
    if handler:
        handler(metadata)

    expect_bytes(b'}', stream)
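For orientation, the expected prefix decodes as UBJSON markers: { opens the top-level object, U\x03raw is the three-byte key "raw", and [$U#l starts a strongly typed uint8 array whose int32 length follows. The unpack helper is assumed to wrap struct.unpack with big-endian byte order, since UBJSON numbers are big-endian; a sketch:

import struct

def unpack(fmt, stream):
    # UBJSON numeric payloads are big-endian; 'l' is an int32 here.
    fmt = '>' + fmt
    return struct.unpack(fmt, stream.read(struct.calcsize(fmt)))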
Code Example #10
File: __init__.py Project: musec/py-cdg
def save(graph, output, filename):
    nodes = graph.nodes
    roots = ((k, v) for (k, v) in graph.nodes.items() if 'parent' not in v)

    functions = collections.defaultdict(new_empty_function)
    for (fn_name, fn_attrs) in roots:
        if 'children' not in fn_attrs:
            continue

        fn = functions[fn_name]

        # Blocks have children; anything else must be an argument.
        child_names = set(fn_attrs['children'])
        blocks = {
            n
            for n in child_names if n in nodes  #and 'children' in nodes[n]
        }
        args = child_names.difference(blocks)

        for block_name in blocks:
            block_attrs = nodes[block_name]
            block = fn['blocks'][block_name] = dict([
                (k, v) for (k, v) in nodes[block_name].items()
                if k not in ('parent', 'children')
            ])

            for child_name in block_attrs['children']:
                child_attrs = nodes[child_name] if child_name in nodes else {}
                if 'parent' in child_attrs:
                    child_attrs.pop('parent')
                assert 'children' not in child_attrs

                block[child_name] = child_attrs

        for arg_name in args:
            #if arg_name not in nodes:
            #    continue

            arg_attrs = nodes[arg_name]
            arg_attrs.pop('parent')
            assert 'children' not in arg_attrs

            fn['arguments'][arg_name] = dict(arg_attrs)

        functions[fn_name] = fn

    for (src, dest, data) in graph.edges(data=True):
        #if src not in nodes or dest not in nodes:
        #    continue

        (fn_name, src_name) = src.split('::', 1)
        kind = data['kind']

        data = {
            'from': src,
            'to': dest,
            'kind': EdgeKind.to_str(kind),
        }

        if kind == EdgeKind.Call:
            functions[fn_name]['calls'].append(data)
        else:
            functions[fn_name]['flows'].append(data)

    import ubjson
    ubjson.dump({'functions': functions}, output)
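A round-trip sketch pairing this save with the load shown in Code Example #14; graph and the file name are illustrative:

with open('callgraph.cdg', 'wb') as out:
    save(graph, out, 'callgraph.cdg')

with open('callgraph.cdg', 'rb') as f:
    graph2 = load(f, 'callgraph.cdg')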
Code Example #11
File: learn.py Project: satojk/fishnet
import gzip
import ubjson

def deserialize_data(src):
    with gzip.open(src, "rb") as f:
        return ubjson.load(f)
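The matching writer is not shown; a minimal sketch, assuming data is any UBJSON-encodable object:

def serialize_data(data, dst):
    # gzip-compress the UBJSON stream, mirroring deserialize_data above.
    with gzip.open(dst, "wb") as f:
        ubjson.dump(data, f)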
Code Example #12
File: analyze-kconfig.py Project: derf/dfatool
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__)
    dfatool.cli.add_standard_arguments(parser)
    parser.add_argument(
        "--boolean-parameters",
        action="store_true",
        help=
        "Use boolean (not categorial) parameters when building the NFP model",
    )
    parser.add_argument(
        "--show-failing-symbols",
        action="store_true",
        help=
        "Show Kconfig symbols related to build failures. Must be used with an experiment result directory.",
    )
    parser.add_argument(
        "--show-nop-symbols",
        action="store_true",
        help=
        "Show Kconfig symbols which are only present in a single configuration. Must be used with an experiment result directory.",
    )
    parser.add_argument(
        "--force-tree",
        action="store_true",
        help=
        "Build decision tree without checking for analytic functions first. Use this for large kconfig files.",
    )
    parser.add_argument(
        "--max-std",
        type=str,
        metavar="VALUE_OR_MAP",
        help=
        "Specify desired maximum standard deviation for decision tree generation, either as float (global) or <key>/<attribute>=<value>[,<key>/<attribute>=<value>,...]",
    )
    parser.add_argument(
        "--export-observations",
        type=str,
        metavar="FILE.json.xz",
        help=
        "Export observations (intermediate and generic benchmark data representation) to FILE",
    )
    parser.add_argument(
        "--export-observations-only",
        action="store_true",
        help="Exit after exporting observations",
    )
    parser.add_argument(
        "--export-webconf",
        type=str,
        help="Export kconfig-webconf NFP model to file",
        metavar="FILE",
    )
    parser.add_argument(
        "--config",
        type=str,
        help="Show model results for symbols in .config file",
        metavar="FILE",
    )
    parser.add_argument(
        "--sample-size",
        type=int,
        help="Restrict model generation to N random samples",
        metavar="N",
    )
    parser.add_argument(
        "--show-model",
        choices=["static", "paramdetection", "param", "all", "tex", "html"],
        action="append",
        default=list(),
        help=
        "static: show static model values as well as parameter detection heuristic.\n"
        "paramdetection: show stddev of static/lut/fitted model\n"
        "param: show parameterized model functions and regression variable values\n"
        "all: all of the above\n"
        "tex: print tex/pgfplots-compatible model data on stdout\n"
        "html: print model and quality data as HTML table on stdout",
    )
    parser.add_argument(
        "--show-quality",
        choices=["table", "summary", "all", "tex", "html"],
        action="append",
        default=list(),
        help=
        "table: show static/fitted/lut SMAPE and MAE for each name and attribute.\n"
        "summary: show static/fitted/lut SMAPE and MAE for each attribute, averaged over all states/transitions.\n"
        "all: all of the above.\n"
        "tex: print tex/pgfplots-compatible model quality data on stdout.",
    )
    parser.add_argument("kconfig_path", type=str, help="Path to Kconfig file")
    parser.add_argument(
        "model",
        type=str,
        help=
        "Path to experiment results directory or observations.json.xz file",
    )

    args = parser.parse_args()

    if args.log_level:
        numeric_level = getattr(logging, args.log_level.upper(), None)
        if not isinstance(numeric_level, int):
            print(f"Invalid log level: {args.log_level}", file=sys.stderr)
            sys.exit(1)
        logging.basicConfig(level=numeric_level)

    if args.export_dref:
        dref = dict()

    if os.path.isdir(args.model):
        attributes = KConfigAttributes(args.kconfig_path, args.model)
        if args.export_dref:
            dref.update(attributes.to_dref())

        if args.show_failing_symbols:
            show_failing_symbols(attributes)
        if args.show_nop_symbols:
            show_nop_symbols(attributes)

        observations = list()

        for param, attr in attributes.data:
            for key, value in attr.items():
                observations.append({
                    "name": key,
                    "param": param,
                    "attribute": value,
                })

        if args.sample_size:
            shuffled_data_indices = np.random.permutation(
                np.arange(len(observations)))
            sample_indices = shuffled_data_indices[:args.sample_size]
            new_observations = list()
            for sample_index in sample_indices:
                new_observations.append(observations[sample_index])
            observations = new_observations

        if args.export_observations:
            import lzma

            print(
                f"Exporting {len(observations)} observations to {args.export_observations}"
            )
            with lzma.open(args.export_observations, "wt") as f:
                json.dump(observations, f)
            if args.export_observations_only:
                return
    else:
        # show-failing-symbols, show-nop-symbols, DFATOOL_KCONF_WITH_CHOICE_NODES, DFATOOL_KCONF_IGNORE_NUMERIC, and DFATOOL_KCONF_IGNORE_STRING have no effect
        # in this branch.

        if args.model.endswith("xz"):
            import lzma

            with lzma.open(args.model, "rt") as f:
                observations = json.load(f)
        elif args.model.endswith("ubjson"):
            import ubjson

            with open(args.model, "rb") as f:
                observations = ubjson.load(f)
        else:
            with open(args.model, "r") as f:
                observations = json.load(f)

        if bool(int(os.getenv("DFATOOL_KCONF_IGNORE_STRING", 0))):
            attributes = KConfigAttributes(args.kconfig_path, None)
            for observation in observations:
                to_remove = list()
                for param in observation["param"].keys():
                    if param not in attributes.symbol_names:
                        to_remove.append(param)
                for param in to_remove:
                    observation["param"].pop(param)

    if args.boolean_parameters:
        dfatool.utils.observations_enum_to_bool(observations, kconfig=True)

    if args.param_shift:
        param_shift = dfatool.cli.parse_param_shift(args.param_shift)
        dfatool.utils.shift_param_in_observations(observations, param_shift)

    by_name, parameter_names = dfatool.utils.observations_to_by_name(
        observations)

    # Release memory
    del observations

    if args.filter_param:
        args.filter_param = list(
            map(lambda x: x.split("="), args.filter_param.split(",")))
        dfatool.utils.filter_aggregate_by_param(by_name, parameter_names,
                                                args.filter_param)

    if args.max_std:
        max_std = dict()
        if "=" in args.max_std:
            for kkv in args.max_std.split(","):
                kk, v = kkv.split("=")
                key, attr = kk.split("/")
                if key not in max_std:
                    max_std[key] = dict()
                max_std[key][attr] = float(v)
        else:
            for key in by_name.keys():
                max_std[key] = dict()
                for attr in by_name[key]["attributes"]:
                    max_std[key][attr] = float(args.max_std)
    else:
        max_std = None

    constructor_start = time.time()
    model = AnalyticModel(
        by_name,
        parameter_names,
        force_tree=args.force_tree,
        max_std=max_std,
    )
    constructor_duration = time.time() - constructor_start

    if not model.names:
        logging.error(
            f"Model contains no names. Is --filter-param={args.filter_param} set too restrictive?"
        )
        sys.exit(1)

    if args.info:
        dfatool.cli.print_info_by_name(model, by_name)

    if args.export_pgf_unparam:
        dfatool.cli.export_pgf_unparam(model, args.export_pgf_unparam)

    if args.cross_validate:
        xv_method, xv_count = args.cross_validate.split(":")
        xv_count = int(xv_count)
        xv = CrossValidator(
            AnalyticModel,
            by_name,
            parameter_names,
            force_tree=args.force_tree,
            max_std=max_std,
        )
        xv.parameter_aware = args.parameter_aware_cross_validation
    else:
        xv_method = None

    static_model = model.get_static()
    try:
        lut_model = model.get_param_lut()
    except RuntimeError as e:
        if args.force_tree:
            # this is to be expected
            logging.debug(f"Skipping LUT model: {e}")
        else:
            logging.warning(f"Skipping LUT model: {e}")
        lut_model = None

    fit_start_time = time.time()
    param_model, param_info = model.get_fitted()
    fit_duration = time.time() - fit_start_time

    if xv_method == "montecarlo":
        static_quality, _ = xv.montecarlo(lambda m: m.get_static(), xv_count)
        if lut_model:
            lut_quality, _ = xv.montecarlo(
                lambda m: m.get_param_lut(fallback=True), xv_count)
        else:
            lut_quality = None
        xv.export_filename = args.export_xv
        analytic_quality, xv_analytic_models = xv.montecarlo(
            lambda m: m.get_fitted()[0], xv_count)
    elif xv_method == "kfold":
        static_quality, _ = xv.kfold(lambda m: m.get_static(), xv_count)
        if lut_model:
            lut_quality, _ = xv.kfold(lambda m: m.get_param_lut(fallback=True),
                                      xv_count)
        else:
            lut_quality = None
        xv.export_filename = args.export_xv
        analytic_quality, xv_analytic_models = xv.kfold(
            lambda m: m.get_fitted()[0], xv_count)
    else:
        static_quality = model.assess(static_model)
        if args.export_raw_predictions:
            analytic_quality, raw_results = model.assess(param_model,
                                                         return_raw=True)
            with open(args.export_raw_predictions, "w") as f:
                json.dump(raw_results, f, cls=dfatool.utils.NpEncoder)
        else:
            analytic_quality = model.assess(param_model)
        xv_analytic_models = [model]
        if lut_model:
            lut_quality = model.assess(lut_model)
        else:
            lut_quality = None

    if "static" in args.show_model or "all" in args.show_model:
        print("--- static model ---")
        for name in model.names:
            for attribute in model.attributes(name):
                dfatool.cli.print_static(model, static_model, name, attribute)

    if "param" in args.show_model or "all" in args.show_model:
        print("--- param model ---")
        for name in model.names:
            for attribute in model.attributes(name):
                info = param_info(name, attribute)
                if type(info) is dfatool.cli.AnalyticFunction:
                    dfatool.cli.print_analyticinfo(
                        f"{name:20s} {attribute:15s}", info)
                elif type(info) is dfatool.cli.FOLFunction:
                    dfatool.cli.print_analyticinfo(
                        f"{name:20s} {attribute:15s}", info)
                elif type(info) is dfatool.cli.SplitFunction:
                    dfatool.cli.print_splitinfo(model.parameters, info,
                                                f"{name:20s} {attribute:15s}")

    if "table" in args.show_quality or "all" in args.show_quality:
        dfatool.cli.model_quality_table(
            ["static", "parameterized", "LUT"],
            [static_quality, analytic_quality, lut_quality],
            [None, param_info, None],
        )

    print("Model Error on Training Data:")
    for name in sorted(model.names):
        for attribute, error in sorted(analytic_quality[name].items(),
                                       key=lambda kv: kv[0]):
            mae = error["mae"]
            smape = error["smape"]
            print(
                f"{name:15s} {attribute:20s}  ± {mae:10.2}  /  {smape:5.1f}%")

    if args.show_model_size:
        dfatool.cli.print_model_size(model)

    if args.export_webconf:
        attributes = KConfigAttributes(args.kconfig_path, None)
        try:
            with open(f"{attributes.kconfig_root}/nfpkeys.json", "r") as f:
                nfpkeys = json.load(f)
        except FileNotFoundError:
            logging.error(
                f"{attributes.kconfig_root}/nfpkeys.json is missing, webconf model will be incomplete"
            )
            nfpkeys = None
        kconfig_hasher = hashlib.sha256()
        with open(args.kconfig_path, "rb") as f:
            kconfig_data = f.read()
            while len(kconfig_data) > 0:
                kconfig_hasher.update(kconfig_data)
                kconfig_data = f.read()
        kconfig_hash = str(kconfig_hasher.hexdigest())
        complete_json_model = model.to_json(with_param_name=True,
                                            param_names=parameter_names)
        json_model = dict()
        for name, attribute_data in complete_json_model["name"].items():
            for attribute, data in attribute_data.items():
                json_model[attribute] = data.copy()
                if nfpkeys:
                    json_model[attribute].update(nfpkeys[name][attribute])
        out_model = {
            "model": json_model,
            "modelType": "dfatool-kconfig",
            "project": "tbd",
            "kconfigHash": kconfig_hash,
            "symbols": attributes.symbol_names,
            "choices": attributes.choice_names,
        }
        with open(args.export_webconf, "w") as f:
            json.dump(out_model,
                      f,
                      sort_keys=True,
                      cls=dfatool.utils.NpEncoder)

    if args.export_dot:
        dfatool.cli.export_dot(model, args.export_dot)

    if args.export_dref:
        dref.update(
            model.to_dref(
                static_quality,
                lut_quality,
                analytic_quality,
                xv_models=xv_analytic_models,
            ))
        dref["constructor duration"] = (constructor_duration, r"\second")
        dref["regression duration"] = (fit_duration, r"\second")
        dfatool.cli.export_dataref(args.export_dref, dref)

    if args.config:
        kconf = kconfiglib.Kconfig(args.kconfig_path)
        kconf.load_config(args.config)
        print(f"Model result for .config: {model.value_for_config(kconf)}")

        for symbol in model.symbols:
            kconf2 = kconfiglib.Kconfig(args.kconfig_path)
            kconf2.load_config(args.config)
            kconf_sym = kconf2.syms[symbol]
            if kconf_sym.tri_value == 0 and 2 in kconf_sym.assignable:
                kconf_sym.set_value(2)
            elif kconf_sym.tri_value == 2 and 0 in kconf_sym.assignable:
                kconf_sym.set_value(0)
            else:
                continue

            # specific to multipass:
            # Do not suggest changes which affect the application
            skip = False
            num_changes = 0
            changed_symbols = list()
            for i, csymbol in enumerate(model.symbols):
                if kconf.syms[csymbol].tri_value != kconf2.syms[
                        csymbol].tri_value:
                    num_changes += 1
                    changed_symbols.append(csymbol)
                    if (csymbol.startswith("app_")
                            and kconf.syms[csymbol].tri_value !=
                            kconf2.syms[csymbol].tri_value):
                        skip = True
                        break
            if skip:
                continue

            try:
                model_diff = model.value_for_config(
                    kconf2) - model.value_for_config(kconf)
                if kconf_sym.choice:
                    print(
                        f"Setting {kconf_sym.choice.name} to {kconf_sym.name} changes {num_changes:2d} symbols, model change: {model_diff:+5.0f}"
                    )
                else:
                    print(
                        f"Setting {symbol} to {kconf_sym.str_value} changes {num_changes:2d} symbols, model change: {model_diff:+5.0f}"
                    )
            except TypeError:
                if kconf_sym.choice:
                    print(
                        f"Setting {kconf_sym.choice.name} to {kconf_sym.name} changes {num_changes:2d} symbols, model is undefined"
                    )
                else:
                    print(
                        f"Setting {symbol} to {kconf_sym.str_value} changes {num_changes:2d} symbols, model is undefined"
                    )
            for changed_symbol in changed_symbols:
                print(
                    f"    {changed_symbol:30s} -> {kconf2.syms[changed_symbol].str_value}"
                )
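For reference, the observations loaded above form a flat list of dicts, matching the loop over attributes.data in the directory branch; a minimal sketch of writing such a list as UBJSON so it can be fed back via the model argument (names and values here are made up):

import ubjson

observations = [
    {"name": "multipass", "param": {"CONFIG_FOO": "y"}, "attribute": 1234},
]
with open("observations.ubjson", "wb") as f:
    ubjson.dump(observations, f)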
Code Example #13
File: quicklook.py Project: shiningsurya/vlite-fast
import ubjson

def read_meta(x):
    """Reads meta file."""
    with open(x, "rb") as f:
        m = ubjson.load(f)
    return m
Code Example #14
File: __init__.py Project: musec/py-cdg
def load(stream, filename):
    if filename.endswith('.cdg'):
        import ubjson
        cg = ubjson.load(stream)

    elif filename.endswith('.json'):
        import json
        cg = json.load(stream)

    elif filename.endswith('.yaml'):
        import yaml

        try:
            from yaml import CLoader as Loader
        except ImportError:
            from yaml import Loader

        cg = yaml.load(stream, Loader=Loader)

    else:
        raise ValueError('Unhandled file type: %s' % filename)

    graph = create(filename)

    fns = cg['functions']
    for (fn_name, props) in fns.items():
        graph.add_node(fn_name, children=set())

        if 'attributes' in props:
            graph.nodes[fn_name].update(props['attributes'])

        if 'arguments' in props:
            arguments = props['arguments']
            if arguments:
                for (name, attrs) in props['arguments'].items():
                    graph.add_node(name, parent=fn_name, **attrs)
                    graph.nodes[fn_name]['children'].add(name)

        if 'blocks' in props:
            for (block_name, values) in props['blocks'].items():
                graph.add_node(block_name, parent=fn_name, children=set())
                graph.nodes[fn_name]['children'].add(block_name)
                graph.nodes[block_name]['children'] = set(values)

                for (value_name, value_attrs) in values.items():
                    graph.add_node(value_name,
                                   parent=block_name,
                                   **value_attrs)

        if 'calls' in props:
            calls = props['calls']
            if calls:
                for call in calls:
                    source = call['from']
                    dest = call['to']

                    target = fns[dest] if dest in fns else None
                    if target and 'arguments' in target:
                        for arg in target['arguments']:
                            graph.add_edge(source, arg, kind=EdgeKind.Call)

                    else:
                        graph.add_edge(source, dest, kind=EdgeKind.Call)

        if 'flows' in props:
            flows = props['flows']
            if flows:
                for flow in flows:
                    source = flow['from']
                    dest = flow['to']
                    kind = EdgeKind.from_str(flow['kind'])

                    graph.add_edge(source, dest, kind=kind)

    return graph
Code Example #15
import ubjson

def decode_file(replay):
    # Decode a UBJSON file object to native Python data.
    decoded = ubjson.load(replay)
    return decoded
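Note that replay must be a binary-mode file object; a usage sketch with an illustrative file name:

with open("game.slp", "rb") as replay:
    data = decode_file(replay)
print(sorted(data.keys()))  # a .slp file has 'metadata' and 'raw' at the top level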
Code Example #16
File: data_parameters.py Project: derf/dfatool
    def __init__(
        self,
        logfile,
        cpu_conf=None,
        cpu_conf_str=None,
        radio_conf=None,
        radio_conf_str=None,
    ):
        """
        Load and enrich raw protobench log data.

        The enriched data can be accessed via the .aggregate class member,
        see the class documentation for details.
        """
        self.cpu = None
        self.radio = None
        with open(logfile, "rb") as f:
            self.data = ubjson.load(f)
        self.libraries = set()
        self.architectures = set()
        self.aggregate = dict()

        for arch_lib in self.data.keys():
            arch, lib, libopts = arch_lib.split(":")
            library = lib + ":" + libopts
            for benchmark in self.data[arch_lib].keys():
                for benchmark_item in self.data[arch_lib][benchmark].keys():
                    subv = self.data[arch_lib][benchmark][benchmark_item]
                    for aggregate_label, data_label, getter in Protolog.datamap:
                        try:
                            self.add_datapoint(
                                arch,
                                library,
                                (benchmark, benchmark_item),
                                subv,
                                aggregate_label,
                                data_label,
                                getter,
                            )
                        except KeyError:
                            pass
                        except TypeError as e:
                            logger.error(
                                "TypeError in {} {} {} {}: {} -> {}".format(
                                    arch_lib,
                                    benchmark,
                                    benchmark_item,
                                    aggregate_label,
                                    subv[data_label]["v"],
                                    str(e),
                                ))
                            pass
                    try:
                        codegen = codegen_for_lib(lib, libopts.split(","),
                                                  subv["data"])
                        if codegen.max_serialized_bytes is not None:
                            self.add_datapoint(
                                arch,
                                library,
                                (benchmark, benchmark_item),
                                subv,
                                "buffer_size",
                                data_label,
                                lambda x: codegen.max_serialized_bytes,
                            )
                        else:
                            self.add_datapoint(
                                arch,
                                library,
                                (benchmark, benchmark_item),
                                subv,
                                "buffer_size",
                                data_label,
                                lambda x: 0,
                            )
                    except Exception:
                        # avro's codegen will raise RuntimeError("Unsupported Schema") on unsupported data. Other libraries may just silently ignore it.
                        self.add_datapoint(
                            arch,
                            library,
                            (benchmark, benchmark_item),
                            subv,
                            "buffer_size",
                            data_label,
                            lambda x: 0,
                        )
                    # self.aggregate[(benchmark, benchmark_item)][arch][lib][aggregate_label] = getter(value[data_label]['v'])

        for key in self.aggregate.keys():
            for arch in self.aggregate[key].keys():
                for lib, val in self.aggregate[key][arch].items():
                    try:
                        val["cycles_encser"] = val["cycles_enc"] + val[
                            "cycles_ser"]
                    except KeyError:
                        pass
                    try:
                        val["cycles_desdec"] = val["cycles_des"] + val[
                            "cycles_dec"]
                    except KeyError:
                        pass
                    try:
                        for line in val["callcycles_median"].keys():
                            val["callcycles_median"][line] -= val["cycles_nop"]
                    except KeyError:
                        pass
                    try:
                        val["data_serdes_delta"] = val["data_serdes"] - val[
                            "data_nop"]
                    except KeyError:
                        pass
                    try:
                        val["data_serdes_delta_nobuf"] = (val["data_serdes"] -
                                                          val["data_nop"] -
                                                          val["buffer_size"])
                    except KeyError:
                        pass
                    try:
                        val["bss_serdes_delta"] = val["bss_serdes"] - val[
                            "bss_nop"]
                    except KeyError:
                        pass
                    try:
                        val["bss_serdes_delta_nobuf"] = (val["bss_serdes"] -
                                                         val["bss_nop"] -
                                                         val["buffer_size"])
                    except KeyError:
                        pass
                    try:
                        val["text_serdes_delta"] = val["text_serdes"] - val[
                            "text_nop"]
                    except KeyError:
                        pass
                    try:
                        val["total_dmem_ser"] = val["stack_alloc_ser"]
                        val["written_dmem_ser"] = val["stack_set_ser"]
                        val["total_dmem_ser"] += val["heap_ser"]
                        val["written_dmem_ser"] += val["heap_ser"]
                    except KeyError:
                        pass
                    try:
                        val["total_dmem_des"] = val["stack_alloc_des"]
                        val["written_dmem_des"] = val["stack_set_des"]
                        val["total_dmem_des"] += val["heap_des"]
                        val["written_dmem_des"] += val["heap_des"]
                    except KeyError:
                        pass
                    try:
                        val["total_dmem_serdes"] = max(val["total_dmem_ser"],
                                                       val["total_dmem_des"])
                    except KeyError:
                        pass
                    try:
                        val["text_ser_delta"] = val["text_ser"] - val[
                            "text_nop"]
                        val["text_serdes_delta"] = val["text_serdes"] - val[
                            "text_nop"]
                    except KeyError:
                        pass
                    try:
                        val["bss_ser_delta"] = val["bss_ser"] - val["bss_nop"]
                        val["bss_serdes_delta"] = val["bss_serdes"] - val[
                            "bss_nop"]
                    except KeyError:
                        pass
                    try:
                        val["data_ser_delta"] = val["data_ser"] - val[
                            "data_nop"]
                        val["data_serdes_delta"] = val["data_serdes"] - val[
                            "data_nop"]
                    except KeyError:
                        pass
                    try:
                        val["allmem_ser"] = (val["text_ser"] +
                                             val["data_ser"] + val["bss_ser"] +
                                             val["total_dmem_ser"] -
                                             val["buffer_size"])
                        val["allmem_serdes"] = (val["text_serdes"] +
                                                val["data_serdes"] +
                                                val["bss_serdes"] +
                                                val["total_dmem_serdes"] -
                                                val["buffer_size"])
                    except KeyError:
                        pass
                    try:
                        val["smem_serdes"] = (val["text_serdes"] +
                                              val["data_serdes"] +
                                              val["bss_serdes"] -
                                              val["buffer_size"])
                    except KeyError:
                        pass
                    try:
                        val["mem_serdes_delta"] = (
                            val["data_serdes_delta"] +
                            val["bss_serdes_delta"] +
                            max(val["stack_set_ser"], val["stack_set_des"]))
                    except KeyError:
                        pass

        if cpu_conf_str:
            cpu_conf = utils.parse_conf_str(cpu_conf_str)

        if cpu_conf:
            self.cpu_conf = cpu_conf
            cpu = self.cpu = cycles_to_energy.get_class(cpu_conf["model"])
            for key, value in cpu.default_params.items():
                if key not in cpu_conf:
                    cpu_conf[key] = value
            for key in self.aggregate.keys():
                for arch in self.aggregate[key].keys():
                    for lib, val in self.aggregate[key][arch].items():
                        # All energy data is stored in nanojoules (nJ)
                        try:
                            val["energy_enc"] = int(val["cycles_enc"] *
                                                    cpu.get_power(cpu_conf) /
                                                    cpu_conf["cpu_freq"] * 1e9)
                        except KeyError:
                            pass
                        except ValueError:
                            logger.warning(
                                "cycles_enc is NaN for {} -> {} -> {}".format(
                                    arch, lib, key))
                        try:
                            val["energy_ser"] = int(val["cycles_ser"] *
                                                    cpu.get_power(cpu_conf) /
                                                    cpu_conf["cpu_freq"] * 1e9)
                        except KeyError:
                            pass
                        except ValueError:
                            logger.warning(
                                "cycles_ser is NaN for {} -> {} -> {}".format(
                                    arch, lib, key))
                        try:
                            val["energy_encser"] = int(
                                val["cycles_encser"] *
                                cpu.get_power(cpu_conf) /
                                cpu_conf["cpu_freq"] * 1e9)
                        except KeyError:
                            pass
                        except ValueError:
                            logger.warning(
                                "cycles_encser is NaN for {} -> {} -> {}".
                                format(arch, lib, key))
                        try:
                            val["energy_des"] = int(val["cycles_des"] *
                                                    cpu.get_power(cpu_conf) /
                                                    cpu_conf["cpu_freq"] * 1e9)
                        except KeyError:
                            pass
                        except ValueError:
                            logger.warning(
                                "cycles_des is NaN for {} -> {} -> {}".format(
                                    arch, lib, key))
                        try:
                            val["energy_dec"] = int(val["cycles_dec"] *
                                                    cpu.get_power(cpu_conf) /
                                                    cpu_conf["cpu_freq"] * 1e9)
                        except KeyError:
                            pass
                        except ValueError:
                            logger.warning(
                                "cycles_dec is NaN for {} -> {} -> {}".format(
                                    arch, lib, key))
                        try:
                            val["energy_desdec"] = int(
                                val["cycles_desdec"] *
                                cpu.get_power(cpu_conf) /
                                cpu_conf["cpu_freq"] * 1e9)
                        except KeyError:
                            pass
                        except ValueError:
                            logger.warning(
                                "cycles_desdec is NaN for {} -> {} -> {}".
                                format(arch, lib, key))

        if radio_conf_str:
            radio_conf = utils.parse_conf_str(radio_conf_str)

        if radio_conf:
            self.radio_conf = radio_conf
            radio = self.radio = size_to_radio_energy.get_class(
                radio_conf["model"])
            for key, value in radio.default_params.items():
                if key not in radio_conf:
                    radio_conf[key] = value
            for key in self.aggregate.keys():
                for arch in self.aggregate[key].keys():
                    for lib, val in self.aggregate[key][arch].items():
                        try:
                            radio_conf["txbytes"] = val["serialized_size"]
                            if radio_conf["txbytes"] > 0:
                                val["energy_tx"] = int(
                                    radio.get_energy(radio_conf) * 1e9)
                            else:
                                val["energy_tx"] = 0
                            val["energy_encsertx"] = (val["energy_encser"] +
                                                      val["energy_tx"])
                            val["energy_desdecrx"] = (val["energy_desdec"] +
                                                      val["energy_tx"])
                        except KeyError:
                            pass
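A usage sketch; the log file name and configuration strings are illustrative, and the key=value,... format is an assumption about utils.parse_conf_str:

# Hypothetical usage; file name, model names, and conf strings are made up.
log = Protolog(
    "benchmark.ubjson",
    cpu_conf_str="model=msp430,cpu_freq=16000000",
    radio_conf_str="model=cc1200",
)
for (benchmark, item), per_arch in log.aggregate.items():
    print(benchmark, item, sorted(per_arch.keys()))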