Example #1
def write_json_log(jsonlogfile: typing.TextIO, test_name: str, result: TestRun) -> None:
    jresult = {'name': test_name,
               'stdout': result.stdo,
               'result': result.res.value,
               'duration': result.duration,
               'returncode': result.returncode,
               'env': result.env,
               'command': result.cmd}  # type: typing.Dict[str, typing.Any]
    if result.stde:
        jresult['stderr'] = result.stde
    jsonlogfile.write(json.dumps(jresult) + '\n')
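A minimal usage sketch for the logger above. The real `TestRun` type comes from Meson's test harness; here a hypothetical `SimpleNamespace` stand-in mimics just the attributes the function reads.

import enum
from types import SimpleNamespace

class FakeResult(enum.Enum):       # hypothetical stand-in for the real result enum
    OK = 'OK'

# Only the attributes write_json_log() touches are provided.
fake_run = SimpleNamespace(stdo='all tests passed', res=FakeResult.OK,
                           duration=0.12, returncode=0,
                           env={'CI': '1'}, cmd=['./unit_tests'], stde='')

with open('testlog.json', 'w') as logfile:
    write_json_log(logfile, 'unit_tests', fake_run)   # one JSON object per line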
Example #2
 def _update_params(infile: Iterable[str], outfile: TextIO):
     startday_pattern = ' start time (days)= '
     stopday_pattern = ' stop time (days) = '
     for line in infile:
         if line.startswith(startday_pattern):
             line = '%s%f\n' % (startday_pattern, from_day)
         if line.startswith(stopday_pattern):
             line = '%s%f\n' % (stopday_pattern, to_day)
         outfile.write(line)
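`from_day` and `to_day` are not defined in the snippet; they appear to come from the enclosing scope in the original source. A small sketch under that assumption, using in-memory streams:

import io

from_day, to_day = 0.0, 30.0   # assumed values from the enclosing scope

template = (
    " start time (days)= 0.000000\n"
    " stop time (days) = 10.000000\n"
    "other setting = 1\n"
)
out = io.StringIO()
_update_params(io.StringIO(template), out)
print(out.getvalue())   # start/stop lines rewritten, the rest copied through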
Example #3
    def save(self, *, config_fd: Optional[TextIO] = None, encode: bool = False) -> None:
        with io.StringIO() as config_buffer:
            self.parser.write(config_buffer)
            config = config_buffer.getvalue()
            if encode:
                # Encode config using base64
                config = base64.b64encode(
                    config.encode(sys.getfilesystemencoding())
                ).decode(sys.getfilesystemencoding())

            if config_fd:
                config_fd.write(config)
            else:
                with open(self.save_path(), "w") as f:
                    f.write(config)
Example #4
def dump_info(file: TextIO) -> None:
    """Create the wiki page for item options, given a file to write to."""
    print(DOC_HEADER, file=file)
    
    for opt in DEFAULTS:
        if opt.default is None:
            default = ''
        elif type(opt.default) is Vec:
            default = '(`' + opt.default.join(' ') + '`)'
        else:
            default = ' = `' + repr(opt.default) + '`'
        file.write(INFO_DUMP_FORMAT.format(
            id=opt.name, 
            default=default,
            type=TYPE_NAMES[opt.type],
            desc='\n'.join(opt.doc),
        ))
Example #5
    def _process_includes(self,
                          file_in: TextIO,
                          filename: str,
                          file_out: TextIO) -> None:
        log.debug(f'Processing includes in "{filename}"')

        for line in file_in:
            match = self._include_pattern.search(line)
            if match:
                if self._nested >= self._maxnest:
                    raise MaxNestingExceededError(
                        f'Exceeded maximum include depth of {self._maxnest}'
                    )

                inc_name = match.group(1)
                log.debug(f'Found include directive: {line[:-1]}')
                f, included_name = self._open(inc_name, filename)
                self._nested += 1
                self._process_includes(f, filename, file_out)
                self._nested -= 1
            else:
                file_out.write(line)
Example #6
 def write(out: TextIO) -> None:
     name    = target.name
     package = target.package
     out.write(PKG_CONFIG.format(**locals()))
Example #7
def sort_stream(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = False,
    show_diff: Union[bool, TextIO] = False,
    **config_kwargs,
) -> bool:
    """Sorts any imports within the provided code stream, outputs to the provided output stream.
     Returns `True` if anything is modified from the original input stream, otherwise `False`.

    - **input_stream**: The stream of code with imports that need to be sorted.
    - **output_stream**: The stream where sorted imports should be written to.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
    TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - ****config_kwargs**: Any config modifications.
    """
    if show_diff:
        _output_stream = StringIO()
        _input_stream = StringIO(input_stream.read())
        changed = sort_stream(
            input_stream=_input_stream,
            output_stream=_output_stream,
            extension=extension,
            config=config,
            file_path=file_path,
            disregard_skip=disregard_skip,
            **config_kwargs,
        )
        _output_stream.seek(0)
        _input_stream.seek(0)
        show_unified_diff(
            file_input=_input_stream.read(),
            file_output=_output_stream.read(),
            file_path=file_path,
            output=output_stream if show_diff is True else cast(
                TextIO, show_diff),
            color_output=config.color_output,
        )
        return changed

    config = _config(path=file_path, config=config, **config_kwargs)
    content_source = str(file_path or "Passed in content")
    if not disregard_skip:
        if file_path and config.is_skipped(file_path):
            raise FileSkipSetting(content_source)

    _internal_output = output_stream

    if config.atomic:
        try:
            file_content = input_stream.read()
            compile(file_content, content_source, "exec", 0, 1)
            input_stream = StringIO(file_content)
        except SyntaxError:
            raise ExistingSyntaxErrors(content_source)

        if not output_stream.readable():
            _internal_output = StringIO()

    try:
        changed = core.process(
            input_stream,
            _internal_output,
            extension=extension or (file_path and file_path.suffix.lstrip("."))
            or "py",
            config=config,
        )
    except FileSkipComment:
        raise FileSkipComment(content_source)

    if config.atomic:
        _internal_output.seek(0)
        try:
            compile(_internal_output.read(), content_source, "exec", 0, 1)
            _internal_output.seek(0)
            if _internal_output != output_stream:
                output_stream.write(_internal_output.read())
        except SyntaxError:  # pragma: no cover
            raise IntroducedSyntaxErrors(content_source)

    return changed
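This is isort's `sort_stream` API. A short sketch with in-memory streams, assuming isort's default configuration:

from io import StringIO

source = StringIO("import sys\nimport os\n\nprint(os.sep, sys.platform)\n")
destination = StringIO()

changed = sort_stream(input_stream=source, output_stream=destination)
print(changed)                  # True: the two imports get reordered
print(destination.getvalue())   # "import os\nimport sys\n\nprint(os.sep, sys.platform)\n"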
Example #8
def main(
    script_path: Optional[str],
    stdout: TextIO,
    stderr: TextIO,
    args: Optional[List[str]] = None,
) -> None:
    """Main entry point to the type checker.

    Args:
        script_path: Path to the 'mypy' script (used for finding data files).
        args: Custom command-line arguments.  If not given, sys.argv[1:] will
        be used.
    """
    util.check_python_version('mypy')
    t0 = time.time()
    # To log stat() calls: os.stat = stat_proxy
    sys.setrecursionlimit(2**14)
    if args is None:
        args = sys.argv[1:]

    fscache = FileSystemCache()
    sources, options = process_options(args,
                                       stdout=stdout,
                                       stderr=stderr,
                                       fscache=fscache)

    messages = []
    formatter = util.FancyFormatter(stdout, stderr, options.show_error_codes)

    def flush_errors(new_messages: List[str], serious: bool) -> None:
        if options.pretty:
            new_messages = formatter.fit_in_terminal(new_messages)
        messages.extend(new_messages)
        f = stderr if serious else stdout
        try:
            for msg in new_messages:
                if options.color_output:
                    msg = formatter.colorize(msg)
                f.write(msg + '\n')
            f.flush()
        except BrokenPipeError:
            sys.exit(2)

    serious = False
    blockers = False
    res = None
    try:
        # Keep a dummy reference (res) for memory profiling below, as otherwise
        # the result could be freed.
        res = build.build(sources, options, None, flush_errors, fscache,
                          stdout, stderr)
    except CompileError as e:
        blockers = True
        if not e.use_stdout:
            serious = True
    if options.warn_unused_configs and options.unused_configs and not options.incremental:
        print("Warning: unused section(s) in %s: %s" %
              (options.config_file, ", ".join(
                  "[mypy-%s]" % glob
                  for glob in options.per_module_options.keys()
                  if glob in options.unused_configs)),
              file=stderr)
    maybe_write_junit_xml(time.time() - t0, serious, messages, options)

    if MEM_PROFILE:
        from mypy.memprofile import print_memory_profile
        print_memory_profile()

    code = 0
    if messages:
        code = 2 if blockers else 1
    if options.error_summary:
        if messages:
            n_errors, n_files = util.count_stats(messages)
            if n_errors:
                stdout.write(
                    formatter.format_error(n_errors, n_files, len(sources),
                                           options.color_output) + '\n')
        else:
            stdout.write(
                formatter.format_success(len(sources), options.color_output) +
                '\n')
        stdout.flush()
    if options.fast_exit:
        # Exit without freeing objects -- it's faster.
        #
        # NOTE: We don't flush all open files on exit (or run other destructors)!
        util.hard_exit(code)
    elif code:
        sys.exit(code)

    # HACK: keep res alive so that mypyc won't free it before the hard_exit
    list([res])
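A hedged sketch of driving this entry point directly with the signature shown above; `--version` is handled inside `process_options`, which exits via `SystemExit` before any type checking starts.

import sys

try:
    main(None, sys.stdout, sys.stderr, args=['--version'])
except SystemExit as exc:
    print('mypy exited with code', exc.code)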
Example #9
def evaluate(gt_datas: dict, pd_datas: dict, d_min: float, d_max: float,
             out_file: TextIO) -> None:
    """Evaluate tracking output.

    Args:
        gt_datas: ground-truth annotations, keyed by log id
        pd_datas: tracker output annotations, keyed by log id
        d_min: minimum distance range
        d_max: maximum distance range
        out_file: output file object
    """
    acc_c = mm.MOTAccumulator(auto_id=True)
    acc_i = mm.MOTAccumulator(auto_id=True)
    acc_o = mm.MOTAccumulator(auto_id=True)

    ID_gt_all: List[str] = []

    count_all: int = 0
    fr_count: int = 0

    tqdm.write(f"{len(pd_datas)} {len(gt_datas)}")
    assert len(pd_datas) == len(gt_datas)

    pbar = tqdm(zip(pd_datas.items(), gt_datas.items()), total=len(gt_datas))
    for (log_id_pd, pd_data), (log_id_gt, gt_data) in pbar:
        fr_count += len(pd_data['frames'])
        pbar.set_postfix_str(s=f"Logs: {log_id_gt} AccumFrames: {fr_count} | "
                             f"PD: {len(pd_data['frames'])} "
                             f"GT: {len(gt_data['frames'])}]")

        assert len(pd_data['frames']) == len(gt_data['frames'])
        assert log_id_pd == log_id_gt

        for (_, hypos), (_, annos) in \
                zip(pd_data['frames'].items(), gt_data['frames'].items()):

            # Get entries in GT and PD
            gt, id_gts = create_entry(annos['annotations'], d_min, d_max)
            tracks, id_tracks = create_entry(hypos['annotations'], d_min,
                                             d_max)

            ID_gt_all.append(np.unique(id_gts).tolist())

            dists_c: List[List[float]] = []
            dists_i: List[List[float]] = []
            dists_o: List[List[float]] = []
            for _, gt_value in gt.items():
                gt_track_data_c: List[float] = []
                gt_track_data_i: List[float] = []
                gt_track_data_o: List[float] = []
                dists_c.append(gt_track_data_c)
                dists_i.append(gt_track_data_i)
                dists_o.append(gt_track_data_o)
                for _, track_value in tracks.items():
                    count_all += 1
                    gt_track_data_c.append(
                        get_distance(gt_value, track_value, "centroid"))
                    gt_track_data_i.append(
                        get_distance(gt_value, track_value, "iou"))
                    gt_track_data_o.append(
                        get_distance(gt_value, track_value, "orientation"))

            acc_c.update(id_gts, id_tracks, dists_c)
            acc_i.update(id_gts, id_tracks, dists_i)
            acc_o.update(id_gts, id_tracks, dists_o)

    ID_gt_all = np.unique([item for lists in ID_gt_all for item in lists])

    if count_all == 0:
        # Fix for when all hypotheses are empty;
        # py-motmetrics currently doesn't support this, see https://github.com/cheind/py-motmetrics/issues/49
        acc_c.update(id_gts, [-1], np.ones(np.shape(id_gts)) * np.inf)
        acc_i.update(id_gts, [-1], np.ones(np.shape(id_gts)) * np.inf)
        acc_o.update(id_gts, [-1], np.ones(np.shape(id_gts)) * np.inf)

    tqdm.write("Computing...")
    summary = mh.compute(
        acc_c,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    tqdm.write(f"summary = \n{summary}")
    num_tracks = len(ID_gt_all)
    if num_tracks == 0:
        num_tracks = 1

    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp_c = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_frag = summary["num_fragmentations"][0]

    #acc_c.events.loc[acc_c.events.Type != "RAW",
    #                 "D"] = acc_i.events.loc[acc_c.events.Type != "RAW", "D"]

    sum_motp_i = mh.compute(acc_i, metrics=["motp"], name="acc")
    tqdm.write(f"MOTP-I = \n{sum_motp_i}")

    motp_i = sum_motp_i["motp"][0]

    # acc_c.events.loc[acc_c.events.Type != "RAW",
    #                 "D"] = acc_o.events.loc[acc_c.events.Type != "RAW", "D"]
    sum_motp_o = mh.compute(acc_o, metrics=["motp"], name="acc")
    tqdm.write(f"MOTP-O = \n{sum_motp_o}")

    motp_o = sum_motp_o["motp"][0]

    out_string = (f"{num_frames} {mota:.2f} "
                  f"{motp_c:.2f} {motp_o:.2f} {motp_i:.2f} "
                  f"{idf1:.2f} {most_track:.2f} {most_lost:.2f} "
                  f"{num_fp} {num_miss} {num_switch} {num_frag}\n")
    out_file.write(out_string)
Example #10
def parseXML(xmlFile: str, destinationFile: TextIO):
    for variables in ElementTree.parse(xmlFile).getroot().findall('variables'):
        for data in variables.findall('data'):
            variablesVersion = data.attrib['Version']
            for items in data.findall('Items'):
                for variable in items.findall('Variable'):
                    name = variable.attrib['Name']
                    description = variable.attrib.get('Descr', "")
                    typeAttrib = variable.attrib["Type"]
                    if variablesVersion == "3":
                        varType = typeByGuid(typeAttrib)
                    elif variablesVersion == "4":
                        varType = typeByName(typeAttrib)
                    else:
                        varType = typeAttrib

                    variableValue = ''

                    for valueChild in variable.findall('DefValue'):
                        # attrib will have its name based on type, like `StrValue`.
                        # Because there is only one attrib, parse them 'all'
                        for defValue in valueChild.attrib.values():
                            variableValue = defValue

                    if description or variableValue:
                        destinationFile.write(f'        /**\n')
                    if description:
                        destinationFile.write(f'         * {description}\n')
                    if description and variableValue:
                        destinationFile.write(f'         *\n')
                    if variableValue:
                        destinationFile.write('         * @default ')
                        if varType == "int" or varType == "double":
                            destinationFile.write(variableValue)
                        elif varType == "boolean":
                            destinationFile.write(variableValue.lower())
                        else:  # string
                            variableValue = variableValue.replace('\\', '\\\\')
                            destinationFile.write(f'"{variableValue}"')

                        destinationFile.write('\n')
                    if description or variableValue:
                        destinationFile.write(f'         */\n')

                    # TODO: optional readonly - to not change variables during tests
                    destinationFile.write(f'        {name}: {varType};\n')
Example #11
def write_to_conll_eval_file(prediction_file: TextIO,
                             gold_file: TextIO,
                             verb_index: Optional[int],
                             sentence: List[str],
                             prediction: List[str],
                             gold_labels: List[str]):
    """
    Prints predicate argument predictions and gold labels for a single verbal
    predicate in a sentence to two provided file references.

    Parameters
    ----------
    prediction_file : TextIO, required.
        A file reference to print predictions to.
    gold_file : TextIO, required.
        A file reference to print gold labels to.
    verb_index : Optional[int], required.
        The index of the verbal predicate in the sentence which
        the gold labels are the arguments for, or None if the sentence
        contains no verbal predicate.
    sentence : List[str], required.
        The word tokens.
    prediction : List[str], required.
        The predicted BIO labels.
    gold_labels : List[str], required.
        The gold BIO labels.
    """
    verb_only_sentence = ["-"] * len(sentence)
    if verb_index:
        verb_only_sentence[verb_index] = sentence[verb_index]

    conll_format_predictions = convert_bio_tags_to_conll_format(prediction)
    conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)

    for word, predicted, gold in zip(verb_only_sentence,
                                     conll_format_predictions,
                                     conll_format_gold_labels):
        prediction_file.write(word.ljust(15))
        prediction_file.write(predicted.rjust(15) + "\n")
        gold_file.write(word.ljust(15))
        gold_file.write(gold.rjust(15) + "\n")
    prediction_file.write("\n")
    gold_file.write("\n")
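A usage sketch for the writer above. `convert_bio_tags_to_conll_format` is called internally and is assumed to be importable from the same module in the original source (AllenNLP's SRL utilities).

sentence   = ["The", "cat", "sat"]
prediction = ["B-ARG0", "I-ARG0", "B-V"]
gold       = ["B-ARG0", "I-ARG0", "B-V"]

with open("predictions.txt", "w") as pred_f, open("gold.txt", "w") as gold_f:
    write_to_conll_eval_file(pred_f, gold_f, verb_index=2,
                             sentence=sentence, prediction=prediction,
                             gold_labels=gold)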
Example #12
def writelines_nl(fileobj: TextIO, lines: Iterable[str]) -> None:
    # Since fileobj.writelines() doesn't add newlines...
    # http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file  # noqa
    fileobj.write('\n'.join(lines) + '\n')
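A quick usage sketch:

with open("notes.txt", "w") as fh:
    writelines_nl(fh, ["first line", "second line", "third line"])
# every item ends up newline-terminated, including the last one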
Example #13
def process_tag_link(log_file: typing.TextIO, link):
    print("Start processing tag link:" + link)
    log_file.write(link)
    log_file.write("\n")
Example #14
    def dump_cpp_enum(cls, file: TextIO) -> None:
        """
        Dump codes C++ enum representation to an open file

        :param file: Where to dump
        :return: None
        """
        file.write("// Generated error code enum\n")
        file.write("namespace ErrorCodes {\n")
        file.write("\tenum Errors {\n")

        for name, code in cls.get_codes().items():
            file.write(f"\t\t{name} = {code.raw_code},\n")

        file.write("\t};\n")
        file.write("};\n")
Example #15
def generate_latex_footer(f: TextIO):
    f.write("\\end{document}")
Example #16
 def log_cmd(self, f: TextIO) -> None:
     f.write(f'{self.get_str_cmd()}\n')
Example #17
def generate_latex_header(f: TextIO):
    f.write("\\documentclass{article}\n")
    f.write("\\usepackage{graphicx}\n")
    f.write("\\usepackage[margin=1cm]{geometry}\n")
    f.write("\\usepackage{amsmath}\n")
    f.write("\\pagenumbering{gobble}\n")
    f.write("\\begin{document}\n")
Example #18
def latex_emit_reaction(f: TextIO, reaction: dict, reaction_index=None):
    """
    reaction should be a dictionary with keys
    "reactants"
    "products"
    "dG"
    """
    f.write("$$\n")
    first = True
    if reaction_index is not None:
        f.write(str(reaction_index) + ":\n")
    for reactant_index in reaction["reactants"]:
        if first:
            first = False
        else:
            f.write("+\n")

        latex_emit_molecule(f, reactant_index)

    f.write("\\xrightarrow{" + ("%.2f" % reaction["dG"]) + "}\n")

    first = True
    for product_index in reaction["products"]:
        if first:
            first = False
        else:
            f.write("+\n")

        latex_emit_molecule(f, product_index)

    f.write("$$")
    f.write("\n\n\n")
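Examples #15, #17 and #18 combine naturally. The sketch below assumes `latex_emit_molecule` (called by `latex_emit_reaction` but not shown here) is available from the same module in the original source.

reaction = {"reactants": [0], "products": [1, 2], "dG": -0.75}

with open("report.tex", "w") as f:
    generate_latex_header(f)                           # Example #17
    latex_emit_reaction(f, reaction, reaction_index=1)
    generate_latex_footer(f)                           # Example #15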
Example #19
def eval_tracks(
    path_tracker_output_root: _PathLike,
    path_dataset_root: _PathLike,
    d_min: float,
    d_max: float,
    out_file: TextIO,
    centroid_method: str,
    diffatt: str,
    category: str = "VEHICLE",
) -> None:
    """Evaluate tracking output.

    Args:
        path_tracker_output: list of path to tracker output, one for each log
        path_dataset: path to dataset
        d_min: minimum distance range
        d_max: maximum distance range
        out_file: output file object
        centroid_method: method for ground truth centroid estimation
        diffatt: difficulty attribute ['easy',  'far', 'fast', 'occ', 'short']
        category: such as "VEHICLE" "PEDESTRIAN"
    """
    acc_c = mm.MOTAccumulator(auto_id=True)
    acc_i = mm.MOTAccumulator(auto_id=True)
    acc_o = mm.MOTAccumulator(auto_id=True)

    ID_gt_all: List[str] = []

    count_all: int = 0
    if diffatt is not None:
        import argoverse.evaluation

        pkl_path = os.path.join(os.path.dirname(argoverse.evaluation.__file__),
                                "dict_att_all.pkl")
        if not os.path.exists(pkl_path):
            # generate them on the fly
            print(pkl_path)
            raise NotImplementedError

        pickle_in = open(
            pkl_path,
            "rb")  # open(f"{path_dataset_root}/dict_att_all.pkl","rb")
        dict_att_all = pickle.load(pickle_in)

    path_datasets = glob.glob(os.path.join(path_dataset_root, "*"))
    num_total_gt = 0

    for path_dataset in path_datasets:  # path_tracker_output, path_dataset in zip(path_tracker_outputs, path_datasets):

        log_id = pathlib.Path(path_dataset).name
        if len(log_id) == 0 or log_id.startswith("_"):
            continue

        path_tracker_output = os.path.join(path_tracker_output_root, log_id)

        path_track_data = sorted(
            glob.glob(
                os.path.join(os.fspath(path_tracker_output),
                             "per_sweep_annotations_amodal", "*")))

        logger.info("log_id = %s", log_id)

        city_info_fpath = f"{path_dataset}/city_info.json"
        city_info = read_json_file(city_info_fpath)
        city_name = city_info["city_name"]
        logger.info("city name = %s", city_name)

        for ind_frame in range(len(path_track_data)):
            if ind_frame % 50 == 0:
                # print("%d/%d" % (ind_frame, len(path_track_data)))
                logger.info("%d/%d" % (ind_frame, len(path_track_data)))

            timestamp_lidar = int(path_track_data[ind_frame].split("/")
                                  [-1].split("_")[-1].split(".")[0])
            path_gt = os.path.join(
                path_dataset, "per_sweep_annotations_amodal",
                f"tracked_object_labels_{timestamp_lidar}.json")

            if not os.path.exists(path_gt):
                logger.warning("Missing %s", path_gt)
                continue

            gt_data = read_json_file(path_gt)

            pose_data = read_json_file(
                f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json"
            )
            rotation = np.array(pose_data["rotation"])
            translation = np.array(pose_data["translation"])
            ego_R = quat2rotmat(rotation)
            ego_t = translation
            egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)

            gt: Dict[str, Dict[str, Any]] = {}
            id_gts = []
            for i in range(len(gt_data)):

                if gt_data[i]["label_class"] != category:
                    continue

                if diffatt is not None:

                    if diffatt not in dict_att_all["test"][log_id][
                            gt_data[i]["track_label_uuid"]]["difficult_att"]:
                        continue

                bbox, orientation = label_to_bbox(gt_data[i])

                center = np.array([
                    gt_data[i]["center"]["x"], gt_data[i]["center"]["y"],
                    gt_data[i]["center"]["z"]
                ])
                if bbox[3] > 0 and in_distance_range_pose(
                        np.zeros(3), center, d_min, d_max):
                    track_label_uuid = gt_data[i]["track_label_uuid"]
                    gt[track_label_uuid] = {}
                    gt[track_label_uuid]["centroid"] = center

                    gt[track_label_uuid]["bbox"] = bbox
                    gt[track_label_uuid]["orientation"] = orientation
                    gt[track_label_uuid]["width"] = gt_data[i]["width"]
                    gt[track_label_uuid]["length"] = gt_data[i]["length"]
                    gt[track_label_uuid]["height"] = gt_data[i]["height"]

                    if track_label_uuid not in ID_gt_all:
                        ID_gt_all.append(track_label_uuid)

                    id_gts.append(track_label_uuid)
                    num_total_gt += 1

            tracks: Dict[str, Dict[str, Any]] = {}
            id_tracks: List[str] = []

            track_data = read_json_file(path_track_data[ind_frame])

            for track in track_data:
                key = track["track_label_uuid"]

                if track["label_class"] != category or track["height"] == 0:
                    continue

                center = np.array([
                    track["center"]["x"], track["center"]["y"],
                    track["center"]["z"]
                ])
                bbox, orientation = label_to_bbox(track)
                if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
                    tracks[key] = {}
                    tracks[key]["centroid"] = center
                    tracks[key]["bbox"] = bbox
                    tracks[key]["orientation"] = orientation
                    tracks[key]["width"] = track["width"]
                    tracks[key]["length"] = track["length"]
                    tracks[key]["height"] = track["height"]

                    id_tracks.append(key)

            dists_c: List[List[float]] = []
            dists_i: List[List[float]] = []
            dists_o: List[List[float]] = []
            for gt_key, gt_value in gt.items():
                gt_track_data_c: List[float] = []
                gt_track_data_i: List[float] = []
                gt_track_data_o: List[float] = []
                dists_c.append(gt_track_data_c)
                dists_i.append(gt_track_data_i)
                dists_o.append(gt_track_data_o)
                for track_key, track_value in tracks.items():
                    count_all += 1
                    gt_track_data_c.append(
                        get_distance(gt_value, track_value, "centroid"))
                    gt_track_data_i.append(
                        get_distance(gt_value, track_value, "iou"))
                    gt_track_data_o.append(
                        get_distance(gt_value, track_value, "orientation"))

            acc_c.update(id_gts, id_tracks, dists_c)
            acc_i.update(id_gts, id_tracks, dists_i)
            acc_o.update(id_gts, id_tracks, dists_o)
    # print(count_all)
    if count_all == 0:
        # Fix for when all hypotheses are empty;
        # py-motmetrics currently doesn't support this, see https://github.com/cheind/py-motmetrics/issues/49
        acc_c.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)
        acc_i.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)
        acc_o.update(id_gts, ["dummy_id"], np.ones(np.shape(id_gts)) * np.inf)

    summary = mh.compute(
        acc_c,
        metrics=[
            "num_frames",
            "mota",
            "motp",
            "idf1",
            "mostly_tracked",
            "mostly_lost",
            "num_false_positives",
            "num_misses",
            "num_switches",
            "num_fragmentations",
        ],
        name="acc",
    )
    logger.info("summary = %s", summary)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    num_frames = summary["num_frames"][0]
    mota = summary["mota"][0] * 100
    motp_c = summary["motp"][0]
    idf1 = summary["idf1"][0]
    most_track = summary["mostly_tracked"][0] / num_tracks
    most_lost = summary["mostly_lost"][0] / num_tracks
    num_fp = summary["num_false_positives"][0]
    num_miss = summary["num_misses"][0]
    num_switch = summary["num_switches"][0]
    num_flag = summary["num_fragmentations"][0]

    acc_c.events.loc[acc_c.events.Type != "RAW",
                     "D"] = acc_i.events.loc[acc_c.events.Type != "RAW", "D"]

    sum_motp_i = mh.compute(acc_c, metrics=["motp"], name="acc")
    logger.info("MOTP-I = %s", sum_motp_i)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    motp_i = sum_motp_i["motp"][0]

    acc_c.events.loc[acc_c.events.Type != "RAW",
                     "D"] = acc_o.events.loc[acc_c.events.Type != "RAW", "D"]
    sum_motp_o = mh.compute(acc_c, metrics=["motp"], name="acc")
    logger.info("MOTP-O = %s", sum_motp_o)
    num_tracks = len(ID_gt_all)

    fn = os.path.basename(path_tracker_output)
    motp_o = sum_motp_o["motp"][0]

    out_string = (
        f"{fn} {num_frames} {mota:.2f} {motp_c:.2f} {motp_o:.2f} {motp_i:.2f} {idf1:.2f} {most_track:.2f} "
        f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n")
    out_file.write(out_string)
Example #20
 def render(self, out: TextIO) -> None:
     out.write("%s %s\n" % (self.id, self.rest))
Example #21
    def dump_qml_dictionary(cls, file: TextIO):
        """
         Dump code QML map representation to an open file
        :param file: Where to dump
        :return: None
        """
        file.write("import QtQuick 2.10\n")
        file.write(
            "/* Generated by sla-errors. Your edits to this file will be lost. */\n"
        )
        file.write("pragma Singleton\n")
        file.write("Item {\n")
        file.write("\treadonly property var messages:{\n")

        for code in cls.get_codes().values():
            if code.message:
                file.write(f"\t\t{code.raw_code}: qsTr({code.raw_message}),\n")

        file.write("\t}\n")
        file.write("}\n")
Example #22
 def render(self, out: TextIO) -> None:
     out.write("}\n")
Example #23
def append_to_log(lf: T.TextIO, line: str) -> None:
    lf.write(line)
    if not line.endswith('\n'):
        lf.write('\n')
    lf.flush()
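A small sketch showing the newline normalisation:

with open("run.log", "a") as lf:
    append_to_log(lf, "first entry")       # newline appended automatically
    append_to_log(lf, "second entry\n")    # already terminated, written as-is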
Example #24
def writeln(file: TextIO, line: Text) -> int:
    return file.write(line + '\n')
Example #25
 def __init__(self, stream: TextIO, fixed_tables=False):
     self.stream = stream
     if fixed_tables:
         stream.write(PREFACE)
     stream.write("0\nSECTION\n2\nENTITIES\n")  # write header
Example #26
def write_start(f: TextIO):
    f.write(
        "// WARNING: THIS FILE WAS AUTO GENERATED BY make_translation.py. PLEASE DO NOT EDIT.\n"
    )
    f.write("\n")
    f.write('#include "Translation.h"\n')
Example #27
def fail(msg: str, stderr: TextIO) -> None:
    stderr.write('%s\n' % msg)
    sys.exit(2)
Example #28
def write_to_conll_2009_eval_file(prediction_file: TextIO,
                                  gold_file: TextIO,
                                  pred_indices: List[List[int]],
                                  gold_senses: List[List[str]],
                                  predicted_senses: List[List[str]],
                                  sentence: List[str],
                                  gold_tags: List[List[str]],
                                  predicted_tags: List[List[str]],
                                  pos_tags: Optional[List[str]] = None):
    """
    Prints predicate argument predictions and optionally gold labels for a single 
    predicate in a sentence to two provided file references.

    Parameters
    ----------
    prediction_file : TextIO, required.
        A file reference to print predictions to.
    gold_file : TextIO, required.
        A file reference to print gold labels to.
    gold_senses : List[List[str]]
        The gold predicate senses.
    pred_indices : List[List[int]], required.
        For each predicate, a binary indicator vector over the sentence
        tokens marking the predicate's position.
    sentence : List[str], required.
        The word tokens.
    predicted_tags : List[str], required.
        The predicted BIO labels.
    gold_tags : List[str], required.
        The gold BIO labels.
    """
    pred_only_sentence = ["_"] * len(sentence)
    gold_only_sentence = ["_"] * len(sentence)
    pred_indicators = ["_"] * len(sentence)

    for i, pred_index_set in enumerate(pred_indices):
        for pidx, val in enumerate(pred_index_set):
            if val:
                pred_only_sentence[pidx] = predicted_senses[i]
                if len(gold_senses) > 0:
                    gold_only_sentence[pidx] = gold_senses[i]
                pred_indicators[pidx] = 'Y'

    lines = []
    for idx in range(len(sentence)):
        word = sentence[idx].text
        line = ["_"] * (14 + len(pred_indices))
        line[0] = str(idx+1)
        line[1] = word

        if pos_tags is not None:
            line[4] = pos_tags[idx]
            line[5] = pos_tags[idx]

        if pred_indicators[idx] == 'Y':
            line[12] = pred_indicators[idx]
            line[13] = pred_only_sentence[idx]
        for i, predicate_tags in enumerate(predicted_tags):
            if predicate_tags[idx] != 'O':
                tag = predicate_tags[idx]
                line[14+i] = '-'.join(tag.split('-')[1:]) # remove the B- from the beginning of the tag
        prediction_file.write('\t'.join(line)+'\n')
        prediction_file.flush()
        lines.append(line)

        if pred_indicators[idx] == 'Y':
            line[13] = gold_only_sentence[idx]
        for i, predicate_tags in enumerate(gold_tags):
            if predicate_tags[idx] != 'O':
                tag = predicate_tags[idx]
                line[14+i] = '-'.join(tag.split('-')[1:]) # remove the B- from the beginning of the tag
        gold_file.write('\t'.join(line)+'\n')
        gold_file.flush()

    prediction_file.write("\n")
    gold_file.write("\n")
Example #29
def process(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: str = "py",
    config: Config = DEFAULT_CONFIG,
) -> bool:
    """Parses stream identifying sections of contiguous imports and sorting them

    Code with unsorted imports is read from the provided `input_stream`, sorted and then
    outputted to the specified `output_stream`.

    - `input_stream`: Text stream with unsorted import sections.
    - `output_stream`: Text stream to output sorted inputs into.
    - `config`: Config settings to use when sorting imports. Defaults settings.
        - *Default*: `isort.settings.DEFAULT_CONFIG`.
    - `extension`: The file extension or file extension rules that should be used.
        - *Default*: `"py"`.
        - *Choices*: `["py", "pyi", "pyx"]`.

    Returns `True` if there were changes that needed to be made (errors present) from what
    was provided in the input_stream, otherwise `False`.
    """
    line_separator: str = config.line_ending
    add_imports: List[str] = [format_natural(addition) for addition in config.add_imports]
    import_section: str = ""
    next_import_section: str = ""
    next_cimports: bool = False
    in_quote: str = ""
    first_comment_index_start: int = -1
    first_comment_index_end: int = -1
    contains_imports: bool = False
    in_top_comment: bool = False
    first_import_section: bool = True
    indent: str = ""
    isort_off: bool = False
    code_sorting: Union[bool, str] = False
    code_sorting_section: str = ""
    code_sorting_indent: str = ""
    cimports: bool = False
    made_changes: bool = False
    stripped_line: str = ""
    end_of_file: bool = False
    verbose_output: List[str] = []

    if config.float_to_top:
        new_input = ""
        current = ""
        isort_off = False
        for line in chain(input_stream, (None,)):
            if isort_off and line is not None:
                if line == "# isort: on\n":
                    isort_off = False
                new_input += line
            elif line in ("# isort: split\n", "# isort: off\n", None) or str(line).endswith(
                "# isort: split\n"
            ):
                if line == "# isort: off\n":
                    isort_off = True
                if current:
                    if add_imports:
                        add_line_separator = line_separator or "\n"
                        current += add_line_separator + add_line_separator.join(add_imports)
                        add_imports = []
                    parsed = parse.file_contents(current, config=config)
                    verbose_output += parsed.verbose_output
                    extra_space = ""
                    while current and current[-1] == "\n":
                        extra_space += "\n"
                        current = current[:-1]
                    extra_space = extra_space.replace("\n", "", 1)
                    sorted_output = output.sorted_imports(
                        parsed, config, extension, import_type="import"
                    )
                    made_changes = made_changes or _has_changed(
                        before=current,
                        after=sorted_output,
                        line_separator=parsed.line_separator,
                        ignore_whitespace=config.ignore_whitespace,
                    )
                    new_input += sorted_output
                    new_input += extra_space
                    current = ""
                new_input += line or ""
            else:
                current += line or ""

        input_stream = StringIO(new_input)

    for index, line in enumerate(chain(input_stream, (None,))):
        if line is None:
            if index == 0 and not config.force_adds:
                return False

            not_imports = True
            end_of_file = True
            line = ""
            if not line_separator:
                line_separator = "\n"

            if code_sorting and code_sorting_section:
                sorted_code = textwrap.indent(
                    isort.literal.assignment(
                        code_sorting_section,
                        str(code_sorting),
                        extension,
                        config=_indented_config(config, indent),
                    ),
                    code_sorting_indent,
                )
                made_changes = made_changes or _has_changed(
                    before=code_sorting_section,
                    after=sorted_code,
                    line_separator=line_separator,
                    ignore_whitespace=config.ignore_whitespace,
                )
                output_stream.write(sorted_code)
        else:
            stripped_line = line.strip()
            if stripped_line and not line_separator:
                line_separator = line[len(line.rstrip()) :].replace(" ", "").replace("\t", "")

            for file_skip_comment in FILE_SKIP_COMMENTS:
                if file_skip_comment in line:
                    raise FileSkipComment("Passed in content")

            if not in_quote and stripped_line == "# isort: off":
                isort_off = True

            if (
                (index == 0 or (index in (1, 2) and not contains_imports))
                and stripped_line.startswith("#")
                and stripped_line not in config.section_comments
            ):
                in_top_comment = True
            elif in_top_comment and (
                not line.startswith("#") or stripped_line in config.section_comments
            ):
                in_top_comment = False
                first_comment_index_end = index - 1

            was_in_quote = bool(in_quote)
            if (not stripped_line.startswith("#") or in_quote) and '"' in line or "'" in line:
                char_index = 0
                if first_comment_index_start == -1 and (
                    line.startswith('"') or line.startswith("'")
                ):
                    first_comment_index_start = index
                while char_index < len(line):
                    if line[char_index] == "\\":
                        char_index += 1
                    elif in_quote:
                        if line[char_index : char_index + len(in_quote)] == in_quote:
                            in_quote = ""
                            if first_comment_index_end < first_comment_index_start:
                                first_comment_index_end = index
                    elif line[char_index] in ("'", '"'):
                        long_quote = line[char_index : char_index + 3]
                        if long_quote in ('"""', "'''"):
                            in_quote = long_quote
                            char_index += 2
                        else:
                            in_quote = line[char_index]
                    elif line[char_index] == "#":
                        break
                    char_index += 1

            not_imports = bool(in_quote) or was_in_quote or in_top_comment or isort_off
            if not (in_quote or was_in_quote or in_top_comment):
                if isort_off:
                    if stripped_line == "# isort: on":
                        isort_off = False
                elif stripped_line.endswith("# isort: split"):
                    not_imports = True
                elif stripped_line in CODE_SORT_COMMENTS:
                    code_sorting = stripped_line.split("isort: ")[1].strip()
                    code_sorting_indent = line[: -len(line.lstrip())]
                    not_imports = True
                elif code_sorting:
                    if not stripped_line:
                        sorted_code = textwrap.indent(
                            isort.literal.assignment(
                                code_sorting_section,
                                str(code_sorting),
                                extension,
                                config=_indented_config(config, indent),
                            ),
                            code_sorting_indent,
                        )
                        made_changes = made_changes or _has_changed(
                            before=code_sorting_section,
                            after=sorted_code,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        output_stream.write(sorted_code)
                        not_imports = True
                        code_sorting = False
                        code_sorting_section = ""
                        code_sorting_indent = ""
                    else:
                        code_sorting_section += line
                        line = ""
                elif stripped_line in config.section_comments:
                    if import_section and not contains_imports:
                        output_stream.write(import_section)
                        import_section = line
                        not_imports = False
                    else:
                        import_section += line
                    indent = line[: -len(line.lstrip())]
                elif not (stripped_line or contains_imports):
                    not_imports = True
                elif (
                    not stripped_line
                    or stripped_line.startswith("#")
                    and (not indent or indent + line.lstrip() == line)
                    and not config.treat_all_comments_as_code
                    and stripped_line not in config.treat_comments_as_code
                ):
                    import_section += line
                elif stripped_line.startswith(IMPORT_START_IDENTIFIERS):
                    new_indent = line[: -len(line.lstrip())]
                    import_statement = line
                    stripped_line = line.strip().split("#")[0]
                    while stripped_line.endswith("\\") or (
                        "(" in stripped_line and ")" not in stripped_line
                    ):
                        if stripped_line.endswith("\\"):
                            while stripped_line and stripped_line.endswith("\\"):
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line
                        else:
                            while ")" not in stripped_line:
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line

                    if (
                        import_statement.lstrip().startswith("from")
                        and "import" not in import_statement
                    ):
                        line = import_statement
                        not_imports = True
                    else:
                        did_contain_imports = contains_imports
                        contains_imports = True

                        cimport_statement: bool = False
                        if (
                            import_statement.lstrip().startswith(CIMPORT_IDENTIFIERS)
                            or " cimport " in import_statement
                            or " cimport*" in import_statement
                            or " cimport(" in import_statement
                            or ".cimport" in import_statement
                        ):
                            cimport_statement = True

                        if cimport_statement != cimports or (
                            new_indent != indent
                            and import_section
                            and (not did_contain_imports or len(new_indent) < len(indent))
                        ):
                            indent = new_indent
                            if import_section:
                                next_cimports = cimport_statement
                                next_import_section = import_statement
                                import_statement = ""
                                not_imports = True
                                line = ""
                            else:
                                cimports = cimport_statement
                        else:
                            if new_indent != indent:
                                if import_section and did_contain_imports:
                                    import_statement = indent + import_statement.lstrip()
                                else:
                                    indent = new_indent
                        import_section += import_statement
                else:
                    not_imports = True

        if not_imports:
            raw_import_section: str = import_section
            if (
                add_imports
                and (stripped_line or end_of_file)
                and not config.append_only
                and not in_top_comment
                and not in_quote
                and not import_section
                and not line.lstrip().startswith(COMMENT_INDICATORS)
            ):
                import_section = line_separator.join(add_imports) + line_separator
                if end_of_file and index != 0:
                    output_stream.write(line_separator)
                contains_imports = True
                add_imports = []

            if next_import_section and not import_section:  # pragma: no cover
                raw_import_section = import_section = next_import_section
                next_import_section = ""

            if import_section:
                if add_imports and not indent:
                    import_section = (
                        line_separator.join(add_imports) + line_separator + import_section
                    )
                    contains_imports = True
                    add_imports = []

                if not indent:
                    import_section += line
                    raw_import_section += line
                if not contains_imports:
                    output_stream.write(import_section)

                else:
                    leading_whitespace = import_section[: -len(import_section.lstrip())]
                    trailing_whitespace = import_section[len(import_section.rstrip()) :]
                    if first_import_section and not import_section.lstrip(
                        line_separator
                    ).startswith(COMMENT_INDICATORS):
                        import_section = import_section.lstrip(line_separator)
                        raw_import_section = raw_import_section.lstrip(line_separator)
                        first_import_section = False

                    if indent:
                        import_section = "".join(
                            line[len(indent) :] for line in import_section.splitlines(keepends=True)
                        )

                    parsed_content = parse.file_contents(import_section, config=config)
                    verbose_output += parsed_content.verbose_output

                    sorted_import_section = output.sorted_imports(
                        parsed_content,
                        _indented_config(config, indent),
                        extension,
                        import_type="cimport" if cimports else "import",
                    )
                    if not (import_section.strip() and not sorted_import_section):
                        if indent:
                            sorted_import_section = (
                                leading_whitespace
                                + textwrap.indent(sorted_import_section, indent).strip()
                                + trailing_whitespace
                            )

                        made_changes = made_changes or _has_changed(
                            before=raw_import_section,
                            after=sorted_import_section,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        output_stream.write(sorted_import_section)
                        if not line and not indent and next_import_section:
                            output_stream.write(line_separator)

                if indent:
                    output_stream.write(line)
                    if not next_import_section:
                        indent = ""

                if next_import_section:
                    cimports = next_cimports
                    contains_imports = True
                else:
                    contains_imports = False
                import_section = next_import_section
                next_import_section = ""
            else:
                output_stream.write(line)
                not_imports = False

            if stripped_line and not in_quote and not import_section and not next_import_section:
                if stripped_line == "yield":
                    while not stripped_line or stripped_line == "yield":
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

                if stripped_line.startswith("raise") or stripped_line.startswith("yield"):
                    while stripped_line.endswith("\\"):
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

    if made_changes and config.only_modified:
        for output_str in verbose_output:
            print(output_str)

    return made_changes
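`process` is the lower-level routine that `sort_stream` from Example #7 delegates to. A sketch with in-memory streams and the default configuration:

from io import StringIO

source = StringIO("import sys\nimport os\n\nprint(os.getcwd())\n")
result = StringIO()

changed = process(source, result, extension="py")
print(changed)              # True: the imports were reordered
print(result.getvalue())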
Example #30
def printHeader(destinationFile: TextIO, baseFile: str):
    destinationFile.write(f'''/**
 * Declaration of Project variables.
 * NOTE: Auto-generated from {baseFile}
 */
''')
Example #31
def main(
    assignment_dir: str,
    logs_dir: str,
    template: str,
    outfile: TextIO,
    contam_warning: float,
    unmapped_warning: float,
):
    """This script generates a HTML file with a table containing information about the
    composition and lineage of each sample.
    """
    contam_warning /= 100
    unmapped_warning /= 100

    def highlight_high_contam(col: pd.Series):
        """Highlights cells in a column if their contamination level is over the
        threshold
        """
        return [
            f"background-color: {NORD12}" if val > contam_warning else ""
            for val in col
        ]

    def highlight_high_unmapped(col: pd.Series):
        """Highlights cells if their unmapped level is over the threshold
        """
        return [
            f"background-color: {NORD12}" if val > unmapped_warning else ""
            for val in col
        ]

    def highlight_abnormal_lineages(col: pd.Series):
        """Highlights cells if their lineage is not one of the numbered majors.
        """
        return [
            f"background-color: {NORD12}" if v.isalpha() else "" for v in col
        ]

    data = defaultdict(dict)
    logfiles = Path(logs_dir).rglob("*.log")
    for file in logfiles:
        sample = file.name.split(".")[0]
        tech = file.name.split(".")[1]
        num_keep, num_contam, num_unmapped = ripgrep_search(file)
        total = sum([num_keep, num_contam, num_unmapped])
        data[sample].update({
            f"{tech}_keep": num_keep,
            f"{tech}_keep%": num_keep / total,
            f"{tech}_contam": num_contam,
            f"{tech}_contam%": num_contam / total,
            f"{tech}_unmapped": num_unmapped,
            f"{tech}_unmapped%": num_unmapped / total,
            f"{tech}_total": total,
        })

    assignment_files = Path(assignment_dir).rglob("*.csv")
    for file in assignment_files:
        fields = file.read_text().split("\n")[1].split(",")
        sample = file.name.split(".")[0]
        data[sample]["major_lineage"] = fields[1]
        data[sample]["full_lineage"] = fields[2]
        data[sample]["found_lineages"] = " ".join(fields[3].split(";"))

    df = pd.DataFrame(data).T
    df.index.name = "sample"

    percent_format_cols = [
        s for s in data[list(data.keys())[0]].keys() if s.endswith("%")
    ]
    df_styled = (df.style.apply(
        highlight_high_contam,
        subset=["illumina_contam%", "nanopore_contam%"
                ]).apply(highlight_abnormal_lineages, subset=[
                    "major_lineage"
                ]).apply(highlight_high_unmapped,
                         subset=["nanopore_unmapped%", "illumina_unmapped%"
                                 ]).format("{:.2%}",
                                           subset=percent_format_cols))

    table_html = df_styled.render()
    template_content = Path(template).read_text()
    html = jinja2.Template(template_content).render(
        table=table_html,
        contam_warning=contam_warning,
        unmapped_warning=unmapped_warning,
    )
    outfile.write(html)
    outfile.close()
Example #32
def fail(msg: str, stderr: TextIO, options: Options) -> None:
    """Fail with a serious error."""
    stderr.write('%s\n' % msg)
    maybe_write_junit_xml(0.0, serious=True, messages=[msg], options=options)
    sys.exit(2)
Exemplo n.º 33
0
 def write_prediction(self, prediction, batch, output: TextIO):
     batch_tokens = self.spans_to_tokens(prediction, batch)
     for tokens in batch_tokens:
         output.write(' '.join(tokens))
         output.write('\n')
Exemplo n.º 34
0
 def _save_file_handle(self, f: typing.TextIO) -> None:
     f.write(self.__str__())
     f.truncate()
Exemplo n.º 35
0
def printHeader(destinationFile: TextIO, baseFile: str):
    destinationFile.write(f'''/**
 * Mapping for all known objects.
 * NOTE: Auto-generated from {baseFile}
 */\n
''')
Exemplo n.º 36
0
 def render(self, out: TextIO) -> None:
     out.write("%s -> %s %s\n" % (self.fro, self.to, self.rest))
Exemplo n.º 37
0
def write_ninja_rnd(handle: TextIO, toolchain: Toolchain, otbn_dir: str,
                    count: int, start_seed: int, size: int) -> None:
    '''Write a build.ninja to build random binaries.

    The rules build everything in the same directory as the build.ninja file.
    OTBN tooling is found through the toolchain argument.

    '''
    assert count > 0
    assert start_seed >= 0
    assert size > 0

    otbn_rig = os.path.join(otbn_dir, 'dv/rig/otbn-rig')

    handle.write(
        'rule rig-gen\n'
        '  command = {rig} gen --size {size} --seed $seed -o $out\n\n'.format(
            rig=otbn_rig, size=size))

    handle.write('rule rig-asm\n'
                 '  command = {rig} asm -o $seed $in\n\n'.format(rig=otbn_rig))

    handle.write(
        'rule as\n'
        '  command = RV32_TOOL_AS={rv32_as} {otbn_as} -o $out $in\n\n'.format(
            rv32_as=toolchain.rv32_tool_as, otbn_as=toolchain.otbn_as))

    handle.write('rule ld\n'
                 '  command = RV32_TOOL_LD={rv32_ld} '
                 '{otbn_ld} -o $out -T $ldscript $in\n'.format(
                     rv32_ld=toolchain.rv32_tool_ld,
                     otbn_ld=toolchain.otbn_ld))

    for seed in range(start_seed, start_seed + count):
        # Generate the .s and .ld files.
        handle.write('build {seed}.json: rig-gen\n'
                     '  seed = {seed}\n'.format(seed=seed))

        handle.write('build {seed}.s {seed}.ld: rig-asm {seed}.json\n'
                     '  seed = {seed}\n'.format(seed=seed))

        # Assemble the asm file to an object
        handle.write('build {seed}.o: as {seed}.s\n'.format(seed=seed))

        # Link the object to an ELF, using the relevant LD file
        handle.write('build {seed}.elf: ld {seed}.o\n'
                     '  ldscript = {seed}.ld\n\n'.format(seed=seed))
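A possible invocation, sketched with a duck-typed stand-in for the Toolchain argument (the tool paths and counts below are hypothetical; only write_ninja_rnd itself comes from the example above):

from types import SimpleNamespace

toolchain = SimpleNamespace(
    rv32_tool_as='riscv32-unknown-elf-as',  # hypothetical tool locations
    rv32_tool_ld='riscv32-unknown-elf-ld',
    otbn_as='util/otbn-as',
    otbn_ld='util/otbn-ld',
)
with open('build.ninja', 'w') as handle:
    write_ninja_rnd(handle, toolchain, otbn_dir='hw/ip/otbn',
                    count=10, start_seed=0, size=2000)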
Exemplo n.º 38
0
 def render(self, out: TextIO) -> None:
     out.write("subgraph %s {\n" % self.name)
     out.write("rank = same\n")
Exemplo n.º 39
0
 def export(self, sounds: Iterable['Sound'], file: TextIO):
     """Write SoundScripts to a file.
     
     Pass a file-like object open for text writing, and an iterable
     of Sounds to write to the file.
     """
     for snd in sounds:
         file.write('"{}"\n\t{{\n'.format(snd.name))
         
         file.write('\t' 'channel {}\n'.format(snd.channel.value))
         
         file.write('\t' 'soundlevel {}\n'.format(join_float(snd.level)))
         
         if snd.volume != (1, 1):
             file.write('\tvolume {}\n'.format(join_float(snd.volume)))
         if snd.pitch != (100, 100):
             file.write('\tpitch {}\n'.format(join_float(snd.pitch)))
         
         if len(snd.sounds) > 1:
             file.write('\trandwav\n\t\t{\n')
             for wav in snd.sounds:
                 file.write('\t\twave "{}"\n'.format(wav))
             file.write('\t\t}\n')
         else:
             file.write('\twave "{}"\n'.format(snd.sounds[0]))
         
         if snd.stack_start or snd.stack_stop or snd.stack_update:
             file.write(
                 '\t' 'soundentry_version 2\n'
                 '\t' 'operator_stacks\n'
                 '\t\t' '{\n'
             )
             if snd.stack_start:
                 file.write(
                     '\t\t' 'start_stack\n'
                     '\t\t\t' '{\n'
                 )
                 for prop in snd.stack_start:
                     for line in prop.export():
                         file.write('\t\t\t' + line)
                 file.write('\t\t\t}\n')
             if snd.stack_update:
                 file.write(
                     '\t\t' 'update_stack\n'
                     '\t\t\t' '{\n'
                 )
                 for prop in snd.stack_update:
                     for line in prop.export():
                         file.write('\t\t\t' + line)
                 file.write('\t\t\t}\n')
             if snd.stack_stop:
                 file.write(
                     '\t\t' 'stop_stack\n'
                     '\t\t\t' '{\n'
                 )
                 for prop in snd.stack_stop:
                     for line in prop.export():
                         file.write('\t\t\t' + line)
                 file.write('\t\t\t}\n')
             file.write('\t\t}\n')
         file.write('\t}\n')
Exemplo n.º 40
0
 def render(self, out: TextIO) -> None:
     self.start.render(out)
     for i in self.elements:
         i.render(out)
     out.write("\n")
Exemplo n.º 41
0
def dump_conn(out: TextIO,
              conn: sqlite3.Connection,
              include_schema=False) -> None:
    schema_cur = conn.cursor()
    schema_cur.execute(
        "select name, sql from sqlite_master where type = 'table'")
    for name, sql in cast(Iterator[Tuple[str, str]], schema_cur):
        # Write CREATE TABLE first
        if include_schema:
            out.write(sql)
            out.write(";\n")
        # Write INSERTs
        cur = conn.cursor()
        cur.execute(f"select * from {name}")
        for row in cur:
            out.write("INSERT INTO ")
            out.write(name)
            out.write(" VALUES (")
            for i, col in enumerate(row):
                if i != 0:
                    out.write(", ")
                dump_value(out, col)
            out.write(");\n")
        # Write indexes and triggers
        if include_schema:
            cur.execute("select sql from sqlite_master where tbl_name = ? "
                        "and type in ('index', 'trigger')")
            for (sql, ) in cast(Iterator[Tuple[str]], cur):
                out.write(sql)
                out.write(";\n")
Exemplo n.º 42
0
def dump_conditions(file: TextIO) -> None:
    """Dump docs for all the condition flags, results and metaconditions."""

    LOGGER.info('Dumping conditions...')

    # Delete existing data, after the marker.
    file.seek(0, io.SEEK_SET)

    prelude = []

    for line in file:
        if DOC_MARKER in line:
            break
        prelude.append(line)

    file.seek(0, io.SEEK_SET)
    file.truncate(0)

    if not prelude:
        # No marker, blank the whole thing.
        LOGGER.warning('No intro text before marker!')

    for line in prelude:
        file.write(line)
    file.write(DOC_MARKER + '\n\n')

    file.write(DOC_META_COND)

    ALL_META.sort(key=lambda i: i[1])  # Sort by priority
    for flag_key, priority, func in ALL_META:
        file.write('#### `{}` ({}):\n\n'.format(flag_key, priority))
        dump_func_docs(file, func)
        file.write('\n')

    for lookup, name in [
            (ALL_FLAGS, 'Flags'),
            (ALL_RESULTS, 'Results'),
            ]:
        print('<!------->', file=file)
        print('# ' + name, file=file)
        print('<!------->', file=file)

        lookup_grouped = defaultdict(list)  # type: Dict[str, List[Tuple[str, Tuple[str, ...], Callable]]]

        for flag_key, aliases, func in lookup:
            group = getattr(func, 'group', 'ERROR')
            if group is None:
                group = '00special'
            lookup_grouped[group].append((flag_key, aliases, func))

        # Collapse 1-large groups into Ungrouped.
        for group in list(lookup_grouped):
            if len(lookup_grouped[group]) < 2:
                lookup_grouped[''].extend(lookup_grouped[group])
                del lookup_grouped[group]

        if not lookup_grouped['']:
            del lookup_grouped['']

        for header_ind, (group, funcs) in enumerate(sorted(lookup_grouped.items())):
            if group == '':
                group = 'Ungrouped Conditions'

            if header_ind:
                # Not before the first one...
                print('---------\n', file=file)

            if group == '00special':
                print(DOC_SPECIAL_GROUP, file=file)
            else:
                print('### ' + group + '\n', file=file)

            LOGGER.info('Doing {} group...', group)

            for flag_key, aliases, func in funcs:
                print('#### `{}`:\n'.format(flag_key), file=file)
                if aliases:
                    print('**Aliases:** `' + '`, `'.join(aliases) + '`' + '  \n', file=file)
                dump_func_docs(file, func)
                file.write('\n')
Exemplo n.º 43
0
    def write(self, stream: TextIO) -> None:
        """
        Write style data to file-like object <stream>.

        """
        index = self.index
        stream.write(' %d{\n' % index)
        stream.write('  name="%s\n' % color_name(index))
        stream.write('  localized_name="%s\n' % color_name(index))
        stream.write('  description="%s\n' % self.description)
        stream.write('  color=%d\n' % self._color)
        if self._color != OBJECT_COLOR:
            stream.write('  mode_color=%d\n' % self._mode_color)
        stream.write('  color_policy=%d\n' % self._color_policy)
        stream.write('  physical_pen_number=%d\n' % self.physical_pen_number)
        stream.write('  virtual_pen_number=%d\n' % self.virtual_pen_number)
        stream.write('  screen=%d\n' % self.screen)
        stream.write('  linepattern_size=%s\n' % str(self.linepattern_size))
        stream.write('  linetype=%d\n' % self.linetype)
        stream.write('  adaptive_linetype=%s\n' %
                     str(bool(self.adaptive_linetype)).upper())
        stream.write('  lineweight=%s\n' % str(self.lineweight))
        stream.write('  fill_style=%d\n' % self.fill_style)
        stream.write('  end_style=%d\n' % self.end_style)
        stream.write('  join_style=%d\n' % self.join_style)
        stream.write(' }\n')
Exemplo n.º 44
0
def writeline_nl(fileobj: TextIO, line: str) -> None:
    fileobj.write(line + '\n')
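Since the helper only needs a text write method, any file-like object works; a quick sanity check with io.StringIO:

import io

buf = io.StringIO()
writeline_nl(buf, 'hello')
assert buf.getvalue() == 'hello\n'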
Exemplo n.º 45
0
def write_conll_formatted_tags_to_file(
    prediction_file: TextIO,
    gold_file: TextIO,
    verb_index: Optional[int],
    sentence: List[str],
    conll_formatted_predictions: List[str],
    conll_formatted_gold_labels: List[str],
):
    """
    Prints predicate argument predictions and gold labels for a single verbal
    predicate in a sentence to two provided file references.

    The CoNLL SRL format is described in
    [the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).

    This function expects IOB2-formatted tags, where the B- tag is used in the beginning
    of every chunk (i.e. all chunks start with the B- tag).

    # Parameters

    prediction_file : `TextIO`, required.
        A file reference to print predictions to.
    gold_file : `TextIO`, required.
        A file reference to print gold labels to.
    verb_index : `Optional[int]`, required.
        The index of the verbal predicate in the sentence which
        the gold labels are the arguments for, or None if the sentence
        contains no verbal predicate.
    sentence : `List[str]`, required.
        The word tokens.
    conll_formatted_predictions : `List[str]`, required.
        The predicted CoNLL-formatted labels.
    conll_formatted_gold_labels : `List[str]`, required.
        The gold CoNLL-formatted labels.
    """
    verb_only_sentence = ["-"] * len(sentence)
    if verb_index is not None:
        verb_only_sentence[verb_index] = sentence[verb_index]

    for word, predicted, gold in zip(verb_only_sentence,
                                     conll_formatted_predictions,
                                     conll_formatted_gold_labels):
        prediction_file.write(word.ljust(15))
        prediction_file.write(predicted.rjust(15) + "\n")
        gold_file.write(word.ljust(15))
        gold_file.write(gold.rjust(15) + "\n")
    prediction_file.write("\n")
    gold_file.write("\n")
Exemplo n.º 46
0
def dump_value(out: TextIO, value: Any) -> None:
    if value is None:
        out.write("NULL")
    elif isinstance(value, (int, float)):
        out.write(str(value))
    elif isinstance(value, bytes):
        out.write("X'")
        for byte in value:
            out.write(format(byte, "02X"))
        out.write("'")
    elif isinstance(value, str):
        out.write("'")
        value = re.sub(r"'", "''", value)
        value = re.sub("\0", "'||X'00'||'", value)
        out.write(value)
        out.write("'")
    else:
        raise TypeError(f"unsupported value: {value}")
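Each branch of dump_value in action, writing to an in-memory buffer (the values are chosen only to exercise the escaping rules):

import io

buf = io.StringIO()
for v in (None, 42, 3.5, b"\x00\xff", "O'Brien"):
    dump_value(buf, v)
    buf.write("\n")
print(buf.getvalue())
# NULL, 42, 3.5, X'00FF' and 'O''Brien', one per line.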
Exemplo n.º 47
0
def write_language(lang: dict, defs: dict, f: TextIO) -> None:
    language_code: str = lang["languageCode"]
    logging.info(f"Generating block for {language_code}")
    # Iterate over all of the text to build up the symbols & counts
    text_list = get_letter_counts(defs, lang)
    # From the letter counts, need to make a symbol translator & write out the font
    font_table_text, symbol_conversion_table = get_font_map_and_table(
        text_list)

    try:
        lang_name = lang["languageLocalName"]
    except KeyError:
        lang_name = language_code

    f.write(f"\n// ---- {lang_name} ----\n\n")
    f.write(font_table_text)
    f.write(f"\n// ---- {lang_name} ----\n\n")

    str_table: List[str] = []
    str_group_messages: List[TranslationItem] = []
    str_group_messageswarn: List[TranslationItem] = []
    str_group_characters: List[TranslationItem] = []
    str_group_settingdesc: List[TranslationItem] = []
    str_group_settingshortnames: List[TranslationItem] = []
    str_group_settingmenuentries: List[TranslationItem] = []
    str_group_settingmenuentriesdesc: List[TranslationItem] = []

    eid: str

    # ----- Reading SettingsDescriptions
    obj = lang["menuOptions"]

    for index, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]
        str_group_settingdesc.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table)))
        str_table.append(obj[eid]["desc"])

    # ----- Reading Message strings

    obj = lang["messages"]

    for mod in defs["messages"]:
        eid = mod["id"]
        source_text = ""
        if "default" in mod:
            source_text = mod["default"]
        if eid in obj:
            source_text = obj[eid]
        str_group_messages.append(TranslationItem(eid, len(str_table)))
        str_table.append(source_text)

    obj = lang["messagesWarn"]

    for mod in defs["messagesWarn"]:
        eid = mod["id"]
        if isinstance(obj[eid], list):
            if not obj[eid][1]:
                source_text = obj[eid][0]
            else:
                source_text = obj[eid][0] + "\n" + obj[eid][1]
        else:
            source_text = "\n" + obj[eid]
        str_group_messageswarn.append(TranslationItem(eid, len(str_table)))
        str_table.append(source_text)

    # ----- Reading Characters

    obj = lang["characters"]

    for mod in defs["characters"]:
        eid = mod["id"]
        str_group_characters.append(TranslationItem(eid, len(str_table)))
        str_table.append(obj[eid])

    # Write out firmware constant options
    constants = get_constants()
    for x in constants:
        f.write(
            f'const char* {x[0]} = "{convert_string(symbol_conversion_table, x[1])}";//{x[1]} \n'
        )
    f.write("\n")

    # Debug Menu
    f.write("const char* DebugMenu[] = {\n")

    for c in get_debug_menu():
        f.write(f'\t "{convert_string(symbol_conversion_table, c)}",//{c} \n')
    f.write("};\n\n")

    # ----- Reading SettingsDescriptions
    obj = lang["menuOptions"]

    for index, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]
        if isinstance(obj[eid]["text2"], list):
            if not obj[eid]["text2"][1]:
                source_text = obj[eid]["text2"][0]
            else:
                source_text = obj[eid]["text2"][0] + "\n" + obj[eid]["text2"][1]
        else:
            source_text = "\n" + obj[eid]["text2"]
        str_group_settingshortnames.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table)))
        str_table.append(source_text)

    # ----- Reading Menu Groups
    obj = lang["menuGroups"]

    for index, mod in enumerate(defs["menuGroups"]):
        eid = mod["id"]
        if isinstance(obj[eid]["text2"], list):
            if not obj[eid]["text2"][1]:
                source_text = obj[eid]["text2"][0]
            else:
                source_text = obj[eid]["text2"][0] + "\n" + obj[eid]["text2"][1]
        else:
            source_text = "\n" + obj[eid]["text2"]
        str_group_settingmenuentries.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table)))
        str_table.append(source_text)

    # ----- Reading Menu Groups Descriptions
    obj = lang["menuGroups"]

    for index, mod in enumerate(defs["menuGroups"]):
        eid = mod["id"]
        str_group_settingmenuentriesdesc.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table)))
        str_table.append(obj[eid]["desc"])

    f.write("\n")

    @dataclass
    class RemappedTranslationItem:
        str_index: int
        str_start_offset: int = 0

    # ----- Perform suffix merging optimization:
    #
    # We sort the backward strings so that strings with the same suffix will
    # be next to each other, e.g.:
    #   "ef\0",
    #   "cdef\0",
    #   "abcdef\0",
    backward_sorted_table: List[Tuple[int, str, bytes]] = sorted(
        ((i, s,
          bytes(reversed(convert_string_bytes(symbol_conversion_table, s))))
         for i, s in enumerate(str_table)),
        key=lambda x: x[2],
    )
    str_remapping: List[
        Optional[RemappedTranslationItem]] = [None] * len(str_table)
    for i, (str_index, source_str,
            converted) in enumerate(backward_sorted_table[:-1]):
        j = i
        while (j + 1 < len(backward_sorted_table)
               and backward_sorted_table[j + 1][2].startswith(converted)):
            j += 1
        if j != i:
            str_remapping[str_index] = RemappedTranslationItem(
                str_index=backward_sorted_table[j][0],
                str_start_offset=len(backward_sorted_table[j][2]) -
                len(converted),
            )

    # ----- Write the string table:
    str_offsets = [-1] * len(str_table)
    offset = 0
    write_null = False
    f.write("const char TranslationStringsData[] = {\n")
    for i, source_str in enumerate(str_table):
        if write_null:
            f.write(' "\\0"\n')
        write_null = True
        if str_remapping[i] is not None:
            write_null = False
            continue
        # Find what items use this string
        str_used_by = [i] + [
            j for j, r in enumerate(str_remapping) if r and r.str_index == i
        ]
        for j in str_used_by:
            for group, pre_info in [
                (str_group_messages, "messages"),
                (str_group_messageswarn, "messagesWarn"),
                (str_group_characters, "characters"),
                (str_group_settingdesc, "SettingsDescriptions"),
                (str_group_settingshortnames, "SettingsShortNames"),
                (str_group_settingmenuentries, "SettingsMenuEntries"),
                (str_group_settingmenuentriesdesc,
                 "SettingsMenuEntriesDescriptions"),
            ]:
                for item in group:
                    if item.str_index == j:
                        f.write(f"  //     - {pre_info} {item.info}\n")
            if j == i:
                f.write(f"  // {offset: >4}: {escape(source_str)}\n")
                str_offsets[j] = offset
            else:
                remapped = str_remapping[j]
                assert remapped is not None
                f.write(
                    f"  // {offset + remapped.str_start_offset: >4}: {escape(str_table[j])}\n"
                )
                str_offsets[j] = offset + remapped.str_start_offset
        converted_str = convert_string(symbol_conversion_table, source_str)
        f.write(f'  "{converted_str}"')
        str_offsets[i] = offset
        # Sanity check: Each "char" in `converted_str` should be in format
        # `\xFF`, so the length should be divisible by 4.
        assert len(converted_str) % 4 == 0
        # Add the length and the null terminator
        offset += len(converted_str) // 4 + 1
    f.write("\n};\n\n")

    def get_offset(idx: int) -> int:
        assert str_offsets[idx] >= 0
        return str_offsets[idx]

    f.write("const TranslationIndexTable TranslationIndices = {\n")

    # ----- Write the messages string indices:
    for group in [
            str_group_messages, str_group_messageswarn, str_group_characters
    ]:
        for item in group:
            f.write(
                f"  .{item.info} = {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
            )
        f.write("\n")

    # ----- Write the settings index tables:
    for group, name in [
        (str_group_settingdesc, "SettingsDescriptions"),
        (str_group_settingshortnames, "SettingsShortNames"),
        (str_group_settingmenuentries, "SettingsMenuEntries"),
        (str_group_settingmenuentriesdesc, "SettingsMenuEntriesDescriptions"),
    ]:
        max_len = 30
        f.write(f"  .{name} = {{\n")
        for item in group:
            f.write(
                f"    /* {item.info.ljust(max_len)[:max_len]} */ {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
            )
        f.write(f"  }}, // {name}\n\n")

    f.write("}; // TranslationIndices\n\n")
    f.write("const TranslationIndexTable *const Tr = &TranslationIndices;\n")
    f.write(
        "const char *const TranslationStrings = TranslationStringsData;\n\n")

    f.write(
        f"const bool HasFahrenheit = {('true' if lang.get('tempUnitFahrenheit', True) else 'false')};\n"
    )

    f.write("\n// Verify SettingsItemIndex values:\n")
    for i, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]
        f.write(
            f"static_assert(static_cast<uint8_t>(SettingsItemIndex::{eid}) == {i});\n"
        )
    f.write(
        f"static_assert(static_cast<uint8_t>(SettingsItemIndex::NUM_ITEMS) == {len(defs['menuOptions'])});\n"
    )
Exemplo n.º 48
0
def write_to_conll_2012_eval_file(prediction_file: TextIO,
                                  gold_file: TextIO,
                                  pred_index: Optional[int],
                                  sentence: List[str],
                                  prediction: List[str],
                                  gold_labels: List[str]):
    """
    Prints predicate argument predictions and gold labels for a single 
    predicate in a sentence to two provided file references.

    Parameters
    ----------
    prediction_file : TextIO, required.
        A file reference to print predictions to.
    gold_file : TextIO, required.
        A file reference to print gold labels to.
    pred_index : Optional[int], required.
        The index of the predicate in the sentence which
        the gold labels are the arguments for, or None if the sentence
        contains no predicate.
    sentence : List[str], required.
        The word tokens.
    prediction : List[str], required.
        The predicted BIO labels.
    gold_labels : List[str], required.
        The gold BIO labels.
    """
    pred_only_sentence = ["-"] * len(sentence)
    if pred_index is not None:
        pred_only_sentence[pred_index] = sentence[pred_index]

    conll_format_predictions = convert_bio_tags_to_conll_2012_format(prediction)
    conll_format_gold_labels = convert_bio_tags_to_conll_2012_format(gold_labels)

    for word, predicted, gold in zip(pred_only_sentence,
                                     conll_format_predictions,
                                     conll_format_gold_labels):
        prediction_file.write(word.ljust(15))
        prediction_file.write(predicted.rjust(15) + "\n")
        gold_file.write(word.ljust(15))
        gold_file.write(gold.rjust(15) + "\n")
    prediction_file.write("\n")
    gold_file.write("\n")
Exemplo n.º 49
0
def score_and_write_batch(model: keras.Model,
                          file_out: TextIO,
                          batch_size: int,
                          python_batch_size: int,
                          tensor_type: str,
                          annotation_set: str,
                          window_size: int,
                          read_limit: int,
                          tensor_dir: str = '') -> None:
    """Score a batch of variants with a CNN model. Write tab delimited temp file with scores.

    This function is tightly coupled with the CNNScoreVariants.java
    It requires data written to the fifo in the order given by transferToPythonViaFifo

    Arguments
        model: a keras model
        file_out: The temporary VCF-like file where variants scores will be written
        batch_size: The total number of variants available in the fifo
        python_batch_size: the number of variants to process in each inference
        tensor_type: The name for the type of tensor to make
        annotation_set: The name for the set of annotations to use
        window_size: The size of the context window of genomic bases, i.e. the width of the tensor
        read_limit: The maximum number of reads to encode in a tensor, i.e. the height of the tensor
        tensor_dir : If this path exists write hd5 files for each tensor (optional for debugging)
    """
    annotation_batch = []
    reference_batch = []
    variant_types = []
    variant_data = []
    read_batch = []
    for _ in range(batch_size):
        fifo_line = tool.readDataFIFO()
        fifo_data = fifo_line.split(defines.DATA_TYPE_SEPARATOR)

        variant_data.append(fifo_data[CONTIG_FIFO_INDEX] + defines.DATA_TYPE_SEPARATOR
                            + fifo_data[POS_FIFO_INDEX] + defines.DATA_TYPE_SEPARATOR
                            + fifo_data[REF_FIFO_INDEX] + defines.DATA_TYPE_SEPARATOR + fifo_data[ALT_FIFO_INDEX])
        reference_batch.append(reference_string_to_tensor(fifo_data[REF_STRING_FIFO_INDEX]))
        annotation_batch.append(annotation_string_to_tensor(annotation_set, fifo_data[ANNOTATION_FIFO_INDEX]))
        variant_types.append(fifo_data[VARIANT_TYPE_FIFO_INDEX].strip())

        fifo_idx = VARIANT_FIFO_FIELDS
        if tensor_type in defines.TENSOR_MAPS_2D and len(fifo_data) > fifo_idx:
            read_tuples = []
            var = Variant(fifo_data[CONTIG_FIFO_INDEX], int(fifo_data[POS_FIFO_INDEX]), fifo_data[POS_FIFO_INDEX],
                          fifo_data[ALT_FIFO_INDEX], fifo_data[VARIANT_TYPE_FIFO_INDEX])
            while fifo_idx+READ_ELEMENTS <= len(fifo_data):
                read_tuples.append(
                    Read(fifo_data[fifo_idx + READ_BASES_FIFO_INDEX],
                         list(map(int, fifo_data[fifo_idx+READ_QUAL_FIFO_INDEX].split(defines.DATA_VALUE_SEPARATOR))),
                         fifo_data[fifo_idx+READ_CIGAR_FIFO_INDEX],
                         bool_from_java(fifo_data[fifo_idx+READ_REVERSE_FIFO_INDEX]),
                         bool_from_java(fifo_data[fifo_idx+READ_MATE_REVERSE_FIFO_INDEX]),
                         bool_from_java(fifo_data[fifo_idx+READ_FIRST_IN_PAIR_FIFO_INDEX]),
                         int(fifo_data[fifo_idx+READ_MQ_FIFO_INDEX]),
                         int(fifo_data[fifo_idx+READ_REF_START_FIFO_INDEX])))
                fifo_idx += READ_ELEMENTS
            _, ref_start, _ = get_variant_window(window_size, var)
            insert_dict = get_inserts(read_tuples, var, window_size)
            tensor = read_tuples_to_tensor(read_tuples, ref_start, insert_dict, tensor_type, window_size, read_limit)
            reference_sequence_into_tensor(fifo_data[4], tensor, insert_dict, window_size, read_limit)
            if os.path.exists(tensor_dir):
                _write_tensor_to_hd5(tensor, annotation_batch[-1], fifo_data[0], fifo_data[1], fifo_data[6],
                                     tensor_type, annotation_set, tensor_dir)
            read_batch.append(tensor)

    if tensor_type in defines.TENSOR_MAPS_1D:
        predictions = model.predict([np.array(reference_batch), np.array(annotation_batch)],
                                    batch_size=python_batch_size)
    elif tensor_type in defines.TENSOR_MAPS_2D:
        predictions = model.predict(
            [np.array(read_batch), np.array(annotation_batch)], batch_size=python_batch_size)
    else:
        raise ValueError('Unknown tensor mapping.  Check architecture file.', tensor_type)

    indel_scores = predictions_to_indel_scores(predictions)
    snp_scores = predictions_to_snp_scores(predictions)

    for i in range(batch_size):
        if 'SNP' == variant_types[i]:
            file_out.write(variant_data[i] + defines.DATA_TYPE_SEPARATOR + '{0:.3f}'.format(snp_scores[i]) + '\n')
        elif 'INDEL' == variant_types[i]:
            file_out.write(variant_data[i] + defines.DATA_TYPE_SEPARATOR + '{0:.3f}'.format(indel_scores[i]) + '\n')
        else:
            file_out.write(variant_data[i] + defines.DATA_TYPE_SEPARATOR
                           + '{0:.3f}'.format(max(snp_scores[i], indel_scores[i])) + '\n')
Exemplo n.º 50
0
def generate(targets: List[Target], out: TextIO) -> None:
    uses_pkg_config = any(isinstance(t, Pkg) for t in targets)

    out.write(LISTS_PROLOGUE.format(**locals()))
    out.write(INSTALL_TARGETS)
    if uses_pkg_config:
        out.write('include(FindPkgConfig)\n')

    bde_targets = []
    for target in reversed(targets):
        if isinstance(target, Group) or isinstance(target, Package):
            generate_bde(target, out)
            if len(list(target.drivers())):
                bde_targets.append(target)
        elif isinstance(target, CMake):
            path = target.path()
            out.write('add_subdirectory({path} {target.name})\n'.format(
                                            **locals()).replace('\\', '/'))
        elif isinstance(target, Pkg):
            generate_pkg(target, out)

        if target.overrides:
            out.write(f'include({target.overrides})\n'.replace('\\', '/'))

    if bde_targets:
        out.write(ALL_TESTS_PROLOGUE)
        for target in bde_targets:
            out.write(f'    {target.name}.t\n')
        out.write(COMMAND_EPILOGUE)
Exemplo n.º 51
0
def generate_pkg(target: Pkg, out: TextIO) -> None:
    name    = target.name
    package = target.package
    out.write(PKG_CONFIG.format(**locals()))
Exemplo n.º 52
0
def generate_bde(target: BdeTarget, out: TextIO) -> None:
    out.write(LIBRARY_PROLOGUE.format(**locals()))
    for component in target.sources():
        out.write('    {}\n'.format(component).replace('\\', '/'))
    out.write(COMMAND_EPILOGUE)

    target_upper = target.name.upper()
    out.write(DEFINE_SYMBOL.format(**locals()))

    out.write(INCLUDE_DIRECTORIES_PROLOGUE.format(**locals()))
    for include in target.includes():
        out.write('    {}\n'.format(include).replace('\\', '/'))
    out.write(COMMAND_EPILOGUE)

    out.write(LINK_LIBRARIES_PROLOGUE.format(**locals()))
    for dependency in target.dependencies():
        if dependency.has_output:
            out.write('    {}\n'.format(dependency.name))
    out.write(COMMAND_EPILOGUE)

    if target.lazily_bound:
        out.write(LAZILY_BOUND_FLAG.format(**locals()))

    drivers = []
    for driver in target.drivers():
        name = os.path.splitext(os.path.basename(driver))[0]
        out.write(TESTING_DRIVER.format(**locals()).replace('\\', '/'))
        drivers.append(name)

    if drivers:
        out.write(TEST_TARGET_PROLOGUE.format(**locals()))
        for driver in drivers:
            out.write('    {}\n'.format(driver))
        out.write(COMMAND_EPILOGUE)

    out.write(INSTALL_HEADERS_PROLOGUE)
    for header in target.headers():
        out.write('    {}\n'.format(header).replace('\\', '/'))
    out.write(INSTALL_HEADERS_DESTINATION)
    out.write(COMMAND_EPILOGUE)

    out.write(INSTALL_LIBRARY.format(**locals()))
Exemplo n.º 53
0
    def _format(self, object: object, stream: TextIO, indent: int,
                allowance: int, context: Dict[int, int], level: int) -> None:
        level = level + 1
        objid = _id(object)
        if objid in context:
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level - 1)
        typ = _type(object)
        sepLines = _len(rep) > (self._width - 1 - indent - allowance)
        write = stream.write

        if self._depth and level > self._depth:
            write(rep)
            return

        if sepLines:
            r = getattr(typ, "__repr__", None)
            if issubclass(typ, dict):
                dictobj = cast(dict, object)
                write('{')
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                length = _len(dictobj)
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    if issubclass(typ, _OrderedDict):
                        items = list(dictobj.items())
                    else:
                        items = sorted(dictobj.items(), key=_safe_tuple)
                    key, ent = items[0]
                    rep = self._repr(key, context, level)
                    write(rep)
                    write(': ')
                    self._format(ent, stream, indent + _len(rep) + 2,
                                  allowance + 1, context, level)
                    if length > 1:
                        for key, ent in items[1:]:
                            rep = self._repr(key, context, level)
                            write(',\n%s%s: ' % (' '*indent, rep))
                            self._format(ent, stream, indent + _len(rep) + 2,
                                          allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                write('}')
                return

            if ((issubclass(typ, list) and r is list.__repr__) or
                (issubclass(typ, tuple) and r is tuple.__repr__) or
                (issubclass(typ, set) and r is set.__repr__) or
                (issubclass(typ, frozenset) and r is frozenset.__repr__)
               ):
                anyobj = Any(object) # TODO Collection?
                length = _len(anyobj)
                if issubclass(typ, list):
                    write('[')
                    endchar = ']'
                    lst = anyobj
                elif issubclass(typ, set):
                    if not length:
                        write('set()')
                        return
                    write('{')
                    endchar = '}'
                    lst = sorted(anyobj, key=_safe_key)
                elif issubclass(typ, frozenset):
                    if not length:
                        write('frozenset()')
                        return
                    write('frozenset({')
                    endchar = '})'
                    lst = sorted(anyobj, key=_safe_key)
                    indent += 10
                else:
                    write('(')
                    endchar = ')'
                    lst = list(anyobj)
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    self._format(lst[0], stream, indent, allowance + 1,
                                 context, level)
                    if length > 1:
                        for ent in lst[1:]:
                            write(',\n' + ' '*indent)
                            self._format(ent, stream, indent,
                                          allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                if issubclass(typ, tuple) and length == 1:
                    write(',')
                write(endchar)
                return

        write(rep)
Exemplo n.º 54
-1
def output_file(out: TextIO, fin: TextIO, keep_license: bool) -> None:
	skip = LICENSE_LINES
	if keep_license: skip = 0
	while True:
		line = fin.readline()
		if not line:
			break
		if skip:
			skip -= 1
			continue
		out.write(line)
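A usage sketch (the file names here are hypothetical, and LICENSE_LINES is assumed to be defined next to output_file): copy one source file into an output file while dropping its license header.

with open('source.py') as fin, open('combined.py', 'w') as out:
    output_file(out, fin, keep_license=False)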