Example #1
def write_json_log(jsonlogfile: typing.TextIO, test_name: str, result: TestRun) -> None:
    jresult = {'name': test_name,
               'stdout': result.stdo,
               'result': result.res.value,
               'duration': result.duration,
               'returncode': result.returncode,
               'env': result.env,
               'command': result.cmd}  # type: typing.Dict[str, typing.Any]
    if result.stde:
        jresult['stderr'] = result.stde
    jsonlogfile.write(json.dumps(jresult) + '\n')
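A minimal usage sketch: TestRun and its fields are project-specific, so the SimpleNamespace below only mimics the attributes the function reads (the function's own module is assumed to import json and typing, as in the source).

import io
import types

# Stand-in for the project's TestRun: only the attributes read above matter.
result = types.SimpleNamespace(
    stdo='hello\n', res=types.SimpleNamespace(value='OK'), duration=0.12,
    returncode=0, env={'CI': '1'}, cmd=['./prog'], stde='')
log = io.StringIO()
write_json_log(log, 'smoke-test', result)
print(log.getvalue())  # one JSON object per line; 'stderr' appears only when stde is non-empty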
Example #2
def _update_params(infile: Iterable[str], outfile: TextIO):
    startday_pattern = ' start time (days)= '
    stopday_pattern = ' stop time (days) = '
    for line in infile:
        if line.startswith(startday_pattern):
            line = '%s%f\n' % (startday_pattern, from_day)
        if line.startswith(stopday_pattern):
            line = '%s%f\n' % (stopday_pattern, to_day)
        outfile.write(line)
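Note that from_day and to_day are globals in the function's home module, not parameters. A runnable sketch with stand-in values:

import io

from_day, to_day = 0.0, 365.0  # stand-ins for the module-level globals

src = io.StringIO(' start time (days)= 1.5\nunrelated line\n')
dst = io.StringIO()
_update_params(src, dst)
print(dst.getvalue())  # ' start time (days)= 0.000000\nunrelated line\n'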
Example #3
    @classmethod
    def from_pty(cls, stdout: TextIO, term: Optional[str] = None) -> 'Vt100_Output':
        """
        Create an Output class from a pseudo terminal.
        (This will take the dimensions by reading the pseudo
        terminal attributes.)
        """
        # Normally, this requires a real TTY device, but people instantiate
        # this class often during unit tests as well. For convenience, we print
        # an error message, use standard dimensions, and go on.
        isatty = stdout.isatty()
        fd = stdout.fileno()

        if not isatty and fd not in cls._fds_not_a_terminal:
            msg = 'Warning: Output is not a terminal (fd=%r).\n'
            sys.stderr.write(msg % fd)
            cls._fds_not_a_terminal.add(fd)

        def get_size() -> Size:
            # If terminal (incorrectly) reports its size as 0, pick a
            # reasonable default.  See
            # https://github.com/ipython/ipython/issues/10071
            rows, columns = (None, None)

            if isatty:
                rows, columns = _get_size(stdout.fileno())
            return Size(rows=rows or 24, columns=columns or 80)

        return cls(stdout, get_size, term=term)
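A usage sketch, assuming prompt_toolkit 3.x (this classmethod lives on prompt_toolkit.output.vt100.Vt100_Output; constructor options vary slightly between versions):

import sys

from prompt_toolkit.output.vt100 import Vt100_Output

output = Vt100_Output.from_pty(sys.stdout, term='xterm-256color')
print(output.get_size())  # falls back to rows=24, columns=80 when stdout is not a TTY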
Example #4
File: __init__.py  Project: gwk/pat
def pat_dependency(src_path: str, src_file: TextIO) -> str:
  '''
  Return the dependency of a .pat file.
  A .pat file always has a single dependency: the source file it patches.
  '''
  version_line = src_file.readline()
  orig_line = src_file.readline()
  orig_path = orig_line.strip()
  if not orig_path:
    failF('pat error: {}:2:1: line specifying original path is missing or empty.', src_path)
  return orig_path
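The two readline() calls imply a fixed layout: a version line, then the original path on line 2 (failF is a project helper that aborts with a formatted message). For example:

import io

pat_file = io.StringIO('pat v0\nsrc/original.txt\n...patch hunks...\n')
print(pat_dependency('example.pat', pat_file))  # -> 'src/original.txt'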
Example #5
    def save(self, *, config_fd: Optional[TextIO] = None, encode: bool = False) -> None:
        with io.StringIO() as config_buffer:
            self.parser.write(config_buffer)
            config = config_buffer.getvalue()
            if encode:
                # Encode config using base64
                config = base64.b64encode(
                    config.encode(sys.getfilesystemencoding())
                ).decode(sys.getfilesystemencoding())

            if config_fd:
                config_fd.write(config)
            else:
                with open(self.save_path(), "w") as f:
                    f.write(config)
Example #6
    def load(self, *, config_fd: Optional[TextIO] = None) -> None:
        config = ""
        if config_fd:
            config = config_fd.read()
        else:
            # Local configurations (per project) are supposed to be static.
            # That's why it's only checked for 'loading' and never written to.
            # Essentially, all authentication-related changes, like
            # login/logout or macaroon-refresh, will not be persisted for the
            # next runs.
            file_path = ""
            if os.path.exists(LOCAL_CONFIG_FILENAME):
                file_path = LOCAL_CONFIG_FILENAME

                # FIXME: We don't know this for sure when loading the config.
                # Need a better separation of concerns.
                logger.warning(
                    "Using local configuration ({!r}), changes will not be "
                    "persisted.".format(file_path)
                )
            else:
                file_path = BaseDirectory.load_first_config(
                    "snapcraft", "snapcraft.cfg"
                )
            if file_path and os.path.exists(file_path):
                with open(file_path, "r") as f:
                    config = f.read()

        if config:
            _load_potentially_base64_config(self.parser, config)
Example #7
def dump_info(file: TextIO) -> None:
    """Create the wiki page for item options, given a file to write to."""
    print(DOC_HEADER, file=file)
    
    for opt in DEFAULTS:
        if opt.default is None:
            default = ''
        elif type(opt.default) is Vec:
            default = '(`' + opt.default.join(' ') + '`)'
        else:
            default = ' = `' + repr(opt.default) + '`'
        file.write(INFO_DUMP_FORMAT.format(
            id=opt.name, 
            default=default,
            type=TYPE_NAMES[opt.type],
            desc='\n'.join(opt.doc),
        ))
Example #8
    def __init__(self, f: TextIO):
        """
        Create a new `PushbackFile` object to wrap a file-like object.

        **Parameters**

        - `f` (file-like object): A file-like object that supports a
          `readlines()` method.
        """
        self.__buf = list(''.join(f.readlines()))
Example #9
    def _generate_template_to_writer(self, base: pathlib.Path,
                                     source: pathlib.Path,
                                     writer: TextIO, **extra_variables):
        try:
            template = self.env.get_template(str(source))
        except jinja2.TemplateNotFound as e:
            raise GeneratorError("Template {} not found (search path {})"
                                 .format(source,
                                         self._format_search_path())
                                 ) from e
        now = datetime.datetime.now(datetime.timezone.utc).astimezone()
        comment = ("Generated on {} from {} by {}"
                   .format(now.strftime(self.DATETIME_FORMAT), source,
                           self._current_user))
        relative_source = source.relative_to(base)
        stream = template.stream(**self.config, **extra_variables,
                                 comment=comment,
                                 source_base=base,
                                 source=relative_source,
                                 source_dir=relative_source.parent)
        writer.writelines(stream)
Example #10
    def _process_includes(self,
                          file_in: TextIO,
                          filename: str,
                          file_out: TextIO) -> None:
        log.debug(f'Processing includes in "{filename}"')

        for line in file_in:
            match = self._include_pattern.search(line)
            if match:
                if self._nested >= self._maxnest:
                    raise MaxNestingExceededError(
                        f'Exceeded maximum include depth of {self._maxnest}'
                    )

                inc_name = match.group(1)
                log.debug(f'Found include directive: {line[:-1]}')
                f, included_name = self._open(inc_name, filename)
                self._nested += 1
                self._process_includes(f, included_name, file_out)
                self._nested -= 1
            else:
                file_out.write(line)
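A stripped-down, self-contained rendition of the same recursion; the directive syntax and the opener callback here are invented for illustration, whereas the real method resolves names via self._include_pattern and self._open:

import io
import re

INCLUDE = re.compile(r'^%include\s+"(.+)"')  # hypothetical directive syntax

def expand(file_in, file_out, opener, depth=0, maxdepth=10):
    # Same shape as _process_includes: recurse on include lines, copy the rest.
    for line in file_in:
        match = INCLUDE.search(line)
        if match:
            if depth >= maxdepth:
                raise RuntimeError('maximum include depth exceeded')
            expand(opener(match.group(1)), file_out, opener, depth + 1, maxdepth)
        else:
            file_out.write(line)

files = {'a': io.StringIO('one\n%include "b"\nthree\n'),
         'b': io.StringIO('two\n')}
out = io.StringIO()
expand(files['a'], out, files.__getitem__)
print(out.getvalue())  # one\ntwo\nthree\n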
Example #11
    def _search(self, f: TextIO, filename: Optional[str] = None) -> bool:
        paragraph = []
        last_empty = False
        found = False
        eop_line = None

        def print_paragraph(paragraph: Sequence[str]) -> None:
            if self._print_file_header:
                print(f'::::::::::\n{filename}\n::::::::::\n')
                self._print_file_header = False
            print('\n'.join(paragraph))
            if self.print_eop and (eop_line is not None):
                print(eop_line)
            else:
                print()

        for line in f.readlines():
            if self.eop_regexp.match(line):
                # End of current paragraph, or a redundant (consecutive)
                # end-of-paragraph mark.  If it's truly the first one since
                # the end of the paragraph, search the accumulated lines of
                # the paragraph.

                if line[-1] == '\n':
                    eop_line = line[:-1]
                else:
                    eop_line = line

                if not last_empty:
                    last_empty = True
                    found = self._search_paragraph(paragraph)
                    if found:
                        print_paragraph(paragraph)
                    paragraph = []

            else:
                # Save this line in the current paragraph buffer
                if line[-1] == '\n':
                    line = line[:-1]
                paragraph += [line]
                last_empty = False

        # We might have a paragraph left in the buffer. If so, search it.

        if not last_empty:
            if self._search_paragraph(paragraph):
                found = True
                print_paragraph(paragraph)

        return found
Example #12
    def __init__(self, stdin: TextIO) -> None:
        # Test whether the given input object has a file descriptor.
        # (Idle reports stdin to be a TTY, but fileno() is not implemented.)
        try:
            # This should not raise, but can return 0.
            stdin.fileno()
        except io.UnsupportedOperation:
            if 'idlelib.run' in sys.modules:
                raise io.UnsupportedOperation(
                    'Stdin is not a terminal. Running from Idle is not supported.')
            else:
                raise io.UnsupportedOperation('Stdin is not a terminal.')

        # Even when we have a file descriptor, it doesn't mean it's a TTY.
        # Normally, this requires a real TTY device, but people instantiate
        # this class often during unit tests as well. They use for instance
        # pexpect to pipe data into an application. For convenience, we print
        # an error message and go on.
        isatty = stdin.isatty()
        fd = stdin.fileno()

        if not isatty and fd not in Vt100Input._fds_not_a_terminal:
            msg = 'Warning: Input is not a terminal (fd=%r).\n'
            sys.stderr.write(msg % fd)
            Vt100Input._fds_not_a_terminal.add(fd)

        #
        self.stdin = stdin

        # Create a backup of the fileno(). We want this to work even if the
        # underlying file is closed, so that `typeahead_hash()` keeps working.
        self._fileno = stdin.fileno()

        self._buffer: List[KeyPress] = []  # Buffer to collect the Key objects.
        self.stdin_reader = PosixStdinReader(self._fileno)
        self.vt100_parser = Vt100Parser(
            lambda key_press: self._buffer.append(key_press))
Example #13
def _prepare_graph_struct(name: Optional[str], graph: TextIO, hosts: List[str], graph_format: str) -> dict:
    if graph_format == 'raw':
        return json.load(graph)
    assert name and hosts, 'name and hosts are required unless graph_format is "raw"'
    result = GraphStruct()
    result.graph_name = name
    result.clusters.from_json({'I': hosts})
    if graph_format == 'script':
        task = ExtendedTaskStruct()
        task.task_name = 'main'
        task.hosts.append('I')
        task.task_struct.executor.name = 'shell'
        executor_cfg = ShellExecutorConfig()
        executor_cfg.shell_script = graph.read()
        task.task_struct.executor.config = executor_cfg.to_json()
        result.tasks.from_json([task.to_json()])
    elif graph_format == 'makefile':
        raise NotImplementedError()
    return result.to_json()
Example #14
def load_profiles(profiles_file: TextIO, person_to_friends: Dict[str, List[str]],
                  person_to_networks: Dict[str, List[str]]) -> None:
    '''Update the person_to_friends dictionary and the person_to_networks
    dictionary to include data from profiles_file.

    '''
    user = None
    for line in profiles_file.readlines():
        if not user:
            user = to_user(line)
            create_key(user, person_to_friends)
            create_key(user, person_to_networks)
        else:
            if len(line.strip()) == 0:
                user = None
            elif ',' in line:
                person_to_friends[user].append(to_user(line))
            else:
                person_to_networks[user].append(line.strip())
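to_user and create_key are helpers not shown here. With plausible stand-ins, the expected input format becomes visible: a name line, then comma-containing friend lines and plain network lines, with blocks separated by blank lines.

import io

def to_user(line):  # stand-in: 'Last, First' -> 'First Last'
    last, first = line.strip().split(',', 1)
    return first.strip() + ' ' + last.strip()

def create_key(key, dictionary):  # stand-in: ensure the key exists
    dictionary.setdefault(key, [])

friends, networks = {}, {}
load_profiles(io.StringIO('Lee, Stan\nDitko, Steve\nMarvel\n\n'),
              friends, networks)
print(friends)   # {'Stan Lee': ['Steve Ditko']}
print(networks)  # {'Stan Lee': ['Marvel']}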
Example #15
    def map_input(self, data: typing.TextIO) -> libioc.Config.Data.Data:
        """Parse and normalize JSON data."""
        try:
            content = data.read().strip()
        except (FileNotFoundError, PermissionError) as e:
            raise libioc.errors.JailConfigError(
                message=str(e),
                logger=self.logger
            )

        if content == "":
            return libioc.Config.Data.Data()

        try:
            result = json.loads(content)  # type: typing.Dict[str, typing.Any]
            return libioc.Config.Data.Data(result)
        except json.decoder.JSONDecodeError as e:
            raise libioc.errors.JailConfigError(
                message=str(e),
                logger=self.logger
            )
Example #16
def lex(input: TextIO) -> Iterator[Command]:
    whitespace = re.compile(r'\s+')

    parsing_item    = False
    parsing_command = False
    command, args   = '', []
    while True:
        char = input.read(1)
        if char == '':
            break
        if whitespace.match(char):
            if parsing_item:
                if parsing_command:
                    args.append('')
                else:
                    command += char
            parsing_item = False
        elif char == '(':
            if parsing_command:
                raise RuntimeError('Nested command')
            args.append('')
            parsing_command = True
            parsing_item    = False
        elif char == ')':
            if not parsing_command:
                raise RuntimeError('Unexpected ")"')
            if args[-1] == '':
                args = args[:-1]
            yield command, args
            command, args   = '', []
            parsing_item    = False
            parsing_command = False
        else:
            if parsing_command:
                args[-1] += char
            else:
                command += char
            parsing_item = True
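Fed a character stream, the lexer yields one (command, args) pair per closing parenthesis:

import io

for command, args in lex(io.StringIO('move(1 2) stop()')):
    print(command, args)
# move ['1', '2']
# stop []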
Example #17
def writelines_nl(fileobj: TextIO, lines: Iterable[str]) -> None:
    # Since fileobj.writelines() doesn't add newlines...
    # http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file  # noqa
    fileobj.write('\n'.join(lines) + '\n')
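A quick demonstration:

import sys

writelines_nl(sys.stdout, ['alpha', 'beta'])  # emits 'alpha\nbeta\n' in one write call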
Example #18
def writeln(file: TextIO, line: Text) -> int:
    return file.write(line + '\n')
Example #19
def main(input_file: typing.TextIO, output_file: typing.TextIO):
    """Convert joshi to conll"""

    output_file.write("#begin document test_entities\n")

    topic = 0
    next_cluster = 0
    for line in input_file:
        doc = json.loads(line)
        tok_offset = 0
        word_offset = 0
        sent_offset = 0

        word_mapping = defaultdict(list)
        sent_mentions = set()

        doclen = len(doc["subtoken_map"])

        for clst_id, clst in enumerate(doc['predicted_clusters'],
                                       start=next_cluster):

            for start, end in clst:

                if (start >= doclen) or (end >= doclen):
                    continue

                print(start, end)
                sent_mentions.update(
                    [doc['sentence_map'][start], doc['sentence_map'][end]])
                if start == end:
                    word_mapping[start].append(f"({clst_id})")
                else:
                    word_mapping[start].append(f"({clst_id}")
                    word_mapping[end].append(f"{clst_id})")

        next_cluster += len(doc['predicted_clusters'])

        for sent in doc['sentences']:

            doc_id = doc['doc_ids'][0] if tok_offset < doc['doc_boundaries'][
                1] else doc['doc_ids'][1]

            words = tidy_up_tokens(sent)

            sent_flag = "True" if sent_offset in sent_mentions else "False"

            for word in words:

                if word in ['[CLS]', '[SEP]']:
                    tok_offset += 1
                    continue

                cluster_str = "|".join(word_mapping[tok_offset]
                                       ) if tok_offset in word_mapping else "-"

                row = [
                    str(topic), f"{topic}_0", f"{topic}_{doc_id}",
                    str(sent_offset),
                    str(word_offset), word, sent_flag, cluster_str
                ]
                output_file.write("\t".join(row) + "\n")

                tok_offset += 1
                word_offset += 1

            sent_offset += 1

    output_file.write("#end document")
Example #20
    def render(self, out: TextIO) -> None:
        self.start.render(out)
        for i in self.elements:
            i.render(out)
        out.write("\n")
Example #21
    def render(self, out: TextIO) -> None:
        out.write("subgraph %s {\n" % self.name)
        out.write("rank = same\n")
Example #22
    def _format(self, object: object, stream: TextIO, indent: int,
                allowance: int, context: Dict[int, int], level: int) -> None:
        level = level + 1
        objid = _id(object)
        if objid in context:
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level - 1)
        typ = _type(object)
        sepLines = _len(rep) > (self._width - 1 - indent - allowance)
        write = stream.write

        if self._depth and level > self._depth:
            write(rep)
            return

        if sepLines:
            r = getattr(typ, "__repr__", None)
            if issubclass(typ, dict):
                dictobj = cast(dict, object)
                write('{')
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                length = _len(dictobj)
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    if issubclass(typ, _OrderedDict):
                        items = list(dictobj.items())
                    else:
                        items = sorted(dictobj.items(), key=_safe_tuple)
                    key, ent = items[0]
                    rep = self._repr(key, context, level)
                    write(rep)
                    write(': ')
                    self._format(ent, stream, indent + _len(rep) + 2,
                                  allowance + 1, context, level)
                    if length > 1:
                        for key, ent in items[1:]:
                            rep = self._repr(key, context, level)
                            write(',\n%s%s: ' % (' '*indent, rep))
                            self._format(ent, stream, indent + _len(rep) + 2,
                                          allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                write('}')
                return

            if ((issubclass(typ, list) and r is list.__repr__) or
                (issubclass(typ, tuple) and r is tuple.__repr__) or
                (issubclass(typ, set) and r is set.__repr__) or
                (issubclass(typ, frozenset) and r is frozenset.__repr__)
               ):
                anyobj = cast(Any, object)  # TODO Collection?
                length = _len(anyobj)
                if issubclass(typ, list):
                    write('[')
                    endchar = ']'
                    lst = anyobj
                elif issubclass(typ, set):
                    if not length:
                        write('set()')
                        return
                    write('{')
                    endchar = '}'
                    lst = sorted(anyobj, key=_safe_key)
                elif issubclass(typ, frozenset):
                    if not length:
                        write('frozenset()')
                        return
                    write('frozenset({')
                    endchar = '})'
                    lst = sorted(anyobj, key=_safe_key)
                    indent += 10
                else:
                    write('(')
                    endchar = ')'
                    lst = list(anyobj)
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    self._format(lst[0], stream, indent, allowance + 1,
                                 context, level)
                    if length > 1:
                        for ent in lst[1:]:
                            write(',\n' + ' '*indent)
                            self._format(ent, stream, indent,
                                          allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                if issubclass(typ, tuple) and length == 1:
                    write(',')
                write(endchar)
                return

        write(rep)
Example #23
def fail(msg: str, stderr: TextIO) -> None:
    stderr.write('%s\n' % msg)
    sys.exit(2)
Example #24
    def __process_existing_topic_content(
            self, topic_file: tp.TextIO,
            skeleton_headings_iter: tp.Iterator[SectionHeading]
    ) -> tp.List[str]:
        """
        This method checks that all heading related lines in the topic file
        correspond correctly to the skeleton. If lines are missing, the user is
        asked if the missing heading should be added.

        Args:
            topic_file: the topic markdown file to update
            skeleton_headings_iter: iterator that points to the next expected
                                    topic heading

        Returns: existing topic lines, where headings were updated according to
                 the skeleton
        """
        updated_topic_lines = []
        emitting_doc_text = True

        for line in topic_file.readlines():
            if line.startswith("###"):
                next_heading = next(skeleton_headings_iter)
                current_heading = self.lookup_heading(line.split(":")[0])

                # Add headers that are completely missing
                while (current_heading.header_text !=
                       next_heading.header_text):
                    print(f"Could not find section "
                          f"({next_heading.header_text}) before section "
                          f"({current_heading.header_text}).")
                    if _cli_yn_choice("Should I insert it before?"):
                        updated_topic_lines.append(next_heading.header_text)
                        updated_topic_lines.extend(
                            next_heading.convert_meta_text_to_lines())

                    next_heading = next(skeleton_headings_iter)

                emitting_doc_text = False

                # Write out heading
                updated_topic_lines.append(line)
                updated_topic_lines.extend(
                    current_heading.convert_meta_text_to_lines())

            elif line.startswith("##"):
                # Verify that the title heading has correct meta text
                emitting_doc_text = False
                next_heading = next(skeleton_headings_iter)
                updated_topic_lines.append(line)
                updated_topic_lines.extend(
                    self.get_title_heading().convert_meta_text_to_lines())
            elif line.startswith("_") or line.strip().endswith("_"):
                # Ignore meta lines
                # Meta lines are not allowed to contain modifications by the
                # topic writer and are always inserted with the title heading
                # from the skeleton.
                continue
            elif emitting_doc_text or line != "\n":
                # Skip new lines if we aren't emitting normal document text
                emitting_doc_text = True
                updated_topic_lines.append(line)

        return updated_topic_lines
Example #25
def release_config(lock: TextIO) -> None:
    """Release lock file"""
    lock.close()
Example #26
def write_formula(file: TextIO, num_vars: int,
                  formula: List[List[int]]) -> None:
    file.write("p cnf {} {}\n".format(num_vars, len(formula)))
    for clause in formula:
        lit_str = " ".join(str(lit) for lit in clause) + " 0\n"
        file.write(lit_str)
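The output is the DIMACS CNF format most SAT solvers accept: a 'p cnf <num_vars> <num_clauses>' header followed by one zero-terminated clause per line. For example:

import io

buf = io.StringIO()
write_formula(buf, num_vars=3, formula=[[1, -2], [2, 3, -1]])
print(buf.getvalue(), end='')
# p cnf 3 2
# 1 -2 0
# 2 3 -1 0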
Example #27
File: core.py  Project: PyCQA/isort
def process(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: str = "py",
    raise_on_skip: bool = True,
    config: Config = DEFAULT_CONFIG,
) -> bool:
    """Parses stream identifying sections of contiguous imports and sorting them

    Code with unsorted imports is read from the provided `input_stream`, sorted and then
    outputted to the specified `output_stream`.

    - `input_stream`: Text stream with unsorted import sections.
    - `output_stream`: Text stream to output sorted inputs into.
    - `config`: Config settings to use when sorting imports. Defaults settings.
        - *Default*: `isort.settings.DEFAULT_CONFIG`.
    - `extension`: The file extension or file extension rules that should be used.
        - *Default*: `"py"`.
        - *Choices*: `["py", "pyi", "pyx"]`.

    Returns `True` if there were changes that needed to be made (errors present) from what
    was provided in the input_stream, otherwise `False`.
    """
    line_separator: str = config.line_ending
    add_imports: List[str] = [
        format_natural(addition) for addition in config.add_imports
    ]
    import_section: str = ""
    next_import_section: str = ""
    next_cimports: bool = False
    in_quote: str = ""
    was_in_quote: bool = False
    first_comment_index_start: int = -1
    first_comment_index_end: int = -1
    contains_imports: bool = False
    in_top_comment: bool = False
    first_import_section: bool = True
    indent: str = ""
    isort_off: bool = False
    skip_file: bool = False
    code_sorting: Union[bool, str] = False
    code_sorting_section: str = ""
    code_sorting_indent: str = ""
    cimports: bool = False
    made_changes: bool = False
    stripped_line: str = ""
    end_of_file: bool = False
    verbose_output: List[str] = []
    lines_before: List[str] = []
    auto_reexporting: bool = False
    line_index: int = 0

    if config.float_to_top:
        new_input = ""
        current = ""
        isort_off = False
        for line in chain(input_stream, (None, )):
            if isort_off and line is not None:
                if line == "# isort: on\n":
                    isort_off = False
                new_input += line
            elif line in ("# isort: split\n", "# isort: off\n",
                          None) or str(line).endswith("# isort: split\n"):
                if line == "# isort: off\n":
                    isort_off = True
                if current:
                    if add_imports:
                        add_line_separator = line_separator or "\n"
                        current += add_line_separator + add_line_separator.join(
                            add_imports)
                        add_imports = []
                    parsed = parse.file_contents(current, config=config)
                    verbose_output += parsed.verbose_output
                    extra_space = ""
                    while current and current[-1] == "\n":
                        extra_space += "\n"
                        current = current[:-1]
                    extra_space = extra_space.replace("\n", "", 1)
                    sorted_output = output.sorted_imports(parsed,
                                                          config,
                                                          extension,
                                                          import_type="import")
                    made_changes = made_changes or _has_changed(
                        before=current,
                        after=sorted_output,
                        line_separator=parsed.line_separator,
                        ignore_whitespace=config.ignore_whitespace,
                    )
                    new_input += sorted_output
                    new_input += extra_space
                    current = ""
                new_input += line or ""
            else:
                current += line or ""

        input_stream = StringIO(new_input)

    for index, line in enumerate(chain(input_stream, (None, ))):
        if line is None:
            if index == 0 and not config.force_adds:
                return False

            not_imports = True
            end_of_file = True
            line = ""
            if not line_separator:
                line_separator = "\n"

            if code_sorting and code_sorting_section:
                sorted_code = textwrap.indent(
                    isort.literal.assignment(
                        code_sorting_section,
                        str(code_sorting),
                        extension,
                        config=_indented_config(config, indent),
                    ),
                    code_sorting_indent,
                )
                made_changes = made_changes or _has_changed(
                    before=code_sorting_section,
                    after=sorted_code,
                    line_separator=line_separator,
                    ignore_whitespace=config.ignore_whitespace,
                )
                line_index += output_stream.write(sorted_code)
        else:
            stripped_line = line.strip()
            if stripped_line and not line_separator:
                line_separator = line[len(line.rstrip()):].replace(
                    " ", "").replace("\t", "")

            for file_skip_comment in FILE_SKIP_COMMENTS:
                if file_skip_comment in line:
                    if raise_on_skip:
                        raise FileSkipComment("Passed in content")
                    isort_off = True
                    skip_file = True

            if not in_quote:
                if stripped_line == "# isort: off":
                    isort_off = True
                elif stripped_line.startswith("# isort: dont-add-imports"):
                    add_imports = []
                elif stripped_line.startswith("# isort: dont-add-import:"):
                    import_not_to_add = stripped_line.split(
                        "# isort: dont-add-import:", 1)[1].strip()
                    add_imports = [
                        import_to_add for import_to_add in add_imports
                        if not import_to_add == import_not_to_add
                    ]

            if ((index == 0 or (index in (1, 2) and not contains_imports))
                    and stripped_line.startswith("#")
                    and stripped_line not in config.section_comments
                    and stripped_line not in CODE_SORT_COMMENTS):
                in_top_comment = True
            elif in_top_comment and (not line.startswith("#") or stripped_line
                                     in config.section_comments
                                     or stripped_line in CODE_SORT_COMMENTS):
                in_top_comment = False
                first_comment_index_end = index - 1

            was_in_quote = bool(in_quote)
            if (not stripped_line.startswith("#")
                    or in_quote) and '"' in line or "'" in line:
                char_index = 0
                if first_comment_index_start == -1 and (line.startswith('"') or
                                                        line.startswith("'")):
                    first_comment_index_start = index
                while char_index < len(line):
                    if line[char_index] == "\\":
                        char_index += 1
                    elif in_quote:
                        if line[char_index:char_index +
                                len(in_quote)] == in_quote:
                            in_quote = ""
                            if first_comment_index_end < first_comment_index_start:
                                first_comment_index_end = index
                    elif line[char_index] in ("'", '"'):
                        long_quote = line[char_index:char_index + 3]
                        if long_quote in ('"""', "'''"):
                            in_quote = long_quote
                            char_index += 2
                        else:
                            in_quote = line[char_index]
                    elif line[char_index] == "#":
                        break
                    char_index += 1

            not_imports = bool(
                in_quote) or was_in_quote or in_top_comment or isort_off
            if not (in_quote or was_in_quote or in_top_comment):
                if isort_off:
                    if not skip_file and stripped_line == "# isort: on":
                        isort_off = False
                elif stripped_line.endswith("# isort: split"):
                    not_imports = True
                elif stripped_line in CODE_SORT_COMMENTS:
                    code_sorting = stripped_line.split("isort: ")[1].strip()
                    code_sorting_indent = line[:-len(line.lstrip())]
                    not_imports = True
                elif config.sort_reexports and stripped_line.startswith(
                        "__all__"):
                    code_sorting = LITERAL_TYPE_MAPPING[stripped_line.split(
                        " = ")[1][0]]
                    code_sorting_indent = line[:-len(line.lstrip())]
                    not_imports = True
                    code_sorting_section += line
                    auto_reexporting = True
                    line_index -= len(line) - 1
                elif code_sorting:
                    if not stripped_line:
                        sorted_code = textwrap.indent(
                            isort.literal.assignment(
                                code_sorting_section,
                                str(code_sorting),
                                extension,
                                config=_indented_config(config, indent),
                            ),
                            code_sorting_indent,
                        )
                        made_changes = made_changes or _has_changed(
                            before=code_sorting_section,
                            after=sorted_code,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        if auto_reexporting:
                            output_stream.seek(line_index, 0)
                        line_index += output_stream.write(sorted_code)
                        not_imports = True
                        code_sorting = False
                        code_sorting_section = ""
                        code_sorting_indent = ""
                        auto_reexporting = False
                    else:
                        code_sorting_section += line
                        line = ""
                elif (stripped_line in config.section_comments
                      or stripped_line in config.section_comments_end):
                    if import_section and not contains_imports:
                        output_stream.write(import_section)
                        import_section = line
                        not_imports = False
                    else:
                        import_section += line
                    indent = line[:-len(line.lstrip())]
                elif not (stripped_line or contains_imports):
                    not_imports = True
                elif (not stripped_line or stripped_line.startswith("#") and
                      (not indent or indent + line.lstrip() == line)
                      and not config.treat_all_comments_as_code
                      and stripped_line not in config.treat_comments_as_code):
                    import_section += line
                elif stripped_line.startswith(IMPORT_START_IDENTIFIERS):
                    new_indent = line[:-len(line.lstrip())]
                    import_statement = line
                    stripped_line = line.strip().split("#")[0]
                    while stripped_line.endswith("\\") or (
                            "(" in stripped_line and ")" not in stripped_line):
                        if stripped_line.endswith("\\"):
                            while stripped_line and stripped_line.endswith(
                                    "\\"):
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line
                        else:
                            while ")" not in stripped_line:
                                line = input_stream.readline()

                                if not line:  # end of file without closing parenthesis
                                    raise ExistingSyntaxErrors(
                                        "Parenthesis is not closed")

                                stripped_line = line.strip().split("#")[0]
                                import_statement += line

                    if (import_statement.lstrip().startswith("from")
                            and "import" not in import_statement):
                        line = import_statement
                        not_imports = True
                    else:
                        did_contain_imports = contains_imports
                        contains_imports = True

                        cimport_statement: bool = False
                        if (import_statement.lstrip().startswith(
                                CIMPORT_IDENTIFIERS)
                                or " cimport " in import_statement
                                or " cimport*" in import_statement
                                or " cimport(" in import_statement
                                or ".cimport" in import_statement):
                            cimport_statement = True

                        if cimport_statement != cimports or (
                                new_indent != indent and import_section and
                            (not did_contain_imports
                             or len(new_indent) < len(indent))):
                            indent = new_indent
                            if import_section:
                                next_cimports = cimport_statement
                                next_import_section = import_statement
                                import_statement = ""
                                not_imports = True
                                line = ""
                            else:
                                cimports = cimport_statement
                        else:
                            if new_indent != indent:
                                if import_section and did_contain_imports:
                                    import_statement = indent + import_statement.lstrip(
                                    )
                                else:
                                    indent = new_indent
                        import_section += import_statement
                else:
                    not_imports = True

        line_index += len(line)

        if not_imports:

            if not was_in_quote and config.lines_before_imports > -1:
                if line.strip() == "":
                    lines_before += line
                    continue
                if not import_section:
                    output_stream.write("".join(lines_before))
                lines_before = []

            raw_import_section: str = import_section
            if (add_imports and (stripped_line or end_of_file)
                    and not config.append_only and not in_top_comment
                    and not was_in_quote and not import_section
                    and not line.lstrip().startswith(COMMENT_INDICATORS)
                    and not (line.rstrip().endswith(DOCSTRING_INDICATORS)
                             and "=" not in line)):
                add_line_separator = line_separator or "\n"
                import_section = add_line_separator.join(
                    add_imports) + add_line_separator
                if end_of_file and index != 0:
                    output_stream.write(add_line_separator)
                contains_imports = True
                add_imports = []

            if next_import_section and not import_section:  # pragma: no cover
                raw_import_section = import_section = next_import_section
                next_import_section = ""

            if import_section:
                if add_imports and (contains_imports
                                    or not config.append_only) and not indent:
                    import_section = (line_separator.join(add_imports) +
                                      line_separator + import_section)
                    contains_imports = True
                    add_imports = []

                if not indent:
                    import_section += line
                    raw_import_section += line
                if not contains_imports:
                    output_stream.write(import_section)

                else:
                    leading_whitespace = import_section[:-len(import_section.
                                                              lstrip())]
                    trailing_whitespace = import_section[len(import_section.
                                                             rstrip()):]
                    if first_import_section and not import_section.lstrip(
                            line_separator).startswith(COMMENT_INDICATORS):
                        import_section = import_section.lstrip(line_separator)
                        raw_import_section = raw_import_section.lstrip(
                            line_separator)
                        first_import_section = False

                    if indent:
                        import_section = "".join(
                            line[len(indent):]
                            for line in import_section.splitlines(
                                keepends=True))

                    parsed_content = parse.file_contents(import_section,
                                                         config=config)
                    verbose_output += parsed_content.verbose_output

                    sorted_import_section = output.sorted_imports(
                        parsed_content,
                        _indented_config(config, indent),
                        extension,
                        import_type="cimport" if cimports else "import",
                    )
                    if not (import_section.strip()
                            and not sorted_import_section):
                        if indent:
                            sorted_import_section = (
                                leading_whitespace + textwrap.indent(
                                    sorted_import_section, indent).strip() +
                                trailing_whitespace)

                        made_changes = made_changes or _has_changed(
                            before=raw_import_section,
                            after=sorted_import_section,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        output_stream.write(sorted_import_section)
                        if not line and not indent and next_import_section:
                            output_stream.write(line_separator)

                if indent:
                    output_stream.write(line)
                    if not next_import_section:
                        indent = ""

                if next_import_section:
                    cimports = next_cimports
                    contains_imports = True
                else:
                    contains_imports = False
                import_section = next_import_section
                next_import_section = ""
            else:
                output_stream.write(line)
                not_imports = False

            if stripped_line and not in_quote and not import_section and not next_import_section:
                if stripped_line == "yield":
                    while not stripped_line or stripped_line == "yield":
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

                if stripped_line.startswith(
                        "raise") or stripped_line.startswith("yield"):
                    while stripped_line.endswith("\\"):
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

    if made_changes and config.only_modified:
        for output_str in verbose_output:
            print(output_str)

    return made_changes
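A minimal round trip through this entry point; note that process() is isort-internal plumbing, and the stable public wrapper for strings is isort.code():

from io import StringIO

from isort import Config
from isort.core import process

unsorted = StringIO("import os\nimport json\n\nprint(json, os)\n")
result = StringIO()
changed = process(unsorted, result, extension="py", config=Config())
print(changed)           # True: 'import json' belongs before 'import os'
print(result.getvalue())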
Example #28
    def _generate_header_body(self, header: Header, module_name: str,
                              d: TextIO, dst: pathlib.Path,
                              namespace: str) -> None:
        functions = []
        for node in header.nodes:
            if node.name in self.rename_map:
                node.name = self.rename_map[node.name]

            if isinstance(node, EnumNode):

                # separate file
                with (dst.parent / f'{node.name}.cs').open(
                        "w", encoding='utf-8') as dd:
                    dd.write(f'/// {dst.stem}.h')
                    with namespace_context(dd, namespace):
                        write_enum(dd, node)

            elif isinstance(node, TypedefNode):
                if node.typedef_type.type in self.rename_map:
                    continue
                write_alias(d, node)
                d.write('\n')

            elif isinstance(node, StructNode):
                if node.is_forward:
                    continue
                if node.name[0] == 'C':  # class
                    continue
                if (self.name_count.get(node.name, 0) > 1
                        and len(node.methods) == 0 and not node.base
                        and len(node.fields) == 0):
                    print(f'forward decl: {node.name}')
                    # maybe forward declaration
                    continue
                snippet = struct_map.get(node.name)

                # separate file
                with (dst.parent / f'{node.name}.cs').open(
                        "w", encoding='utf-8') as dd:
                    dd.write(f'/// {dst.stem}.h')
                    with namespace_context(dd, namespace):
                        if snippet:
                            # replace
                            dd.write(snippet)
                        else:
                            write_struct(dd, node)

            elif isinstance(node, FunctionNode):
                functions.append(node)

        if functions:
            d.write(f'public static class {module_name.upper()}{{\n')
            for m in header.macro_defnitions:
                write_const(d, m)
            dll = dll_map.get(header.name)

            used_function = set()
            for f in functions:
                if f.name in used_function:
                    continue
                used_function.add(f.name)

                if f.name in ['D3DDisassemble10Effect']:
                    # ignore
                    continue

                func = func_map.get(f.name)
                if func:
                    # replace
                    d.write(func)
                else:
                    write_function(d, f, '', extern=dll)
                d.write('\n')
            d.write('}\n')
Example #29
def write_struct(d: TextIO, node: StructNode) -> None:
    if node.name[0] == 'I':
        # com interface
        base = node.base

        if node.methods:
            d.write(f'[Annotation(MethodCount={len(node.methods)})]\n')
        if not base or base == 'IUnknown':
            # IUnknown
            d.write(f'public class {node.name} : ComPtr{{\n')
        else:
            d.write(f'public class {node.name}: {base} {{\n')

        d.write(f'''
    static /*readonly*/ Guid s_uuid = new Guid("{node.iid}");
    public override ref /*readonly*/ Guid IID => ref s_uuid;
''')
        # static int MethodCount => {len(node.methods)};
        if node.methods:
            d.write(f'''
    int VTableIndexBase => VTableIndexBase<{node.name}>.Value;
''')

        for i, m in enumerate(node.methods):
            write_function(d, m, '    ', index=i)
        d.write(f'}}\n')
    else:

        d.write(f'[Annotation(Size={node.size})]\n')
        if any(x.field_type == 'union' for x in node.fields):
            # include union
            d.write(
                '[StructLayout(LayoutKind.Explicit, CharSet = CharSet.Unicode)]\n'
            )
            d.write(f'public struct {node.name}{{\n')
            offset = 0
            indent = '    '
            indent2 = indent + '    '
            for f in node.fields:
                if f.field_type == 'union':
                    d.write(f'{indent}#region union\n')
                    for x in f.fields:
                        d.write(f'{indent2}[FieldOffset({offset})]\n')
                        write_field(d, x, indent2)
                        d.write('\n')
                    d.write(f'{indent}#endregion\n')
                else:
                    d.write(f'{indent}[FieldOffset({offset})]\n')
                    write_field(d, f, indent)
                d.write('\n')
                offset += 4
            d.write(f'}}\n')

        else:

            d.write(
                '[StructLayout(LayoutKind.Sequential, CharSet=CharSet.Unicode)]\n'
            )
            d.write(f'public struct {node.name}{{\n')
            for f in node.fields:
                write_field(d, f, '    ')
                d.write('\n')
            d.write(f'}}\n')
Example #30
def title(f: TextIO, title: str):
    f.write(title)
    f.write('\n')
    f.write('-'*len(title))
    f.write('\n\n')
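This writes a reStructuredText-style section title, e.g.:

import io

buf = io.StringIO()
title(buf, 'Results')
print(buf.getvalue(), end='')
# Results
# -------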
Example #31
def main(
    script_path: Optional[str],
    stdout: TextIO,
    stderr: TextIO,
    args: Optional[List[str]] = None,
) -> None:
    """Main entry point to the type checker.

    Args:
        script_path: Path to the 'mypy' script (used for finding data files).
        args: Custom command-line arguments.  If not given, sys.argv[1:] will
        be used.
    """
    util.check_python_version('mypy')
    t0 = time.time()
    # To log stat() calls: os.stat = stat_proxy
    sys.setrecursionlimit(2**14)
    if args is None:
        args = sys.argv[1:]

    fscache = FileSystemCache()
    sources, options = process_options(args,
                                       stdout=stdout,
                                       stderr=stderr,
                                       fscache=fscache)

    messages = []
    formatter = util.FancyFormatter(stdout, stderr, options.show_error_codes)

    def flush_errors(new_messages: List[str], serious: bool) -> None:
        messages.extend(new_messages)
        f = stderr if serious else stdout
        try:
            for msg in new_messages:
                if options.color_output:
                    msg = formatter.colorize(msg)
                f.write(msg + '\n')
            f.flush()
        except BrokenPipeError:
            sys.exit(2)

    serious = False
    blockers = False
    res = None
    try:
        # Keep a dummy reference (res) for memory profiling below, as otherwise
        # the result could be freed.
        res = build.build(sources, options, None, flush_errors, fscache,
                          stdout, stderr)
    except CompileError as e:
        blockers = True
        if not e.use_stdout:
            serious = True
    if options.warn_unused_configs and options.unused_configs and not options.incremental:
        print("Warning: unused section(s) in %s: %s" %
              (options.config_file, ", ".join(
                  "[mypy-%s]" % glob
                  for glob in options.per_module_options.keys()
                  if glob in options.unused_configs)),
              file=stderr)
    if options.junit_xml:
        t1 = time.time()
        py_version = '{}_{}'.format(options.python_version[0],
                                    options.python_version[1])
        util.write_junit_xml(t1 - t0, serious, messages, options.junit_xml,
                             py_version, options.platform)

    if MEM_PROFILE:
        from mypy.memprofile import print_memory_profile
        print_memory_profile()
    del res  # Now it's safe to delete

    code = 0
    if messages:
        code = 2 if blockers else 1
    if options.error_summary:
        if messages:
            n_errors, n_files = util.count_stats(messages)
            if n_errors:
                stdout.write(
                    formatter.format_error(n_errors, n_files, len(sources),
                                           options.color_output) + '\n')
        else:
            stdout.write(
                formatter.format_success(len(sources), options.color_output) +
                '\n')
        stdout.flush()
    if options.fast_exit:
        # Exit without freeing objects -- it's faster.
        #
        # NOTE: We don't flush all open files on exit (or run other destructors)!
        util.hard_exit(code)
    elif code:
        sys.exit(code)
Example #32
    def _save_file_handle(self, f: typing.TextIO) -> None:
        f.write(self.__str__())
        f.truncate()
Example #33
    def write(self, stream: TextIO) -> None:
        """Write style data to file-like object `stream`."""
        index = self.index
        stream.write(" %d{\n" % index)
        stream.write('  name="%s\n' % self.name)
        stream.write('  localized_name="%s\n' % self.localized_name)
        stream.write('  description="%s\n' % self.description)
        stream.write("  color=%d\n" % self._color)
        if self._color != OBJECT_COLOR:
            stream.write("  mode_color=%d\n" % self._mode_color)
        stream.write("  color_policy=%d\n" % self._color_policy)
        stream.write("  physical_pen_number=%d\n" % self.physical_pen_number)
        stream.write("  virtual_pen_number=%d\n" % self.virtual_pen_number)
        stream.write("  screen=%d\n" % self.screen)
        stream.write("  linepattern_size=%s\n" % str(self.linepattern_size))
        stream.write("  linetype=%d\n" % self.linetype)
        stream.write(
            "  adaptive_linetype=%s\n"
            % str(bool(self.adaptive_linetype)).upper()
        )
        stream.write("  lineweight=%s\n" % str(self.lineweight))
        stream.write("  fill_style=%d\n" % self.fill_style)
        stream.write("  end_style=%d\n" % self.end_style)
        stream.write("  join_style=%d\n" % self.join_style)
        stream.write(" }\n")
Example #34
    def _gen_conf(self, factory: str, f: TextIO, no_sysctl: bool):
        intf = """
[Interface]
Address = {addr}
ListenPort = {port}
PrivateKey = {key}
SaveConfig = false

PostUp = iptables -t nat -A POSTROUTING -o {intf} -j MASQUERADE
PostUp = iptables -A FORWARD -i %i -j ACCEPT
        """.format(key=self.privkey,
                   addr=self.addr,
                   port=self.port,
                   intf="TODO")
        f.write(intf.strip())
        f.write("\n")
        if not no_sysctl:
            f.write("PostUp = sysctl -q -w net.ipv4.ip_forward=1\n\n")
            f.write("PostDown = sysctl -q -w net.ipv4.ip_forward=0\n")

        intf = """
PostDown = iptables -D FORWARD -i %i -j ACCEPT
PostDown = iptables -t nat -D POSTROUTING -o {intf} -j MASQUERADE
        """.format(intf="TODO")
        f.write(intf.strip())
        f.write("\n")

        for device in FactoryDevice.iter_vpn_enabled(factory, self.api):
            peer = """# {name}
[Peer]
PublicKey = {key}
AllowedIPs = {ip}
            """.format(name=device.name, key=device.pubkey, ip=device.ip)
            f.write(peer.strip())
            f.write("\n")
Example #35
 def _write_lineweights(self, stream: TextIO) -> None:
     """Write custom lineweight table to text `stream`."""
     stream.write("custom_lineweight_table{\n")
     for index, weight in enumerate(self.lineweights):
         stream.write(" %d=%.2f\n" % (index, weight))
     stream.write("}\n")
Example #36
def write_function(d: TextIO,
                   m: FunctionNode,
                   indent='',
                   extern='',
                   index=-1) -> None:
    ret = cs_type(m.ret, False) if m.ret else 'void'
    params = [(cs_type(p.param_type, True), p.param_name) for p in m.params]

    if extern:
        d.write(
            f'[DllImport("{extern}", CallingConvention = CallingConvention.StdCall)]\n'
        )
        d.write(f'{indent}public static extern {ret} {m.name}(\n')
    else:
        # for com interface
        d.write(f'{indent}public {ret} {m.name}(\n')

    # params
    indent2 = indent + '    '
    is_first = True
    for p in m.params:
        if is_first:
            is_first = False
            comma = ''
        else:
            comma = ', '
        d.write(f'{indent2}/// {p}\n')
        d.write(f'{indent2}{comma}{type_with_name(p)}\n')
    d.write(f'{indent})')

    if extern:
        d.write(';\n')
    else:
        # function body extends ComPtr(IUnknown represent)
        d.write('\n')
        d.write(f'''{indent}{{
{indent2}var fp = GetFunctionPointer(VTableIndexBase + {index});
{indent2}var callback = ({m.name}Func)Marshal.GetDelegateForFunctionPointer(fp, typeof({m.name}Func));
{indent2}{'return ' if ret!='void' else ''}callback(Self{''.join(', ' + ref_with_name(p) for p in m.params)});
{indent}}}
{indent}delegate {ret} {m.name}Func(IntPtr self{''.join(', ' + type_with_name(p) for p in m.params)});
''')
Example #37
 def _write_plot_styles(self, stream: TextIO) -> None:
     """Write user styles to text `stream`."""
     stream.write("plot_style{\n")
     for style in self:
         style.write(stream)
     stream.write("}\n")
Example #38
 def read_head(file_obj: TextIO) -> Tuple[Tuple[str, str], TextIO]:
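     # Assumes the first four lines always form a valid header; if
     # header_pat fails to match, `match` is None and the group()
     # calls below raise AttributeError.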
     match = header_pat.match("".join(file_obj.readline()
                                      for _ in range(4)))
     return (match.group(1), match.group(2)), file_obj
Example #39
def dwd_close_file(file: TextIO) -> None:
    """Close cache file."""
    print(']', file=file)
    file.close()
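
The lone ']' implies the cache file holds one JSON array whose opening bracket and entries were written earlier. A hedged sketch of what the matching helpers might look like (the names dwd_open_file and dwd_append and the comma handling are assumptions, not the project's actual code):

import json
import typing

def dwd_open_file(path: str) -> typing.TextIO:
    """Open a cache file and start the JSON array."""
    file = open(path, 'w', encoding='utf-8')
    print('[', file=file)
    return file

def dwd_append(file: typing.TextIO, record: dict, *, first: bool) -> None:
    """Write one record; every record after the first needs a leading comma."""
    print(('' if first else ',') + json.dumps(record), file=file)
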
Example #40
 def dump(self, fp: TextIO, obj: Any) -> None:
     """
     Dump an object/resource to JSON file (or file like) object
     """
     for chunk in self._encoder.iterencode(obj):
         fp.write(chunk)
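
iterencode() yields the JSON output in chunks instead of building one large string, which keeps memory flat when dumping big objects to a file. A self-contained sketch of the same idea (plain json.JSONEncoder here; the example's self._encoder is presumably configured elsewhere):

import io
import json

encoder = json.JSONEncoder(indent=2)
buf = io.StringIO()  # stands in for any TextIO target
for chunk in encoder.iterencode({'items': list(range(5))}):
    buf.write(chunk)
print(buf.getvalue())
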
Example #41
 def _main(model_file: TextIO) -> None:
     model_json = jsonutil.loads(model_file.read())
     model = clgen.Model.from_json(model_json)
     model.train()
     log.info("done.")
Example #42
def writeline_nl(fileobj: TextIO, line: str) -> None:
    fileobj.write(line + '\n')
Example #43
def upload(string_args: Optional[list[str]] = None,
           out: TextIO = sys.stdout) -> None:
    """Upload city suggestions to Algolia index."""

    parser = argparse.ArgumentParser(
        description=
        'Upload a Geonames cities dataset to Algolia for city suggest')

    parser.add_argument(
        '--cities-with-zip',
        help='Path to the txt file containing US cities and their ZIP codes',
        default='data/usa/cities_with_zip.txt')
    parser.add_argument(
        '--population-by-zip',
        help='Path to the txt file containing population count by zip code.',
        default='data/usa/population_by_zip_codes.txt')
    parser.add_argument(
        '--states-fips-codes',
        help=
        'Path to the csv file containing the correspondence between state FIPS and ISO codes,'
        ' if needed.',
        default='')
    parser.add_argument('--algolia-app-id',
                        help='ID of the Algolia app to upload to.',
                        default='K6ACI9BKKT')
    parser.add_argument('--algolia-api-key',
                        help='Algolia API key.',
                        default=os.getenv('ALGOLIA_API_KEY'))
    parser.add_argument('--algolia-index',
                        help='Name of the Algolia index to upload to.',
                        default='cities_US')
    parser.add_argument(
        '--batch-size',
        help='Number of suggestions to upload to Algolia per batch.',
        default=5000,
        type=int)

    args = parser.parse_args(string_args)

    batch_size = args.batch_size

    city_population = _get_city_population(args.population_by_zip)
    cities_with_population = get_cities_with_population(
        city_population, args.cities_with_zip)
    states_by_code = prepare_state_codes(args.states_fips_codes)

    suggestions = prepare_zip_cities(cities_with_population, states_by_code)

    client = search_client.SearchClient.create(args.algolia_app_id,
                                               args.algolia_api_key)
    index_name = args.algolia_index
    cities_index = client.init_index(index_name)
    tmp_index_name = f'{index_name}_{round(time.time())}'
    tmp_cities_index = client.init_index(tmp_index_name)

    try:
        tmp_cities_index.set_settings(cities_index.get_settings())
        for start in range(0, len(suggestions), batch_size):
            tmp_cities_index.save_objects(suggestions[start:start +
                                                      batch_size])

        # OK, we're finally ready to replace the index.
        client.move_index(tmp_index_name, index_name)
    except exceptions.AlgoliaException:
        tmp_cities_index.delete()
        out.write(json.dumps(suggestions[:10], indent=2))
        raise
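
Two details make this upload pattern robust: the settings are copied onto the temporary index before any objects are saved, and Algolia's move_index operation replaces the destination atomically, so searchers see either the old index or the fully built replacement, never a half-populated one. On failure, the temporary index is deleted and a sample of the payload is written to `out` for debugging before the exception propagates.
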
Example #44
    def print(self, f: TextIO):
        f.write(f'{self.result.nit} iterations\n')
        f.write(self.result.message)
        f.write('\n\n')
        self.diminishing_table(f, 'Recipe counts', self.result.x, self.rec_names, 4)

        # Initialize rates based on recipe and solution
        self.title(f, 'Resources')
        resources = np.empty(self.n_resources,
                             dtype=[
                                 ('rates', 'float64', (3,)),
                                 ('name', f'U{max(len(r) for r in self.res_names)}'),
                             ])
        rates = resources['rates']
        np.matmul(+self.recipes.clip(min=0), self.result.x, out=rates[:, 0])  # Produced
        np.matmul(-self.recipes.clip(max=0), self.result.x, out=rates[:, 1])  # Consumed
        np.matmul(+self.recipes,             self.result.x, out=rates[:, 2])  # Excess
        resources['name'] = self.res_names

        # Filter by rates above a small margin
        eps = 1e-13
        to_show = np.any(np.abs(rates) > eps, axis=1)
        resources = resources[to_show]
        rates = resources['rates']

        # Sort by produced, descending
        resources = resources[(-rates[:, 0]).argsort()]

        width = max(len(n) for n in resources['name'])
        titles = ('Produced', 'Consumed', 'Excess')
        name_fmt = f'{{:>{width}}} '

        f.write(name_fmt.format('Resource') + ' '.join(
            f'{t:>10}' for t in titles
        ))
        f.write('\n')
        for row in resources:
            f.write(name_fmt.format(row['name']))
            produced, consumed, excess = row['rates']
            if produced > eps:
                f.write(f'{produced:10.3e} '
                        f'{consumed/produced:10.1%} '
                        f'{excess/produced:10.1%}\n')
            else:
                f.write(f'{0:10} '
                        f'{consumed:10.3e}\n')
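
The three matmul calls above split each resource's rate by the sign of the recipe matrix: clip(min=0) keeps only what recipes produce, clip(max=0) (negated) only what they consume, and the unclipped product is the net excess. A toy illustration with a hypothetical 2-resource, 2-recipe matrix:

import numpy as np

# rows = resources, columns = recipes; positive = produced per run
recipes = np.array([[ 2.0, -1.0],
                    [-3.0,  4.0]])
x = np.array([1.0, 1.0])                 # runs of each recipe

produced = (+recipes.clip(min=0)) @ x    # [2., 4.]
consumed = (-recipes.clip(max=0)) @ x    # [1., 3.]
excess   = recipes @ x                   # [1., 1.] == produced - consumed
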
Example #45
def get_bitboard_by_fen_f(f_in: TextIO, f_out: TextIO):
    fen = f_in.readline().rstrip()
    board = get_bitboard_by_fen(fen)
    for line in board:
        print(line, file=f_out)
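
Because the function only needs text handles, it can be exercised with in-memory streams; a usage sketch (get_bitboard_by_fen_f as defined above, the FEN is the standard starting position):

import io

f_in = io.StringIO('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\n')
f_out = io.StringIO()
get_bitboard_by_fen_f(f_in, f_out)
print(f_out.getvalue())
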
Example #46
 def export(self, sounds: Iterable['Sound'], file: TextIO):
     """Write SoundScripts to a file.
     
     Pass a file-like object open for text writing, and an iterable
     of Sounds to write to the file.
     """
     for snd in sounds:
         file.write('"{}"\n\t{{\n'.format(snd.name))
         
         file.write('\t' 'channel {}\n'.format(snd.channel.value))
         
         file.write('\t' 'soundlevel {}\n'.format(join_float(snd.level)))
         
         if snd.volume != (1, 1):
             file.write('\tvolume {}\n'.format(join_float(snd.volume)))
         if snd.pitch != (100, 100):
             file.write('\tpitch {}\n'.format(join_float(snd.pitch)))
         
         if len(snd.sounds) > 1:
             file.write('\trndwave\n\t\t{\n')
             for wav in snd.sounds:
                 file.write('\t\twave "{}"\n'.format(wav))
             file.write('\t\t}\n')
         else:
             file.write('\twave "{}"\n'.format(snd.sounds[0]))
         
         if snd.stack_start or snd.stack_stop or snd.stack_update:
             file.write(
                 '\t' 'soundentry_version 2\n'
                 '\t' 'operator_stacks\n'
                 '\t\t' '{\n'
             )
             if snd.stack_start:
                 file.write(
                     '\t\t' 'start_stack\n'
                     '\t\t\t' '{\n'
                 )
                 for prop in snd.stack_start:
                     for line in prop.export():
                         file.write('\t\t\t' + line)
                 file.write('\t\t\t}\n')
             if snd.stack_update:
                 file.write(
                     '\t\t' 'update_stack\n'
                     '\t\t\t' '{\n'
                 )
                 for prop in snd.stack_update:
                     for line in prop.export():
                         file.write('\t\t\t' + line)
                 file.write('\t\t\t}\n')
             if snd.stack_stop:
                 file.write(
                     '\t\t' 'stop_stack\n'
                     '\t\t\t' '{\n'
                 )
                 for prop in snd.stack_stop:
                     for line in prop.export():
                         file.write('\t\t\t' + line)
                 file.write('\t\t\t}\n')
             file.write('\t\t}\n')
         file.write('\t}\n')
Example #47
 def _read_file_handle(self, f: typing.TextIO) -> None:
     self.parse_lines(f.read())
Example #48
def read_game(handle: TextIO, *, Visitor: Callable[[], BaseVisitor[ResultT]] = GameBuilder) -> Optional[ResultT]:
    """
    Reads a game from a file opened in text mode.

    >>> import chess.pgn
    >>>
    >>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn")
    >>>
    >>> first_game = chess.pgn.read_game(pgn)
    >>> second_game = chess.pgn.read_game(pgn)
    >>>
    >>> first_game.headers["Event"]
    'IBM Man-Machine, New York USA'
    >>>
    >>> # Iterate through all moves and play them on a board.
    >>> board = first_game.board()
    >>> for move in first_game.mainline_moves():
    ...     board.push(move)
    ...
    >>> board
    Board('4r3/6P1/2p2P1k/1p6/pP2p1R1/P1B5/2P2K2/3r4 b - - 0 45')

    By using text mode, the parser does not need to handle encodings. It is the
    caller's responsibility to open the file with the correct encoding.
    PGN files are usually ASCII or UTF-8 encoded. So, the following should
    cover most relevant cases (ASCII, UTF-8, UTF-8 with BOM).

    >>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn", encoding="utf-8-sig")

    Use :class:`~io.StringIO` to parse games from a string.

    >>> import io
    >>>
    >>> pgn = io.StringIO("1. e4 e5 2. Nf3 *")
    >>> game = chess.pgn.read_game(pgn)

    The end of a game is determined by a completely blank line or the end of
    the file. (Of course, blank lines in comments are possible).

    According to the PGN standard, at least the usual 7 header tags are
    required for a valid game. This parser also handles games without any
    headers just fine.

    The parser is relatively forgiving when it comes to errors. It skips over
    tokens it can not parse. Any exceptions are logged and collected in
    :data:`Game.errors <chess.pgn.Game.errors>`. This behavior can be
    :func:`overridden <chess.pgn.GameBuilder.handle_error>`.

    Returns the parsed game or ``None`` if the end of file is reached.
    """
    visitor = Visitor()

    found_game = False
    skipping_game = False
    headers = None
    managed_headers = None

    # Ignore leading empty lines and comments.
    line = handle.readline().lstrip("\ufeff")
    while line.isspace() or line.startswith("%") or line.startswith(";"):
        line = handle.readline()

    # Parse game headers.
    while line:
        # Ignore comments.
        if line.startswith("%") or line.startswith(";"):
            line = handle.readline()
            continue

        # First token of the game.
        if not found_game:
            found_game = True
            skipping_game = visitor.begin_game() is SKIP
            if not skipping_game:
                managed_headers = visitor.begin_headers()
                if not isinstance(managed_headers, Headers):
                    managed_headers = None
                    headers = Headers({})

        if not line.startswith("["):
            break

        if not skipping_game:
            tag_match = TAG_REGEX.match(line)
            if tag_match:
                visitor.visit_header(tag_match.group(1), tag_match.group(2))
                if headers is not None:
                    headers[tag_match.group(1)] = tag_match.group(2)
            else:
                break

        line = handle.readline()

    if not found_game:
        return None

    if not skipping_game:
        skipping_game = visitor.end_headers() is SKIP

    # Ignore single empty line after headers.
    if line.isspace():
        line = handle.readline()

    if not skipping_game:
        # Chess variant.
        headers = managed_headers if headers is None else headers
        try:
            VariantBoard = headers.variant()
        except ValueError as error:
            visitor.handle_error(error)
            VariantBoard = chess.Board

        # Initial position.
        fen = headers.get("FEN", VariantBoard.starting_fen)
        try:
            board_stack = [VariantBoard(fen, chess960=headers.is_chess960())]
        except ValueError as error:
            visitor.handle_error(error)
            skipping_game = True
        else:
            visitor.visit_board(board_stack[0])

    # Fast path: Skip entire game.
    if skipping_game:
        in_comment = False

        while line:
            if not in_comment:
                if line.isspace():
                    break
                elif line.startswith("%"):
                    line = handle.readline()
                    continue

            for match in SKIP_MOVETEXT_REGEX.finditer(line):
                token = match.group(0)
                if token == "{":
                    in_comment = True
                elif not in_comment and token == ";":
                    break
                elif token == "}":
                    in_comment = False

            line = handle.readline()

        visitor.end_game()
        return visitor.result()

    # Parse movetext.
    skip_variation_depth = 0
    while line:
        read_next_line = True

        # Ignore comments.
        if line.startswith("%") or line.startswith(";"):
            line = handle.readline()
            continue

        # An empty line means the end of a game.
        if line.isspace():
            visitor.end_game()
            return visitor.result()

        for match in MOVETEXT_REGEX.finditer(line):
            token = match.group(0)

            if token.startswith("{"):
                # Consume until the end of the comment.
                line = token[1:]
                comment_lines = []
                while line and "}" not in line:
                    comment_lines.append(line.rstrip())
                    line = handle.readline()
                end_index = line.find("}")
                comment_lines.append(line[:end_index])
                if "}" in line:
                    line = line[end_index + 1:]
                else:
                    line = ""

                if not skip_variation_depth:
                    visitor.visit_comment("\n".join(comment_lines).strip())

                # Continue with the current or the next line.
                if line:
                    read_next_line = False
                break
            elif token == "(":
                if skip_variation_depth:
                    skip_variation_depth += 1
                elif board_stack[-1].move_stack:
                    if visitor.begin_variation() is SKIP:
                        skip_variation_depth = 1
                    else:
                        board = board_stack[-1].copy()
                        board.pop()
                        board_stack.append(board)
            elif token == ")":
                if skip_variation_depth:
                    skip_variation_depth -= 1
                if len(board_stack) > 1:
                    visitor.end_variation()
                    board_stack.pop()
            elif skip_variation_depth:
                continue
            elif token.startswith(";"):
                break
            elif token.startswith("$"):
                # Found a NAG.
                visitor.visit_nag(int(token[1:]))
            elif token == "?":
                visitor.visit_nag(NAG_MISTAKE)
            elif token == "??":
                visitor.visit_nag(NAG_BLUNDER)
            elif token == "!":
                visitor.visit_nag(NAG_GOOD_MOVE)
            elif token == "!!":
                visitor.visit_nag(NAG_BRILLIANT_MOVE)
            elif token == "!?":
                visitor.visit_nag(NAG_SPECULATIVE_MOVE)
            elif token == "?!":
                visitor.visit_nag(NAG_DUBIOUS_MOVE)
            elif token in ["1-0", "0-1", "1/2-1/2", "*"] and len(board_stack) == 1:
                visitor.visit_result(token)
            else:
                # Parse SAN tokens.
                try:
                    move = visitor.parse_san(board_stack[-1], token)
                except ValueError as error:
                    visitor.handle_error(error)
                    skip_variation_depth = 1
                else:
                    visitor.visit_move(board_stack[-1], move)
                    board_stack[-1].push(move)
                visitor.visit_board(board_stack[-1])

        if read_next_line:
            line = handle.readline()

    visitor.end_game()
    return visitor.result()
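
Since read_game() returns None at end of file, the usual consumption pattern is to loop until that sentinel; a short sketch (the PGN path is hypothetical):

import chess.pgn

with open('games.pgn') as pgn:
    while True:
        game = chess.pgn.read_game(pgn)
        if game is None:
            break
        print(game.headers.get('Event', '?'))
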
Example #49
 def render(self, out: TextIO) -> None:
     out.write("}\n")
Example #50
File: orm.py Project: fantix/authub
 def compile(self, buf: TextIO):
     if self.required:
         buf.write("required ")
     print(f"property {self.name} := ({self.expression});", file=buf)
Example #51
def write_alias(d: TextIO, node: TypedefNode) -> None:
    if node.name.startswith('PFN_'):
        # function pointer workaround
        d.write(
            '[StructLayout(LayoutKind.Sequential, CharSet=CharSet.Unicode)]\n')
        d.write(f'public struct {node.name}{{\n')
        d.write('    public IntPtr Value;\n')
        d.write('}\n')
    else:
        typedef_type = cs_type(node.typedef_type, False)
        if node.name == typedef_type:
            return
        if node.name.startswith('D2D1_') and typedef_type.startswith(
                'D2D_') and node.name[5:] == typedef_type[4:]:
            return
        if node.name.startswith('D3D11_') and typedef_type.startswith(
                'D3D_') and node.name[6:] == typedef_type[4:]:
            return
        if node.name in type_map.keys():
            return
        d.write(
            '[StructLayout(LayoutKind.Sequential, CharSet=CharSet.Unicode)]\n')
        d.write(f'public struct {node.name}{{\n')
        d.write(f'    public {typedef_type} Value;\n')
        d.write('}\n')
Example #52
def _write_type_definition(typedef: Typedef, fid: TextIO) -> None:
    """
    Write the type definition in the Typescript code.

    :param typedef: to be declared
    :param fid: target
    :return:
    """
    if typedef.identifier == '':
        raise ValueError(
            "Expected a typedef with an identifier, but got a typedef with an empty identifier."
        )

    if typedef.description:
        _write_description(description=typedef.description, indent='', fid=fid)
        fid.write('\n')

    if isinstance(typedef, Classdef):
        fid.write("export interface {} {{\n".format(typedef.identifier))
        for i, prop in enumerate(typedef.properties.values()):
            if i > 0:
                fid.write('\n')

            if prop.description:
                _write_description(description=prop.description,
                                   indent=INDENT,
                                   fid=fid)
                fid.write("\n")

            type_expr = _type_expression(typedef=prop.typedef,
                                         path='{}.{}'.format(
                                             typedef.identifier, prop.name))

            if not prop.required:
                fid.write(INDENT + '{}?: {};\n'.format(prop.name, type_expr))
            else:
                fid.write(INDENT + '{}: {};\n'.format(prop.name, type_expr))

        fid.write("}")
    else:
        fid.write("type {} = {};".format(
            typedef.identifier,
            _type_expression(typedef=typedef, path=typedef.identifier)))
Example #53
def main(
    siblings: bool,
    ancestor_siblings: bool,
    max_ancestor_generations: int,
    max_descendant_generations: int,
    dynamic_generation_limits: bool,
    verbose: int,
    gedcom_file: BinaryIO,
    xref_id: str,
    output_file: TextIO,
):
    """
    Create databases for genealogytree from GEDCOM files.

    The LaTeX genealogytree package (GTR) provides tools for including
    genealogy trees in LaTeX documents. One way of doing that is by
    storing the genealogical information in a GTR-specific database
    file. This tool allows you to create such databases from GEDCOM
    files (GEDCOM is a popular file format for storing genealogical
    information).

    The input file (GEDCOM_FILE, use "-" for STDIN) is read, and a GTR
    database is written to OUTPUT_FILE (usually has a ".graph"
    extension, defaults to STDOUT). The GTR database contains a
    "sandclock" node for the person with the given GEDCOM XREF-ID.

    The database file can then be used in LaTeX as follows:

    \b
        \\documentclass{article}
        \\usepackage[utf8]{inputenc}
        \\usepackage[all]{genealogytree}
        \\begin{document}
            \\begin{genealogypicture}[template=database pole]
                input{my-database.graph}  % Change filename accordingly
            \\end{genealogypicture}
        \\end{document}
    """
    log.addHandler(logging.StreamHandler())
    if verbose > 1:
        log.setLevel(logging.DEBUG)
    elif verbose > 0:
        log.setLevel(logging.INFO)

    def error(s: str):
        if verbose > 1:
            logging.exception(s)
            sys.exit(1)
        sys.exit(s)

    try:
        persons, families = load_gedcom(gedcom_file)
    except Exception as e:
        error(f'Could not load GEDCOM data: {e}')

    try:
        person = persons[xref_id.replace('@', '')]
    except KeyError:
        error(f'No person with XREF-ID "{xref_id}"')

    if dynamic_generation_limits:
        num_ancestor_generations = person.count_ancestor_generations()
        log.debug(f'{num_ancestor_generations} ancestor generations')
        num_descendant_generations = person.count_descendant_generations()
        log.debug(f'{num_descendant_generations} descendant generations')

        if ((num_ancestor_generations > max_ancestor_generations) == (
                num_descendant_generations > max_descendant_generations)):
            # Limit is broken in neither direction or in both directions
            pass
        elif num_ancestor_generations > max_ancestor_generations:
            remaining = max_descendant_generations - num_descendant_generations
            if remaining:
                log.debug(f'Dynamically increasing max_ancestor_generations by '
                          f'{remaining}')
                max_ancestor_generations += remaining
        else:  # num_descendant_generations > max_descendant_generations
            remaining = max_ancestor_generations - num_ancestor_generations
            if remaining:
                log.debug(
                    f'Dynamically increasing max_descendant_generations by '
                    f'{remaining}')
                max_descendant_generations += remaining

    output_file.write(
        sandclock(
            person,
            siblings,
            ancestor_siblings,
            max_ancestor_generations,
            max_descendant_generations,
        ))
Example #54
 def stuff(a: TextIO) -> str:
     return a.readline()
Example #55
def _write_request(request: Request, fid: TextIO) -> None:
    """
    Generate the code of the request function.

    :param request: function definition
    :param fid: target
    :return:
    """
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-branches
    description = 'Sends a request to the endpoint: {} {}'.format(
        request.path, request.method)
    if request.description:
        description += '\n\n' + request.description
    _write_description(description, INDENT, fid)
    fid.write('\n')

    prefix = INDENT + 'public {}('.format(request.operation_id)

    args = []  # type: List[str]
    for param in request.parameters:
        args.append('{}{}: {}'.format(param.name,
                                      '?' if not param.required else '',
                                      _type_expression(typedef=param.typedef)))

    return_type = ''
    if '200' in request.responses:
        resp = request.responses['200']
        if resp.typedef is not None:
            return_type = _type_expression(typedef=resp.typedef)

    if not return_type:
        return_type = 'any'

    suffix = '): Observable<{} | HttpErrorResponse> {{'.format(return_type)

    line = prefix + ', '.join(args) + suffix
    if len(line) <= 120:
        fid.write(line)
    else:
        fid.write(prefix)
        fid.write('\n')

        for i, arg in enumerate(args):
            fid.write(3 * INDENT + arg)

            if i < len(args) - 1:
                fid.write(',\n')

        fid.write(suffix)
    fid.write('\n')

    name_to_parameters = dict([(param.name, param)
                               for param in request.parameters])

    rel_path = request.path[1:] if request.path.startswith(
        '/') else request.path

    # path parameters
    token_pth = swagger_to.tokenize_path(path=rel_path)

    if not token_pth.parameter_to_token_indices and not request.query_parameters:
        fid.write(INDENT * 2 +
                  'const url = this.url_prefix + "{}";\n'.format(rel_path))
    else:
        if not token_pth.parameter_to_token_indices:
            fid.write(INDENT * 2 +
                      'let url = this.url_prefix + "{}";'.format(rel_path))
        else:
            fid.write(INDENT * 2 + 'let url = this.url_prefix;')
            for i, tkn in enumerate(token_pth.tokens):
                fid.write("\n")

                if i in token_pth.token_index_to_parameter:
                    param_name = token_pth.token_index_to_parameter[i]
                    param = name_to_parameters[param_name]

                    fid.write(INDENT * 2 +
                              'url += encodeURIComponent({});'.format(
                                  _to_string_expression(typedef=param.typedef,
                                                        variable=param.name)))
                else:
                    fid.write(
                        INDENT * 2 + 'url += encodeURIComponent("{}");'.format(
                            tkn.replace('\\', '\\\\').replace('\n', '\\n').
                            replace('\r', '\\r').replace('\t', '\\t')))

    if request.path_parameters and request.query_parameters:
        fid.write("\n")

    # query parameters
    if request.query_parameters:
        fid.write('\n')
        fid.write(INDENT * 2 + 'url += "?";\n')

        for i, param in enumerate(request.query_parameters):
            amp = ''  # used to concatenate query parameters
            if i > 0:
                fid.write("\n\n")
                amp = '&'

            if param.required:
                fid.write(INDENT * 2 +
                          'url += "{}{}=" + encodeURIComponent({});'.format(
                              amp, param.name,
                              _to_string_expression(typedef=param.typedef,
                                                    variable=param.name)))
            else:
                fid.write(INDENT * 2 + 'if ({}) {{\n'.format(param.name))
                fid.write(INDENT * 3 +
                          'url += "{}{}=" + encodeURIComponent({});\n'.format(
                              amp, param.name,
                              _to_string_expression(typedef=param.typedef,
                                                    variable=param.name)))
                fid.write(INDENT * 2 + '}')

        fid.write('\n')

    fid.write('\n')

    mth = request.method.lower()
    if request.body_parameter is not None:
        if request.body_parameter.required:
            fid.write(INDENT * 2 +
                      'let observable = this.http.request(url, \n')
            fid.write(
                INDENT * 3 +
                'new RequestOptions({{method: "{0}", body: JSON.stringify({1})}}));\n'
                .format(mth, request.body_parameter.name))
        else:
            fid.write(INDENT * 2 + 'let observable: Observable<any>;\n')
            fid.write(INDENT * 2 +
                      'if ({}) {{\n'.format(request.body_parameter.name))
            fid.write(INDENT * 3 + 'observable = this.http.request(url, \n')
            fid.write(
                INDENT * 4 +
                'new RequestOptions({{method: "{0}", body: JSON.stringify({1})}}));\n'
                .format(mth, request.body_parameter.name))
            fid.write(INDENT * 2 + '} else {\n')
            fid.write(INDENT * 3 + 'observable = this.http.request(url, '
                      'new RequestOptions({{method: "{0}"}}));\n'.format(mth))
            fid.write(INDENT * 2 + '}\n')
    else:
        fid.write(INDENT * 2 +
                  'let observable = this.http.{}(url);\n'.format(mth))

    return_var = 'observable'
    if return_type != 'any':
        fid.write(
            INDENT * 2 +
            'let typed_observable = observable.map(res => (res.json() as {}));\n'
            .format(return_type))
        return_var = 'typed_observable'

    fid.write(INDENT * 2 + 'if (this.on_error) {\n')
    fid.write(
        INDENT * 3 +
        'return {0}.catch(err => this.on_error(err))\n'.format(return_var))
    fid.write(INDENT * 2 + '}\n')
    fid.write(INDENT * 2 + 'return {};\n'.format(return_var))

    fid.write(INDENT + '}')
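
Note that the emitted client targets the pre-4.3 Angular HTTP stack: Http, RequestOptions, res.json() and the dot-chained map/catch operators belong to @angular/http with RxJS 5, and would need porting to HttpClient and pipeable operators on current Angular versions.
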
Example #56
def _write_client(requests: List[Request], fid: TextIO) -> None:
    """
    Generate the client.

    :param requests: translated request functions
    :param fid: target
    :return:
    """
    fid.write("@Injectable()\n")
    fid.write("export class RemoteCaller {\n")
    fid.write(INDENT + "public url_prefix: string;\n")
    fid.write(
        INDENT +
        "public on_error: (error: HttpErrorResponse) => Observable<HttpErrorResponse> | null;\n"
    )
    fid.write("\n")
    fid.write(INDENT + "constructor(private http: Http) {\n")
    fid.write(INDENT * 2 + 'this.url_prefix = "";\n')
    fid.write(INDENT * 2 + 'this.on_error = null;\n')
    fid.write(INDENT + '}\n\n')
    fid.write(INDENT + "public set_url_prefix(url_prefix: string) {\n")
    fid.write(INDENT * 2 + "this.url_prefix = url_prefix;\n")
    fid.write(INDENT + '}\n\n')
    fid.write(
        INDENT + "public set_on_error("
        "on_error: (error: HttpErrorResponse) => Observable<HttpErrorResponse> | null) {\n"
    )
    fid.write(INDENT * 2 + "this.on_error = on_error;\n")
    fid.write(INDENT + '}')

    for request in requests:
        fid.write('\n\n')
        _write_request(request=request, fid=fid)

    fid.write("\n}")
Example #57
def generate(targets: List[Target], out: TextIO) -> None:
    uses_pkg_config = any(isinstance(t, Pkg) for t in targets)

    out.write(LISTS_PROLOGUE.format(**locals()))
    out.write(INSTALL_TARGETS)
    if uses_pkg_config:
        out.write('include(FindPkgConfig)\n')

    bde_targets = []
    for target in reversed(targets):
        if isinstance(target, Group) or isinstance(target, Package):
            generate_bde(target, out)
            if len(list(target.drivers())):
                bde_targets.append(target)
        elif isinstance(target, CMake):
            path = target.path()
            out.write('add_subdirectory({path} {target.name})\n'.format(
                                            **locals()).replace('\\', '/'))
        elif isinstance(target, Pkg):
            generate_pkg(target, out)

        if target.overrides:
            out.write(f'include({target.overrides})\n'.replace('\\', '/'))

    if bde_targets:
        out.write(ALL_TESTS_PROLOGUE)
        for target in bde_targets:
            out.write(f'    {target.name}.t\n')
        out.write(COMMAND_EPILOGUE)
Example #58
def generate_pkg(target: Pkg, out: TextIO) -> None:
    name    = target.name
    package = target.package
    out.write(PKG_CONFIG.format(**locals()))
Example #59
def generate_bde(target: BdeTarget, out: TextIO) -> None:
    out.write(LIBRARY_PROLOGUE.format(**locals()))
    for component in target.sources():
        out.write('    {}\n'.format(component).replace('\\', '/'))
    out.write(COMMAND_EPILOGUE)

    target_upper = target.name.upper()
    out.write(DEFINE_SYMBOL.format(**locals()))

    out.write(INCLUDE_DIRECTORIES_PROLOGUE.format(**locals()))
    for include in target.includes():
        out.write('    {}\n'.format(include).replace('\\', '/'))
    out.write(COMMAND_EPILOGUE)

    out.write(LINK_LIBRARIES_PROLOGUE.format(**locals()))
    for dependency in target.dependencies():
        if dependency.has_output:
            out.write('    {}\n'.format(dependency.name))
    out.write(COMMAND_EPILOGUE)

    if target.lazily_bound:
        out.write(LAZILY_BOUND_FLAG.format(**locals()))

    drivers = []
    for driver in target.drivers():
        name = os.path.splitext(os.path.basename(driver))[0]
        out.write(TESTING_DRIVER.format(**locals()).replace('\\', '/'))
        drivers.append(name)

    if drivers:
        out.write(TEST_TARGET_PROLOGUE.format(**locals()))
        for driver in drivers:
            out.write('    {}\n'.format(driver))
        out.write(COMMAND_EPILOGUE)

    out.write(INSTALL_HEADERS_PROLOGUE)
    for header in target.headers():
        out.write('    {}\n'.format(header).replace('\\', '/'))
    out.write(INSTALL_HEADERS_DESTINATION)
    out.write(COMMAND_EPILOGUE)

    out.write(INSTALL_LIBRARY.format(**locals()))
Example #60
def output_file(out: TextIO, fin: TextIO, keep_license: bool) -> None:
    skip = 0 if keep_license else LICENSE_LINES
    while True:
        line = fin.readline()
        if not line:
            break
        if skip:
            skip -= 1
            continue
        out.write(line)
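
A usage sketch for the function above (the LICENSE_LINES value and both file paths are assumptions for illustration):

LICENSE_LINES = 20  # assumed license-header length

with open('src/module.py') as fin, open('build/module.py', 'w') as out:
    output_file(out, fin, keep_license=False)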