Example #1
def pat_dependency(src_path: str, src_file: TextIO) -> str:
  '''
  Return the path of the single dependency.
  A .pat file always has a single dependency: the source file it patches.
  '''
  version_line = src_file.readline()  # the first line is a version header; read but not used here.
  orig_line = src_file.readline()
  orig_path = orig_line.strip()
  if not orig_path:
    failF('pat error: {}:2:1: line specifying original path is missing or empty.', src_path)
  return orig_path
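A minimal usage sketch (illustrative only: the version-line contents and file names are made up; the function reads and discards the first line, then returns the stripped second line):

import io

pat = io.StringIO("pat v0\noriginal.txt\n... patch hunks ...\n")
assert pat_dependency("example.pat", pat) == "original.txt"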
Example #2
def peek_line(reader: TextIO) -> str:
    pos = reader.tell()
    line = reader.readline()
    reader.seek(pos)
    return line
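A quick sketch of the behaviour; because `peek_line` relies on `tell`/`seek`, it only works on seekable streams such as regular files or `io.StringIO`, not on pipes:

import io

reader = io.StringIO("first\nsecond\n")
assert peek_line(reader) == "first\n"    # the cursor is restored...
assert reader.readline() == "first\n"    # ...so the next read sees the same line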
Example #3
def _sort_imports(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: str = "py",
    config: Config = DEFAULT_CONFIG,
) -> bool:
    """Parses stream identifying sections of contiguous imports and sorting them

    Code with unsorted imports is read from the provided `input_stream`, sorted and then
    outputted to the specified `output_stream`.

    - `input_stream`: Text stream with unsorted import sections.
    - `output_stream`: Text stream to output the sorted imports into.
    - `config`: Config settings to use when sorting imports.
        - *Default*: `isort.settings.DEFAULT_CONFIG`.
    - `extension`: The file extension or file extension rules that should be used.
        - *Default*: `"py"`.
        - *Choices*: `["py", "pyi", "pyx"]`.

    Returns `True` if the content provided in `input_stream` required changes (i.e. its
    imports were not already sorted), otherwise `False`.
    """
    line_separator: str = config.line_ending
    add_imports: List[str] = [
        format_natural(addition) for addition in config.add_imports
    ]
    import_section: str = ""
    next_import_section: str = ""
    next_cimports: bool = False
    in_quote: str = ""
    first_comment_index_start: int = -1
    first_comment_index_end: int = -1
    contains_imports: bool = False
    in_top_comment: bool = False
    first_import_section: bool = True
    section_comments = [
        f"# {heading}" for heading in config.import_headings.values()
    ]
    indent: str = ""
    isort_off: bool = False
    cimports: bool = False
    made_changes: bool = False

    if config.float_to_top:
        new_input = ""
        current = ""
        isort_off = False
        for line in chain(input_stream, (None, )):
            if isort_off and line is not None:
                if line == "# isort: on\n":
                    isort_off = False
                new_input += line
            elif line in ("# isort: split\n", "# isort: off\n", None):
                if line == "# isort: off\n":
                    isort_off = True
                if current:
                    parsed = parse.file_contents(current, config=config)
                    extra_space = ""
                    while current[-1] == "\n":
                        extra_space += "\n"
                        current = current[:-1]
                    extra_space = extra_space.replace("\n", "", 1)
                    new_input += output.sorted_imports(parsed,
                                                       config,
                                                       extension,
                                                       import_type="import")
                    new_input += extra_space
                    current = ""
                new_input += line or ""
            else:
                current += line or ""

        input_stream = StringIO(new_input)

    for index, line in enumerate(chain(input_stream, (None, ))):
        if line is None:
            if index == 0 and not config.force_adds:
                return False

            not_imports = True
            line = ""
            if not line_separator:
                line_separator = "\n"
        else:
            stripped_line = line.strip()
            if stripped_line and not line_separator:
                line_separator = line[len(line.rstrip()):].replace(
                    " ", "").replace("\t", "")

            for file_skip_comment in FILE_SKIP_COMMENTS:
                if file_skip_comment in line:
                    raise FileSkipComment("Passed in content")

            if ((index == 0 or (index in (1, 2) and not contains_imports))
                    and stripped_line.startswith("#")
                    and stripped_line not in section_comments):
                in_top_comment = True
            elif in_top_comment:
                if not line.startswith(
                        "#") or stripped_line in section_comments:
                    in_top_comment = False
                    first_comment_index_end = index - 1

            if (not stripped_line.startswith("#")
                    or in_quote) and '"' in line or "'" in line:
                char_index = 0
                if first_comment_index_start == -1 and (line.startswith('"') or
                                                        line.startswith("'")):
                    first_comment_index_start = index
                while char_index < len(line):
                    if line[char_index] == "\\":
                        char_index += 1
                    elif in_quote:
                        if line[char_index:char_index +
                                len(in_quote)] == in_quote:
                            in_quote = ""
                            if first_comment_index_end < first_comment_index_start:
                                first_comment_index_end = index
                    elif line[char_index] in ("'", '"'):
                        long_quote = line[char_index:char_index + 3]
                        if long_quote in ('"""', "'''"):
                            in_quote = long_quote
                            char_index += 2
                        else:
                            in_quote = line[char_index]
                    elif line[char_index] == "#":
                        break
                    char_index += 1

            not_imports = bool(in_quote) or in_top_comment or isort_off
            if not (in_quote or in_top_comment):
                stripped_line = line.strip()
                if isort_off:
                    if stripped_line == "# isort: on":
                        isort_off = False
                elif stripped_line == "# isort: off":
                    not_imports = True
                    isort_off = True
                elif stripped_line == "# isort: split":
                    not_imports = True
                elif stripped_line in config.section_comments and not import_section:
                    import_section += line
                    indent = line[:-len(line.lstrip())]
                elif not (stripped_line or contains_imports):
                    if add_imports and not indent:
                        if not import_section:
                            output_stream.write(line)
                            line = ""
                        import_section += line_separator.join(
                            add_imports) + line_separator
                        contains_imports = True
                        add_imports = []
                    else:
                        not_imports = True
                elif (not stripped_line or stripped_line.startswith("#") and
                      (not indent or indent + line.lstrip() == line)):
                    import_section += line
                elif stripped_line.startswith(IMPORT_START_IDENTIFIERS):
                    contains_imports = True

                    new_indent = line[:-len(line.lstrip())]
                    import_statement = line
                    stripped_line = line.strip().split("#")[0]
                    while stripped_line.endswith("\\") or (
                            "(" in stripped_line and ")" not in stripped_line):
                        if stripped_line.endswith("\\"):
                            while stripped_line and stripped_line.endswith(
                                    "\\"):
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line
                        else:
                            while ")" not in stripped_line:
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line

                    cimport_statement: bool = False
                    if (import_statement.lstrip().startswith(
                            CIMPORT_IDENTIFIERS)
                            or " cimport " in import_statement
                            or " cimport*" in import_statement
                            or " cimport(" in import_statement
                            or ".cimport" in import_statement):
                        cimport_statement = True

                    if cimport_statement != cimports or (new_indent != indent
                                                         and import_section):
                        if import_section:
                            next_cimports = cimport_statement
                            next_import_section = import_statement
                            import_statement = ""
                            not_imports = True
                            line = ""
                        else:
                            cimports = cimport_statement

                    indent = new_indent
                    import_section += import_statement
                else:
                    not_imports = True

        if not_imports:
            raw_import_section: str = import_section
            if (add_imports and not in_top_comment and not in_quote
                    and not import_section
                    and not line.lstrip().startswith(COMMENT_INDICATORS)):
                import_section = line_separator.join(
                    add_imports) + line_separator
                contains_imports = True
                add_imports = []

            if next_import_section and not import_section:  # pragma: no cover
                raw_import_section = import_section = next_import_section
                next_import_section = ""

            if import_section:
                if add_imports and not indent:
                    import_section = (line_separator.join(add_imports) +
                                      line_separator + import_section)
                    contains_imports = True
                    add_imports = []

                if not indent:
                    import_section += line
                    raw_import_section += line
                if not contains_imports:
                    output_stream.write(import_section)
                else:
                    leading_whitespace = import_section[:-len(import_section.
                                                              lstrip())]
                    trailing_whitespace = import_section[len(import_section.
                                                             rstrip()):]
                    if first_import_section and not import_section.lstrip(
                            line_separator).startswith(COMMENT_INDICATORS):
                        import_section = import_section.lstrip(line_separator)
                        raw_import_section = raw_import_section.lstrip(
                            line_separator)
                        first_import_section = False

                    if indent:
                        import_section = line_separator.join(
                            line.lstrip()
                            for line in import_section.split(line_separator))
                        out_config = Config(
                            config=config,
                            line_length=max(config.line_length - len(indent),
                                            0),
                            wrap_length=max(config.wrap_length - len(indent),
                                            0),
                            lines_after_imports=1,
                        )
                    else:
                        out_config = config

                    sorted_import_section = output.sorted_imports(
                        parse.file_contents(import_section, config=config),
                        out_config,
                        extension,
                        import_type="cimport" if cimports else "import",
                    )
                    if not (import_section.strip()
                            and not sorted_import_section):
                        if indent:
                            sorted_import_section = (
                                leading_whitespace + textwrap.indent(
                                    sorted_import_section, indent).strip() +
                                trailing_whitespace)

                        if not made_changes:
                            if config.ignore_whitespace:
                                compare_in = remove_whitespace(
                                    raw_import_section,
                                    line_separator=line_separator).strip()
                                compare_out = remove_whitespace(
                                    sorted_import_section,
                                    line_separator=line_separator).strip()
                            else:
                                compare_in = raw_import_section.strip()
                                compare_out = sorted_import_section.strip()

                            if compare_out != compare_in:
                                made_changes = True

                        output_stream.write(sorted_import_section)
                        if not line and not indent and next_import_section:
                            output_stream.write(line_separator)

                if indent:
                    output_stream.write(line)
                    if not next_import_section:
                        indent = ""

                if next_import_section:
                    cimports = next_cimports
                    contains_imports = True
                else:
                    contains_imports = False
                import_section = next_import_section
                next_import_section = ""
            else:
                output_stream.write(line)
                not_imports = False

    return made_changes
Example #4
 def _read_lines(self, file_object: TextIO) -> Generator[str, None, None]:
     while True:
         line = file_object.readline()
         if not line:
             break
         yield line
Example #5
def read_interface(
        file: TextIO,
        n1,
        catalog: Dict[str, agf.Record],
        temperature: float,
        try_n_prefix: bool = False) -> Tuple[trains.Interface, float, float]:
    commands = {}
    parms = {}
    while True:
        pos = file.tell()
        line = file.readline()
        words = line.split()
        if len(words) > 0:
            if line[:2] != '  ':
                file.seek(pos)
                break
            if words[0] == 'PARM':
                parm_index = int(words[1])
                parm_value = float(words[2])
                assert len(words) == 3
                parms[parm_index] = parm_value
            else:
                commands[words[0]] = words[1:]

    if 'GLAS' in commands:
        glass = commands['GLAS'][0]
        try:
            record = catalog[glass]
        except KeyError as ex:
            if try_n_prefix:
                nglass = 'N-' + glass
                try:
                    record = catalog[nglass]
                except KeyError:
                    raise GlassNotInCatalogError((glass, nglass))
            else:
                raise GlassNotInCatalogError([glass]) from ex
        n2 = record.fix_temperature(temperature)
    else:
        n2 = ri.air
    thickness = float(commands['DISZ'][0]) * 1e-3
    clear_semi_dia = float(commands['DIAM'][0]) * 1e-3
    chip_zone = float(commands.get('OEMA', ['0'])[0]) * 1e-3
    clear_radius = clear_semi_dia + chip_zone

    with np.errstate(divide='ignore'):
        roc = np.divide(1e-3, float(commands['CURV'][0]))
    kappa = float(commands.get('CONI', [0])[0]) + 1
    surface_type = commands['TYPE'][0]
    if surface_type == 'STANDARD' and np.isclose(kappa, 1):
        inner_surface = trains.SphericalSurface(roc, clear_radius)
    elif surface_type in ('STANDARD', 'EVENASPH'):
        alphas = []
        for parm_index, parm_value in parms.items():
            # Term is (2*parm_index)th-order coefficient, so the (2*parm_index-2)th element of alphas.
            order = 2 * parm_index
            index = order - 2
            if parm_value != 0:
                assert index >= 0
                alphas += [0] * (index - len(alphas) + 1)
                alphas[index] = parm_value * (1e3)**(order - 1)
        inner_surface = trains.ConicSurface(roc, clear_radius, kappa, alphas)
    else:
        raise ValueError('Unknown surface type %s.' % surface_type)

    if 'MEMA' in commands:
        mech_semi_dia = float(commands['MEMA'][0]) * 1e-3
    else:
        mech_semi_dia = clear_radius

    if mech_semi_dia - clear_radius > 1e-6:
        # TODO tidy this up - get rid of rt first?
        # Somehow need to offset this surface (in z). No way of describing this at present. Could add an offset
        # attribute to Surface. Then would need an offset in Profile.
        outer_surface = trains.SphericalSurface(np.inf,
                                                mech_semi_dia - clear_radius)
        outer_sag = inner_surface.calc_sag(clear_radius)
        surface = trains.SegmentedSurface((inner_surface, outer_surface),
                                          (0, outer_sag))
    else:
        surface = inner_surface

    interface = surface.to_interface(n1, n2)

    return interface, n2, thickness
Example #6
def get_puzzle_words(puzzle_file: TextIO) -> List[str]:
    """Return the puzzle words in puzzle_file. These are the words in the
    first line of the file.
    """

    return puzzle_file.readline().strip().split(",")
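A small sketch with an in-memory puzzle file (the format here is an assumption: a comma-separated word list on the first line, followed by the grid):

import io

puzzle = io.StringIO("CAT,DOG,BIRD\nxcatqz\nbirdog\n")
assert get_puzzle_words(puzzle) == ["CAT", "DOG", "BIRD"]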
Example #7
def read_auth(client: mqtt.Client, auth_file: TextIO):
    """Reads authentication from a file."""
    username = auth_file.readline().strip()
    password = auth_file.readline().strip()
    client.username_pw_set(username, password)
Example #8
def parse_states(file: TextIO) -> Tuple[State, State, State, Set[State]]:
    """
    Parses a list of states, identifies the start, accept and reject states.

    :param file: File to read from
    :return: Accept state, reject state, start state, set of all states
    """
    accept_state = None
    reject_state = None
    start_state = None
    all_states = set()

    try:
        line = file.readline()
        header, states_count_string = line.split()
    except ValueError:
        raise ParseError('Invalid states header format: {}'.format(line))

    try:
        states_count = int(states_count_string)
    except ValueError:
        raise ParseError(
            'Invalid number of states: {}'.format(states_count_string))

    if header != 'states':
        raise ParseError('Expected states header, got {}'.format(header))

    if states_count < 2:
        raise ParseError('The machine has to have at least 2 states')

    for _ in range(states_count):
        try:
            state, *accept_reject = file.readline().split()
        except ValueError:
            raise ParseError('Invalid state row')

        if state not in all_states:
            all_states.add(state)
        else:
            raise ParseError('Duplicate state: {}'.format(state))

        if start_state is None:
            start_state = state

        if accept_reject == ['+']:
            if accept_state is None:
                accept_state = state
            else:
                raise ParseError('Multiple accept states: {}'.format(state))
        elif accept_reject == ['-']:
            if reject_state is None:
                reject_state = state
            else:
                raise ParseError('Multiple reject states: {}'.format(state))
        elif accept_reject:
            raise ParseError(
                "Invalid state modifier: {}".format(accept_reject))

    if accept_state is None:
        raise ParseError("Missing accept state")

    if reject_state is None:
        raise ParseError("Missing reject state")

    return accept_state, reject_state, start_state, all_states
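A hedged usage sketch, assuming `ParseError` and the rest of the surrounding Turing-machine module are in scope. The expected input is a `states <count>` header followed by one state name per line, with `+` marking the accept state and `-` the reject state:

import io

spec = io.StringIO(
    "states 3\n"
    "q0\n"
    "qa +\n"
    "qr -\n"
)
accept, reject, start, all_states = parse_states(spec)
# accept == "qa", reject == "qr", start == "q0", all_states == {"q0", "qa", "qr"}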
Example #9
 def read_head(file_obj: TextIO) -> Tuple[str, str, TextIO]:
     match = header_pat.match("".join(file_obj.readline()
                                      for _ in range(4)))
     return match.group(1), match.group(2), file_obj
Example #10
def obsheader3(f: T.TextIO,
               use: set[str] = None,
               meas: list[str] = None) -> dict[str, T.Any]:
    """
    Get RINEX 3 OBS types for each system type.
    Optionally, select the system type and/or measurement type to greatly
    speed reading and save memory (RAM, disk).
    """
    if isinstance(f, (str, Path)):
        with opener(f, header=True) as h:
            return obsheader3(h, use, meas)

    fields = {}
    Fmax = 0

    # %% first line
    hdr = rinexinfo(f)

    for ln in f:
        if "END OF HEADER" in ln:
            break

        hd = ln[60:80]
        c = ln[:60]
        if "SYS / # / OBS TYPES" in hd:
            k = c[0]
            fields[k] = c[6:60].split()
            N = int(c[3:6])
            # %% maximum number of fields in a file, to allow fast Numpy parse.
            Fmax = max(N, Fmax)

            n = N - 13
            while n > 0:  # Rinex 3.03, pg. A6, A7
                ln = f.readline()
                assert "SYS / # / OBS TYPES" in ln[60:]
                fields[k] += ln[6:60].split()
                n -= 13

            assert len(fields[k]) == N

            continue

        if hd.strip() not in hdr:  # Header label
            hdr[hd.strip()] = c  # don't strip for fixed-width parsers
            # string with info
        else:  # concatenate to the existing string
            hdr[hd.strip()] += " " + c

    # %% list with x,y,z cartesian (OPTIONAL)
    # Rinex 3.03, pg. A6, Table A2
    try:
        # some RINEX files have bad headers with multiple APPROX POSITION XYZ.
        # we choose to use the first such header.
        hdr["position"] = [
            float(j) for j in hdr["APPROX POSITION XYZ"].split()
        ][:3]
        if ecef2geodetic is not None and len(hdr["position"]) == 3:
            hdr["position_geodetic"] = ecef2geodetic(*hdr["position"])
    except (KeyError, ValueError):
        pass
    # %% time
    try:
        t0s = hdr["TIME OF FIRST OBS"]
        # NOTE: must do second=int(float()) due to non-conforming files
        hdr["t0"] = datetime(
            year=int(t0s[:6]),
            month=int(t0s[6:12]),
            day=int(t0s[12:18]),
            hour=int(t0s[18:24]),
            minute=int(t0s[24:30]),
            second=int(float(t0s[30:36])),
            microsecond=int(float(t0s[30:43]) % 1 * 1000000),
        )
    except (KeyError, ValueError):
        pass

    try:
        hdr["interval"] = float(hdr["INTERVAL"][:10])
    except (KeyError, ValueError):
        pass
    # %% select specific satellite systems only (optional)
    if use:
        if not set(fields.keys()).intersection(use):
            raise KeyError(f"system type {use} not found in RINEX file")

        fields = {k: fields[k] for k in use if k in fields}

    # perhaps this could be done more efficiently, but it's probably low impact on overall program.
    # simple set and frozenset operations do NOT preserve order, which would completely mess up reading!
    sysind: dict[str, T.Any] = {}
    if isinstance(meas, (tuple, list, np.ndarray)):
        for sk in fields:  # iterate over each system
            # ind = np.isin(fields[sk], meas)  # boolean vector
            ind = np.zeros(len(fields[sk]), dtype=bool)
            for m in meas:
                for i, field in enumerate(fields[sk]):
                    if field.startswith(m):
                        ind[i] = True

            fields[sk] = np.array(fields[sk])[ind].tolist()
            sysind[sk] = np.empty(Fmax * 3, dtype=bool)  # *3 due to LLI, SSI
            for j, i in enumerate(ind):
                sysind[sk][j * 3:j * 3 + 3] = i
    else:
        sysind = {k: np.s_[:] for k in fields}

    hdr["fields"] = fields
    hdr["fields_ind"] = sysind
    hdr["Fmax"] = Fmax

    return hdr
Example #11
 def stuff(a: TextIO) -> str:
     return a.readline()
Example #12
def process(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: str = "py",
    config: Config = DEFAULT_CONFIG,
) -> bool:
    """Parses stream identifying sections of contiguous imports and sorting them

    Code with unsorted imports is read from the provided `input_stream`, sorted and then
    outputted to the specified `output_stream`.

    - `input_stream`: Text stream with unsorted import sections.
    - `output_stream`: Text stream to output the sorted imports into.
    - `config`: Config settings to use when sorting imports.
        - *Default*: `isort.settings.DEFAULT_CONFIG`.
    - `extension`: The file extension or file extension rules that should be used.
        - *Default*: `"py"`.
        - *Choices*: `["py", "pyi", "pyx"]`.

    Returns `True` if the content provided in `input_stream` required changes (i.e. its
    imports were not already sorted), otherwise `False`.
    """
    line_separator: str = config.line_ending
    add_imports: List[str] = [
        format_natural(addition) for addition in config.add_imports
    ]
    import_section: str = ""
    next_import_section: str = ""
    next_cimports: bool = False
    in_quote: str = ""
    first_comment_index_start: int = -1
    first_comment_index_end: int = -1
    contains_imports: bool = False
    in_top_comment: bool = False
    first_import_section: bool = True
    indent: str = ""
    isort_off: bool = False
    code_sorting: Union[bool, str] = False
    code_sorting_section: str = ""
    code_sorting_indent: str = ""
    cimports: bool = False
    made_changes: bool = False
    stripped_line: str = ""
    end_of_file: bool = False
    verbose_output: List[str] = []

    if config.float_to_top:
        new_input = ""
        current = ""
        isort_off = False
        for line in chain(input_stream, (None, )):
            if isort_off and line is not None:
                if line == "# isort: on\n":
                    isort_off = False
                new_input += line
            elif line in ("# isort: split\n", "# isort: off\n",
                          None) or str(line).endswith("# isort: split\n"):
                if line == "# isort: off\n":
                    isort_off = True
                if current:
                    if add_imports:
                        add_line_separator = line_separator or "\n"
                        current += add_line_separator + add_line_separator.join(
                            add_imports)
                        add_imports = []
                    parsed = parse.file_contents(current, config=config)
                    verbose_output += parsed.verbose_output
                    extra_space = ""
                    while current and current[-1] == "\n":
                        extra_space += "\n"
                        current = current[:-1]
                    extra_space = extra_space.replace("\n", "", 1)
                    sorted_output = output.sorted_imports(parsed,
                                                          config,
                                                          extension,
                                                          import_type="import")
                    made_changes = made_changes or _has_changed(
                        before=current,
                        after=sorted_output,
                        line_separator=parsed.line_separator,
                        ignore_whitespace=config.ignore_whitespace,
                    )
                    new_input += sorted_output
                    new_input += extra_space
                    current = ""
                new_input += line or ""
            else:
                current += line or ""

        input_stream = StringIO(new_input)

    for index, line in enumerate(chain(input_stream, (None, ))):
        if line is None:
            if index == 0 and not config.force_adds:
                return False

            not_imports = True
            end_of_file = True
            line = ""
            if not line_separator:
                line_separator = "\n"

            if code_sorting and code_sorting_section:
                output_stream.write(
                    textwrap.indent(
                        isort.literal.assignment(
                            code_sorting_section,
                            str(code_sorting),
                            extension,
                            config=_indented_config(config, indent),
                        ),
                        code_sorting_indent,
                    ))
        else:
            stripped_line = line.strip()
            if stripped_line and not line_separator:
                line_separator = line[len(line.rstrip()):].replace(
                    " ", "").replace("\t", "")

            for file_skip_comment in FILE_SKIP_COMMENTS:
                if file_skip_comment in line:
                    raise FileSkipComment("Passed in content")

            if not in_quote and stripped_line == "# isort: off":
                isort_off = True

            if ((index == 0 or (index in (1, 2) and not contains_imports))
                    and stripped_line.startswith("#")
                    and stripped_line not in config.section_comments):
                in_top_comment = True
            elif in_top_comment:
                if not line.startswith(
                        "#") or stripped_line in config.section_comments:
                    in_top_comment = False
                    first_comment_index_end = index - 1

            was_in_quote = bool(in_quote)
            if (not stripped_line.startswith("#")
                    or in_quote) and '"' in line or "'" in line:
                char_index = 0
                if first_comment_index_start == -1 and (line.startswith('"') or
                                                        line.startswith("'")):
                    first_comment_index_start = index
                while char_index < len(line):
                    if line[char_index] == "\\":
                        char_index += 1
                    elif in_quote:
                        if line[char_index:char_index +
                                len(in_quote)] == in_quote:
                            in_quote = ""
                            if first_comment_index_end < first_comment_index_start:
                                first_comment_index_end = index
                    elif line[char_index] in ("'", '"'):
                        long_quote = line[char_index:char_index + 3]
                        if long_quote in ('"""', "'''"):
                            in_quote = long_quote
                            char_index += 2
                        else:
                            in_quote = line[char_index]
                    elif line[char_index] == "#":
                        break
                    char_index += 1

            not_imports = bool(
                in_quote) or was_in_quote or in_top_comment or isort_off
            if not (in_quote or was_in_quote or in_top_comment):
                if isort_off:
                    if stripped_line == "# isort: on":
                        isort_off = False
                elif stripped_line.endswith("# isort: split"):
                    not_imports = True
                elif stripped_line in CODE_SORT_COMMENTS:
                    code_sorting = stripped_line.split("isort: ")[1].strip()
                    code_sorting_indent = line[:-len(line.lstrip())]
                    not_imports = True
                elif code_sorting:
                    if not stripped_line:
                        output_stream.write(
                            textwrap.indent(
                                isort.literal.assignment(
                                    code_sorting_section,
                                    str(code_sorting),
                                    extension,
                                    config=_indented_config(config, indent),
                                ),
                                code_sorting_indent,
                            ))
                        not_imports = True
                        code_sorting = False
                        code_sorting_section = ""
                        code_sorting_indent = ""
                    else:
                        code_sorting_section += line
                        line = ""
                elif stripped_line in config.section_comments:
                    if import_section and not contains_imports:
                        output_stream.write(import_section)
                        import_section = line
                        not_imports = False
                    else:
                        import_section += line
                    indent = line[:-len(line.lstrip())]
                elif not (stripped_line or contains_imports):
                    not_imports = True
                elif (not stripped_line or stripped_line.startswith("#") and
                      (not indent or indent + line.lstrip() == line)
                      and not config.treat_all_comments_as_code
                      and stripped_line not in config.treat_comments_as_code):
                    import_section += line
                elif stripped_line.startswith(IMPORT_START_IDENTIFIERS):
                    did_contain_imports = contains_imports
                    contains_imports = True

                    new_indent = line[:-len(line.lstrip())]
                    import_statement = line
                    stripped_line = line.strip().split("#")[0]
                    while stripped_line.endswith("\\") or (
                            "(" in stripped_line and ")" not in stripped_line):
                        if stripped_line.endswith("\\"):
                            while stripped_line and stripped_line.endswith(
                                    "\\"):
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line
                        else:
                            while ")" not in stripped_line:
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line

                    cimport_statement: bool = False
                    if (import_statement.lstrip().startswith(
                            CIMPORT_IDENTIFIERS)
                            or " cimport " in import_statement
                            or " cimport*" in import_statement
                            or " cimport(" in import_statement
                            or ".cimport" in import_statement):
                        cimport_statement = True

                    if cimport_statement != cimports or (
                            new_indent != indent and import_section and
                        (not did_contain_imports
                         or len(new_indent) < len(indent))):
                        indent = new_indent
                        if import_section:
                            next_cimports = cimport_statement
                            next_import_section = import_statement
                            import_statement = ""
                            not_imports = True
                            line = ""
                        else:
                            cimports = cimport_statement
                    else:
                        if new_indent != indent:
                            if import_section and did_contain_imports:
                                import_statement = indent + import_statement.lstrip(
                                )
                            else:
                                indent = new_indent
                    import_section += import_statement
                else:
                    not_imports = True

        if not_imports:
            raw_import_section: str = import_section
            if (add_imports and (stripped_line or end_of_file)
                    and not config.append_only and not in_top_comment
                    and not in_quote and not import_section
                    and not line.lstrip().startswith(COMMENT_INDICATORS)):
                import_section = line_separator.join(
                    add_imports) + line_separator
                if end_of_file and index != 0:
                    output_stream.write(line_separator)
                contains_imports = True
                add_imports = []

            if next_import_section and not import_section:  # pragma: no cover
                raw_import_section = import_section = next_import_section
                next_import_section = ""

            if import_section:
                if add_imports and not indent:
                    import_section = (line_separator.join(add_imports) +
                                      line_separator + import_section)
                    contains_imports = True
                    add_imports = []

                if not indent:
                    import_section += line
                    raw_import_section += line
                if not contains_imports:
                    output_stream.write(import_section)

                else:
                    leading_whitespace = import_section[:-len(import_section.
                                                              lstrip())]
                    trailing_whitespace = import_section[len(import_section.
                                                             rstrip()):]
                    if first_import_section and not import_section.lstrip(
                            line_separator).startswith(COMMENT_INDICATORS):
                        import_section = import_section.lstrip(line_separator)
                        raw_import_section = raw_import_section.lstrip(
                            line_separator)
                        first_import_section = False

                    if indent:
                        import_section = "".join(
                            line[len(indent):]
                            for line in import_section.splitlines(
                                keepends=True))

                    parsed_content = parse.file_contents(import_section,
                                                         config=config)
                    verbose_output += parsed_content.verbose_output

                    sorted_import_section = output.sorted_imports(
                        parsed_content,
                        _indented_config(config, indent),
                        extension,
                        import_type="cimport" if cimports else "import",
                    )
                    if not (import_section.strip()
                            and not sorted_import_section):
                        if indent:
                            sorted_import_section = (
                                leading_whitespace + textwrap.indent(
                                    sorted_import_section, indent).strip() +
                                trailing_whitespace)

                        made_changes = made_changes or _has_changed(
                            before=raw_import_section,
                            after=sorted_import_section,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        output_stream.write(sorted_import_section)
                        if not line and not indent and next_import_section:
                            output_stream.write(line_separator)

                if indent:
                    output_stream.write(line)
                    if not next_import_section:
                        indent = ""

                if next_import_section:
                    cimports = next_cimports
                    contains_imports = True
                else:
                    contains_imports = False
                import_section = next_import_section
                next_import_section = ""
            else:
                output_stream.write(line)
                not_imports = False

            if stripped_line and not in_quote and not import_section and not next_import_section:
                if stripped_line == "yield":
                    while not stripped_line or stripped_line == "yield":
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

                if stripped_line.startswith(
                        "raise") or stripped_line.startswith("yield"):
                    while stripped_line.endswith("\\"):
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

    if made_changes and config.only_modified:
        for output_str in verbose_output:
            print(output_str)

    return made_changes
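A hedged usage sketch for this `process` function (and, with the same call shape, the older `_sort_imports` in Example #3); it assumes the call happens inside isort's own module, where `Config`, `parse`, `output`, and the other module-level helpers referenced above are defined. In application code the public wrappers such as `isort.code` or `isort.api.sort_stream` are the usual entry points.

from io import StringIO

messy = StringIO("import sys\nimport os\nimport abc\n\nprint(abc, os, sys)\n")
tidy = StringIO()
changed = process(messy, tidy)   # True: the imports had to be reordered
print(tidy.getvalue())           # imports now appear as abc, os, sys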
Example #13
def read_chunk_of_file(file: TextIO) -> Iterator[str]:
    while line := file.readline():
        if contents := line.strip():
            yield contents
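A quick sketch: blank lines are skipped and surrounding whitespace is stripped from every yielded line:

import io

chunks = list(read_chunk_of_file(io.StringIO("alpha\n\n  beta  \n")))
assert chunks == ["alpha", "beta"]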
Example #14
def get_knight_moves_f(f_in: TextIO, f_out: TextIO):
    knight_nr = int(f_in.readline().rstrip())
    res = get_knight_moves(knight_nr)
    for line in res:
        print(line, file=f_out)
Example #15
def read_game(handle: TextIO, *, Visitor: Callable[[], BaseVisitor[ResultT]] = GameBuilder) -> Optional[ResultT]:
    """
    Reads a game from a file opened in text mode.

    >>> import chess.pgn
    >>>
    >>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn")
    >>>
    >>> first_game = chess.pgn.read_game(pgn)
    >>> second_game = chess.pgn.read_game(pgn)
    >>>
    >>> first_game.headers["Event"]
    'IBM Man-Machine, New York USA'
    >>>
    >>> # Iterate through all moves and play them on a board.
    >>> board = first_game.board()
    >>> for move in first_game.mainline_moves():
    ...     board.push(move)
    ...
    >>> board
    Board('4r3/6P1/2p2P1k/1p6/pP2p1R1/P1B5/2P2K2/3r4 b - - 0 45')

    By using text mode, the parser does not need to handle encodings. It is the
    caller's responsibility to open the file with the correct encoding.
    PGN files are usually ASCII or UTF-8 encoded. So, the following should
    cover most relevant cases (ASCII, UTF-8, UTF-8 with BOM).

    >>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn", encoding="utf-8-sig")

    Use :class:`~io.StringIO` to parse games from a string.

    >>> import io
    >>>
    >>> pgn = io.StringIO("1. e4 e5 2. Nf3 *")
    >>> game = chess.pgn.read_game(pgn)

    The end of a game is determined by a completely blank line or the end of
    the file. (Of course, blank lines in comments are possible).

    According to the PGN standard, at least the usual 7 header tags are
    required for a valid game. This parser also handles games without any
    headers just fine.

    The parser is relatively forgiving when it comes to errors. It skips over
    tokens it can not parse. Any exceptions are logged and collected in
    :data:`Game.errors <chess.pgn.Game.errors>`. This behavior can be
    :func:`overridden <chess.pgn.GameBuilder.handle_error>`.

    Returns the parsed game or ``None`` if the end of file is reached.
    """
    visitor = Visitor()

    found_game = False
    skipping_game = False
    headers = None
    managed_headers = None

    # Ignore leading empty lines and comments.
    line = handle.readline().lstrip("\ufeff")
    while line.isspace() or line.startswith("%") or line.startswith(";"):
        line = handle.readline()

    # Parse game headers.
    while line:
        # Ignore comments.
        if line.startswith("%") or line.startswith(";"):
            line = handle.readline()
            continue

        # First token of the game.
        if not found_game:
            found_game = True
            skipping_game = visitor.begin_game() is SKIP
            if not skipping_game:
                managed_headers = visitor.begin_headers()
                if not isinstance(managed_headers, Headers):
                    managed_headers = None
                    headers = Headers({})

        if not line.startswith("["):
            break

        if not skipping_game:
            tag_match = TAG_REGEX.match(line)
            if tag_match:
                visitor.visit_header(tag_match.group(1), tag_match.group(2))
                if headers is not None:
                    headers[tag_match.group(1)] = tag_match.group(2)
            else:
                break

        line = handle.readline()

    if not found_game:
        return None

    if not skipping_game:
        skipping_game = visitor.end_headers() is SKIP

    # Ignore single empty line after headers.
    if line.isspace():
        line = handle.readline()

    if not skipping_game:
        # Chess variant.
        headers = managed_headers if headers is None else headers
        try:
            VariantBoard = headers.variant()
        except ValueError as error:
            visitor.handle_error(error)
            VariantBoard = chess.Board

        # Initial position.
        fen = headers.get("FEN", VariantBoard.starting_fen)
        try:
            board_stack = [VariantBoard(fen, chess960=headers.is_chess960())]
        except ValueError as error:
            visitor.handle_error(error)
            skipping_game = True
        else:
            visitor.visit_board(board_stack[0])

    # Fast path: Skip entire game.
    if skipping_game:
        in_comment = False

        while line:
            if not in_comment:
                if line.isspace():
                    break
                elif line.startswith("%"):
                    line = handle.readline()
                    continue

            for match in SKIP_MOVETEXT_REGEX.finditer(line):
                token = match.group(0)
                if token == "{":
                    in_comment = True
                elif not in_comment and token == ";":
                    break
                elif token == "}":
                    in_comment = False

            line = handle.readline()

        visitor.end_game()
        return visitor.result()

    # Parse movetext.
    skip_variation_depth = 0
    while line:
        read_next_line = True

        # Ignore comments.
        if line.startswith("%") or line.startswith(";"):
            line = handle.readline()
            continue

        # An empty line means the end of a game.
        if line.isspace():
            visitor.end_game()
            return visitor.result()

        for match in MOVETEXT_REGEX.finditer(line):
            token = match.group(0)

            if token.startswith("{"):
                # Consume until the end of the comment.
                line = token[1:]
                comment_lines = []
                while line and "}" not in line:
                    comment_lines.append(line.rstrip())
                    line = handle.readline()
                end_index = line.find("}")
                comment_lines.append(line[:end_index])
                if "}" in line:
                    line = line[end_index:]
                else:
                    line = ""

                if not skip_variation_depth:
                    visitor.visit_comment("\n".join(comment_lines).strip())

                # Continue with the current or the next line.
                if line:
                    read_next_line = False
                break
            elif token == "(":
                if skip_variation_depth:
                    skip_variation_depth += 1
                elif board_stack[-1].move_stack:
                    if visitor.begin_variation() is SKIP:
                        skip_variation_depth = 1
                    else:
                        board = board_stack[-1].copy()
                        board.pop()
                        board_stack.append(board)
            elif token == ")":
                if skip_variation_depth:
                    skip_variation_depth -= 1
                if len(board_stack) > 1:
                    visitor.end_variation()
                    board_stack.pop()
            elif skip_variation_depth:
                continue
            elif token.startswith(";"):
                break
            elif token.startswith("$"):
                # Found a NAG.
                visitor.visit_nag(int(token[1:]))
            elif token == "?":
                visitor.visit_nag(NAG_MISTAKE)
            elif token == "??":
                visitor.visit_nag(NAG_BLUNDER)
            elif token == "!":
                visitor.visit_nag(NAG_GOOD_MOVE)
            elif token == "!!":
                visitor.visit_nag(NAG_BRILLIANT_MOVE)
            elif token == "!?":
                visitor.visit_nag(NAG_SPECULATIVE_MOVE)
            elif token == "?!":
                visitor.visit_nag(NAG_DUBIOUS_MOVE)
            elif token in ["1-0", "0-1", "1/2-1/2", "*"] and len(board_stack) == 1:
                visitor.visit_result(token)
            else:
                # Parse SAN tokens.
                try:
                    move = visitor.parse_san(board_stack[-1], token)
                except ValueError as error:
                    visitor.handle_error(error)
                    skip_variation_depth = 1
                else:
                    visitor.visit_move(board_stack[-1], move)
                    board_stack[-1].push(move)
                visitor.visit_board(board_stack[-1])

        if read_next_line:
            line = handle.readline()

    visitor.end_game()
    return visitor.result()
Example #16
def _receive_query_response(input_channel: TextIO) -> Response:
    query_message = input_channel.readline().strip()
    LOG.debug(f"Received `{query_message}`")
    return parse_query_response(query_message)
Example #17
def buffer_to_dict(buf: TextIO):
    description = buf.readline().strip()
    NE, NP = map(int, buf.readline().split())
    nodes = {}
    for _ in range(NP):
        line = buf.readline().strip('\n').split()
        # Gr3/fort.14 format cannot distinguish between a 2D mesh with one
        # vector value (e.g. velocity, which uses 2 columns) or a 3D mesh with
        # one scalar value. This is a design problem of the mesh format, which
        # renders it ambiguous, and the main reason why the use of fort.14/grd
        # formats is discouraged, in favor of UGRID.
        # Here, we assume the input mesh is strictly a 2D mesh, and the data
        # that follows is an array of values.
        if len(line[3:]) == 1:
            nodes[line[0]] = [(float(line[1]), float(line[2])), float(line[3])]
        else:
            nodes[line[0]] = [
                (float(line[1]), float(line[2])),
                [float(line[i]) for i in range(3, len(line[3:]))]
            ]
    elements = {}
    for _ in range(NE):
        line = buf.readline().split()
        elements[line[0]] = line[2:]
    # Assume EOF if NOPE is empty.
    try:
        NOPE = int(buf.readline().split()[0])
    except IndexError:
        return {
            'description': description,
            'nodes': nodes,
            'elements': elements
        }
    # Let NOPE = -1 mean an ellipsoidal mesh;
    # it is effectively treated as 0 until further support is implemented.
    boundaries: Dict = defaultdict(dict)
    _bnd_id = 0
    buf.readline()
    while _bnd_id < NOPE:
        NETA = int(buf.readline().split()[0])
        _cnt = 0
        boundaries[None][_bnd_id] = dict()
        boundaries[None][_bnd_id]['indexes'] = list()
        while _cnt < NETA:
            boundaries[None][_bnd_id]['indexes'].append(
                buf.readline().split()[0].strip())
            _cnt += 1
        _bnd_id += 1
    NBOU = int(buf.readline().split()[0])
    _nbnd_cnt = 0
    buf.readline()
    while _nbnd_cnt < NBOU:
        npts, ibtype = map(int, buf.readline().split()[:2])
        _pnt_cnt = 0
        if ibtype not in boundaries:
            _bnd_id = 0
        else:
            _bnd_id = len(boundaries[ibtype])
        boundaries[ibtype][_bnd_id] = dict()
        boundaries[ibtype][_bnd_id]['indexes'] = list()
        while _pnt_cnt < npts:
            line = buf.readline().split()
            if len(line) == 1:
                boundaries[ibtype][_bnd_id]['indexes'].append(line[0])
            else:
                index_construct = []
                for val in line:
                    if '.' in val:
                        continue
                    index_construct.append(val)
                boundaries[ibtype][_bnd_id]['indexes'].append(index_construct)
            _pnt_cnt += 1
        _nbnd_cnt += 1
    return {
        'description': description,
        'nodes': nodes,
        'elements': elements,
        'boundaries': boundaries
    }
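A hedged sketch with a minimal gr3/fort.14 mesh (one triangle, no boundary blocks, so parsing stops at the end-of-file branch); it assumes the snippet's own imports (`TextIO`, `Dict`, `defaultdict`) are in scope:

import io

grd = io.StringIO(
    "minimal mesh\n"   # description line
    "1 3\n"            # NE NP: one element, three nodes
    "1 0.0 0.0 -1.0\n"
    "2 1.0 0.0 -1.0\n"
    "3 0.0 1.0 -1.0\n"
    "1 3 1 2 3\n"
)
mesh = buffer_to_dict(grd)
# mesh["nodes"]["1"] == [(0.0, 0.0), -1.0]; mesh["elements"]["1"] == ["1", "2", "3"]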
Example #18
def parse(reader: TextIO) -> Tuple[Dict[int, TestCase], List[str]]:
    lineno = 0
    errs: List[str] = []

    def error(msg: str) -> None:
        errs.append("%s:%s: Invalid TAP13: %s" % (reader.name, lineno, msg))

    firstline = reader.readline().rstrip("\n")
    lineno += 1
    if firstline != "TAP version 13":
        error("First line must be %s" % repr("TAP version 13"))
        return ({}, errs)

    plan: Optional[int] = None
    tests: Dict[int, TestCase] = {}
    at_end = False
    prev_test = 0

    line = reader.readline().rstrip("\n")
    lineno += 1
    while line:
        if line.startswith("#"):
            pass
        elif at_end:
            error("Cannot have more output after trailing test plan")
            break
        elif line.startswith("1.."):
            if plan is not None:
                error("Test plan can only be given once")
                break
            strplan = trim_prefix(line, "1..")
            if not strplan.isdigit():
                error("Not an integer number of tests: %s" % repr(strplan))
                break
            if len(tests) > 0:
                at_end = True
            plan = int(strplan)
        elif re.match(r"^(not )?ok\b", line):
            m = cast(
                Dict[int, str],
                re.match(r"^(ok|not ok)\b\s*([0-9]+\b)?([^#]*)(#.*)?", line))
            #                                    1               2          3      4
            #
            # 1: status (required)
            # 2: test number (recommended)
            # 3: description (recommended)
            # 4: comment (when necessary)
            status = TestStatus.OK if m[1] == "ok" else TestStatus.NOT_OK
            test_number = int(m[2]) if m[2] is not None else (prev_test + 1)
            description = m[3]
            comment = m[4]

            # Parse directives
            if re.match(r"^# TODO( .*)?$", comment or "", flags=re.IGNORECASE):
                status = {
                    TestStatus.OK: TestStatus.TODO_OK,
                    TestStatus.NOT_OK: TestStatus.TODO_NOT_OK,
                }[status]
            if re.match(r"^# SKIP", comment or "", flags=re.IGNORECASE):
                status = TestStatus.SKIP

            yaml: Optional[Any] = None
            if re.match(r"^\s+---$", peek_line(reader).rstrip("\n")):
                yaml = ""
                for line in reader:
                    lineno += 1
                    line = line.rstrip("\n")
                    yaml += line + "\n"
                    if re.match(r"^\s+\.\.\.$", line):
                        break
                # Don't bother parsing the YAML; we'd have to pull in
                # something outside of the stdlib, and we don't intend
                # to do anything with it anyway.

            tests[test_number] = TestCase(status=status,
                                          n=test_number,
                                          description=description,
                                          comment=comment,
                                          yaml=yaml)
            prev_test = test_number
        elif line.startswith("Bail out!"):
            error(line)
            break
        else:
            error("Invalid line: %s" % repr(line))
            break
        line = reader.readline().rstrip("\n")
        lineno += 1

    if plan is not None:
        for i in range(1, plan + 1):
            if i not in tests:
                tests[i] = TestCase(status=TestStatus.MISSING, n=i)
        if len(tests) > plan:
            error(
                "More test results than test plan indicated, truncating: %d > %d"
                % (len(tests), plan))
            trunc: Dict[int, TestCase] = {}
            for i in range(1, plan + 1):
                trunc[i] = tests[i]
            tests = trunc
    return tests, errs
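
A quick way to exercise this parser is to feed it a small TAP13 document from an in-memory stream. The helpers below (TestStatus, TestCase, trim_prefix, peek_line) are hypothetical stand-ins for names the snippet assumes but does not show; this sketch assumes parse above lives in the same module as these stand-ins.

# Hypothetical stand-ins for the helpers parse() assumes; the real project
# defines its own versions of these.
import enum
import io
from dataclasses import dataclass
from typing import Any, Optional, TextIO

class TestStatus(enum.Enum):
    OK = "ok"
    NOT_OK = "not ok"
    TODO_OK = "todo ok"
    TODO_NOT_OK = "todo not ok"
    SKIP = "skip"
    MISSING = "missing"

@dataclass
class TestCase:
    status: TestStatus
    n: int
    description: str = ""
    comment: Optional[str] = None
    yaml: Optional[Any] = None

def trim_prefix(s: str, prefix: str) -> str:
    return s[len(prefix):] if s.startswith(prefix) else s

def peek_line(reader: TextIO) -> str:
    pos = reader.tell()
    line = reader.readline()
    reader.seek(pos)
    return line

class NamedStringIO(io.StringIO):
    name = "<example>"  # error() formats messages as "<name>:<lineno>: ..."

tests, errs = parse(NamedStringIO("TAP version 13\nok 1 first\nnot ok 2 second\n1..2\n"))
assert errs == []
assert tests[2].status is TestStatus.NOT_OK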
Example #19
0
def advance_file(file: TextIO, conds: List[Pattern]) -> Union[int, None]:
    """Find the next sequence of lines that satisfy, one-to-one and in order,
    the list of condition regex. Then rewind the file to the start of
    the last of these matched lines. It returns the position of the
    start of next line (from which point we will presumably look for
    matches again).

    Given a file that contains:

    .. code-block:: text

         data1
         data2
         sweet spot
         data3
         data4

    and conditions (as compiled re's) ``['^data\\d+', '^sweet']``

    This would advance the file to the beginning of the 'sweet spot' line,
    and return the pos for the start of the data3 line.
    If called again on the same file with the same conditions, this would
    return None, as no further matching positions exist.

    :param file: The file to search, presumably pointing to the start of
        a line. The file cursor will be advanced to the start of the last
        of a sequence of lines that satisfy the conditions (or the end of
        the file if no such position exists).
    :param conds: The compiled regular expressions that successive lines must match, in order.
    :return: The position of the start of the line after the one advanced
        to. If None, then no matched position was found.
    """

    # Tracks the file pos that would follow a set of matches.
    next_pos = file.tell()
    # Tracks the position just after the first matched line in the sequence
    # of matches. If we match 3/5 conditions, we'll want to rewind
    # to the start of the second of those lines and start matching again.
    restart_pos = None
    # The current condition we're comparing against.
    cond_idx = 0
    # After matching all of the conditions against lines, we
    # rewind the file to the start of the last matched line (this pos).
    rewind_pos = None

    while cond_idx < len(conds):
        rewind_pos = next_pos

        line = file.readline()

        # We're out of file, but haven't matched all conditions.
        if line == '':
            return None

        next_pos = file.tell()
        # We'll restart at this line if not all of the conditions match.
        if cond_idx == 0:
            restart_pos = next_pos

        # When we match a condition, advance to the next one, otherwise reset.
        if conds[cond_idx].search(line) is not None:
            cond_idx += 1
        else:
            cond_idx = 0
            file.seek(restart_pos)

    # Go back to the start of the last matched line.
    file.seek(rewind_pos)

    return next_pos
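
To make the docstring's example concrete: an in-memory stream works because only readline, tell and seek are used. Nothing is assumed here beyond the advance_file function above.

import io
import re

buf = io.StringIO("data1\ndata2\nsweet spot\ndata3\ndata4\n")
conds = [re.compile(r"^data\d+"), re.compile(r"^sweet")]

pos = advance_file(buf, conds)
print(buf.readline())  # 'sweet spot\n' -- the stream was rewound to the last matched line
print(pos)             # start of the 'data3' line (offset 23 here)

buf.seek(pos)
print(advance_file(buf, conds))  # None -- no further matching sequence exists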
Example #20
0
    def load_txt(self, f: TextIO):
        self.clear()

        curblock = self

        level = 0
        stack = list()

        line_no = 0

        while True:
            line = f.readline()
            line_no += 1
            if line == '':  # EOF
                break

            line = line.strip('\x09\x0a\x0d\x20')  # \t\n\r\s

            comment = ''
            if '//' in line:
                line, comment = line.split('//', 1)
                line = line.rstrip('\x09\x20')  # \t\s

            if '{' in line:
                stack.append(curblock)

                head = line.split('{', 1)[0]
                head = head.rstrip('\x09\x20')  # \t\s

                if head.endswith(('^', '~')):
                    curblock.sorted = head.endswith('^')
                    head = head[:-1]
                    head = head.rstrip('\x09\x20')  # \t\s
                else:
                    curblock.sorted = True

                path = ''
                if '=' in head:
                    name, path = head.split('=', 1)
                    name = name.rstrip('\x09\x20')  # \t\s
                    path = path.lstrip('\x09\x20')  # \t\s
                else:
                    name = head

                if path != '':
                    curblock[name] = BlockPar.from_txt(path)
                else:
                    prevblock = curblock
                    curblock = BlockPar()
                    prevblock.add(name, curblock)

                    level += 1

            elif '}' in line:
                if level > 0:
                    curblock = stack.pop()
                level -= 1

            elif '=' in line:
                name, value = line.split('=', 1)
                name = name.rstrip('\x09\x20')  # \t\s
                value = value.lstrip('\x09\x20')  # \t\s

                # multiline parameters - heredoc
                if value.startswith('<<<'):
                    value = ''
                    spacenum = 0
                    while True:
                        line = f.readline()
                        line_no += 1
                        if line == '':  # EOF
                            raise Exception("BlockPar.load_txt: "
                                            "heredoc end marker not found")

                        if line.strip('\x09\x0a\x0d\x20') == '':
                            continue

                        if value == '':
                            spacenum = len(line) - len(line.lstrip('\x20'))
                            if spacenum > (4 * level):
                                spacenum = 4 * level

                        if line.lstrip('\x09\x20').startswith('>>>'):
                            value = value.rstrip('\x0a\x0d')
                            break

                        value += line[spacenum:]

                curblock.add(name, value)

            else:
                continue
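
The heredoc branch is the subtle part of load_txt: a value that starts with '<<<' keeps consuming lines, stripping at most 4 * level leading spaces from each, until it reaches a '>>>' terminator. Below is a stripped-down, self-contained sketch of just that pattern; the function name and error type are illustrative, not taken from the original class.

import io
from typing import TextIO

def read_heredoc(f: TextIO, level: int = 0) -> str:
    """Collect lines until a '>>>' terminator, de-indenting by at most 4*level spaces."""
    value = ""
    spacenum = 0
    while True:
        line = f.readline()
        if line == "":  # EOF before the terminator
            raise ValueError("heredoc end marker not found")
        if line.strip() == "":
            continue
        if value == "":
            # The first content line fixes how much indentation to strip.
            spacenum = min(len(line) - len(line.lstrip(" ")), 4 * level)
        if line.lstrip(" \t").startswith(">>>"):
            return value.rstrip("\r\n")
        value += line[spacenum:]

buf = io.StringIO("    first line\n    second line\n    >>>\n")
print(read_heredoc(buf, level=1))  # -> 'first line\nsecond line'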
Example #21
0
def stuff(a: TextIO) -> str:
    return a.readline()
Example #22
0
def loop_read_interval_from_stdin(stream: TextIO):  
    word = stream.readline().strip()
    if word == STOP_WORD:
        exit(0)
    left, right = map(float, word.split())
    return Interval(left, right)    
Example #23
0
def parse_char(bdfstream: TextIO) -> Tuple[Dict[str, str], List[int]]:
    specs = {}
    while not (line := bdfstream.readline()).startswith("BITMAP"):
        parts = line.split(maxsplit=1)
        specs[parts[0]] = parts[1].strip()
    # Assumed continuation: BDF lists one hex bitmap row per line until ENDCHAR.
    bitmap = []
    while not (line := bdfstream.readline()).startswith("ENDCHAR"):
        bitmap.append(int(line, 16))
    return specs, bitmap
Example #24
0
def read_puzzle(lines: TextIO) -> Tuple[List[str], Grid]:
    """Parses a puzzle input file into the list of words and the character grid"""
    words = parse_line(lines.readline())
    characters = map(ord, chain(*map(parse_line, lines)))

    return words, Grid(array('I', characters))
Example #25
0
def peek(file: TextIO) -> str:
    pos = file.tell()
    line = file.readline()
    file.seek(pos)
    return line
Example #26
0
def get_line(f: TextIO) -> str:
    return f.readline().rstrip('\n')
Example #27
0
def _consume_comments(f: TextIO, initial_line=None) -> str:
    line = initial_line or f.readline()
    while line and line.startswith('#'):
        line = f.readline()
    return line
Example #28
0
    def load_file(self, fid: typing.TextIO):
        """
        Load the touchstone file into the internal data structures.

        Parameters
        ----------
        fid : file object

        """

        filename = self.filename

        # Check the filename extension. 
        # Should be .sNp for Touchstone format V1.0, and .ts for V2
        extension = filename.split('.')[-1].lower()
        
        if (extension[0] == 's') and (extension[-1] == 'p'): # sNp
            # check if N is a correct number
            try:
                self.rank = int(extension[1:-1])
            except ValueError:
                raise ValueError("Filename does not have an s-parameter extension. It has [%s] instead. Please correct the extension to the form 'sNp', where N is any integer." % extension)
        elif extension == 'ts':
            pass
        else:
            raise Exception('Filename does not have the expected Touchstone extension (.sNp or .ts)')

        values = []
        while True:
            line = fid.readline()
            if not line:
                break
            # store comments if they precede the option line
            line = line.split('!', 1)
            if len(line) == 2:
                if not self.parameter:
                    if self.comments is None:
                        self.comments = ''
                    self.comments = self.comments + line[1]
                elif line[1].startswith(' Port['):
                    try:
                        port_string, name = line[1].split('=', 1) #throws ValueError on unpack
                        name = name.strip()
                        garbage, index = port_string.strip().split('[', 1) #throws ValueError on unpack
                        index = int(index.rstrip(']')) #throws ValueError on not int-able
                        if index > self.rank or index <= 0:
                            print("Port name {0} provided for port number {1} but that's out of range for a file with extension s{2}p".format(name, index, self.rank))
                        else:
                            if self.port_names is None: #Initialize the array at the last minute
                                self.port_names = [''] * self.rank
                            self.port_names[index - 1] = name
                    except ValueError as e:
                        print("Error extracting port names from line: {0}".format(line))

            # remove the comment (if any) so rest of line can be processed.
            # touchstone files are case-insensitive
            line = line[0].strip().lower()

            # skip the line if there was nothing except comments
            if len(line) == 0:
                continue

            # grab the [version] string
            if line[:9] == '[version]':
                self.version = line.split()[1]
                continue

            # grab the [reference] string
            if line[:11] == '[reference]':
                # The reference impedances can follow the keyword
                # or appear on the following line.
                self.reference = [ float(r) for r in line.split()[2:] ]
                if not self.reference:
                    line = fid.readline()
                    self.reference = [ float(r) for r in line.split()]
                continue
            
            # grab the [Number of Ports] string
            if line[:17] == '[number of ports]':
                self.rank = int(line.split()[-1])
                continue
            
            # grab the [Number of Frequencies] string
            if line[:23] == '[number of frequencies]':
                self.frequency_nb = line.split()[-1]
                continue

            # skip the [Network Data] keyword
            if line[:14] == '[network data]':
                continue
            
            # skip the [End] keyword
            if line[:5] == '[end]':
                continue
            
            # the option line
            if line[0] == '#':
                toks = line[1:].strip().split()
                # fill the option line with the missing defaults
                toks.extend(['ghz', 's', 'ma', 'r', '50'][len(toks):])
                self.frequency_unit = toks[0]
                self.parameter = toks[1]
                self.format = toks[2]
                self.resistance = toks[4]
                if self.frequency_unit not in ['hz', 'khz', 'mhz', 'ghz']:
                    print('ERROR: illegal frequency_unit [%s]' % self.frequency_unit)
                    # TODO: Raise
                if self.parameter not in 'syzgh':
                    print('ERROR: illegal parameter value [%s]' % self.parameter)
                    # TODO: Raise
                if self.format not in ['ma', 'db', 'ri']:
                    print('ERROR: illegal format value [%s]' % self.format)
                    # TODO: Raise

                continue

            # collect all values without taking care of their meaning;
            # we're separating them later
            values.extend([ float(v) for v in line.split() ])

        # let's do some post-processing to the read values
        # for s2p parameters there may be noise parameters in the value list
        values = numpy.asarray(values)
        if self.rank == 2:
            # the first frequency value that is smaller than the last one is the
            # indicator for the start of the noise section
            # each set of the s-parameter section is 9 values long
            pos = numpy.where(numpy.sign(numpy.diff(values[::9])) == -1)
            if len(pos[0]) != 0:
                # we have noise data in the values
                pos = pos[0][0] + 1   # add 1 because diff reduced it by 1
                noise_values = values[pos*9:]
                values = values[:pos*9]
                self.noise = noise_values.reshape((-1,5))

        if len(values) % (1 + 2 * (self.rank)**2) != 0:
            # incomplete data line / matrix found
            raise AssertionError("Incomplete data line or matrix found")

        # reshape the values to match the rank
        self.sparameters = values.reshape((-1, 1 + 2*self.rank**2))
        # multiplier from the frequency unit
        self.frequency_mult = {'hz':1.0, 'khz':1e3,
                               'mhz':1e6, 'ghz':1e9}.get(self.frequency_unit)
        # set the reference to the resistance value if no [reference] is provided
        if not self.reference:
            self.reference = [self.resistance] * self.rank
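
The option line (e.g. '# GHz S RI R 50') is handled above by padding the token list with the Touchstone v1 positional defaults before indexing, so a bare '#' falls back to ghz / s / ma / r 50. A self-contained sketch of that padding trick follows; the function name is illustrative, not part of the class above.

def parse_option_line(line: str) -> dict:
    """Split a Touchstone option line and pad it with the v1 defaults."""
    toks = line.lstrip('#').strip().lower().split()
    toks.extend(['ghz', 's', 'ma', 'r', '50'][len(toks):])
    return {'frequency_unit': toks[0], 'parameter': toks[1],
            'format': toks[2], 'resistance': toks[4]}

print(parse_option_line('# GHz S RI R 50'))
# {'frequency_unit': 'ghz', 'parameter': 's', 'format': 'ri', 'resistance': '50'}
print(parse_option_line('#'))
# {'frequency_unit': 'ghz', 'parameter': 's', 'format': 'ma', 'resistance': '50'}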
Example #29
0
def _read_file(path: Path, monitor: TextIO) -> Tuple[str, int]:
    monitor.seek(0)
    return path.name, int(monitor.readline())
Example #30
0
def _read_info(f: TextIO) -> Tuple[bool, Sequence[int]]:
    _forward(f, 1)
    line = f.readline()
    spin_orbs, n_sym, _ = (int(x) for x in line.split())
    line = f.readline()
    return bool(spin_orbs), [int(irrep) for irrep in line.split()[:n_sym]]
Example #31
0
def read_game(handle: TextIO, *, Visitor = GameBuilder):
    """
    Reads a game from a file opened in text mode.

    >>> import chess.pgn
    >>>
    >>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn")
    >>>
    >>> first_game = chess.pgn.read_game(pgn)
    >>> second_game = chess.pgn.read_game(pgn)
    >>>
    >>> first_game.headers["Event"]
    'IBM Man-Machine, New York USA'
    >>>
    >>> # Iterate through all moves and play them on a board.
    >>> board = first_game.board()
    >>> for move in first_game.mainline_moves():
    ...     board.push(move)
    ...
    >>> board
    Board('4r3/6P1/2p2P1k/1p6/pP2p1R1/P1B5/2P2K2/3r4 b - - 0 45')

    By using text mode, the parser does not need to handle encodings. It is the
    caller's responsibility to open the file with the correct encoding.
    PGN files are usually ASCII or UTF-8 encoded. So, the following should
    cover most relevant cases (ASCII, UTF-8, UTF-8 with BOM).

    >>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn", encoding="utf-8-sig")

    Use :class:`~io.StringIO` to parse games from a string.

    >>> import io
    >>>
    >>> pgn = io.StringIO("1. e4 e5 2. Nf3 *")
    >>> game = chess.pgn.read_game(pgn)

    The end of a game is determined by a completely blank line or the end of
    the file. (Of course, blank lines in comments are possible).

    According to the PGN standard, at least the usual 7 header tags are
    required for a valid game. This parser also handles games without any
    headers just fine.

    The parser is relatively forgiving when it comes to errors. It skips over
    tokens it can not parse. By default, any exceptions are logged and
    collected in :data:`Game.errors <chess.pgn.Game.errors>`. This behavior can
    be :func:`overridden <chess.pgn.GameBuilder.handle_error>`.

    Returns the parsed game or ``None`` if the end of file is reached.
    """
    visitor = Visitor()

    found_game = False
    skipping_game = False
    managed_headers: Optional[Headers] = None
    unmanaged_headers: Optional[Headers] = None

    # Ignore leading empty lines and comments.
    line = handle.readline().lstrip("\ufeff")
    while line.isspace() or line.startswith("%") or line.startswith(";"):
        line = handle.readline()

    # Parse game headers.
    while line:
        # Ignore comments.
        if line.startswith("%") or line.startswith(";"):
            line = handle.readline()
            continue

        # First token of the game.
        if not found_game:
            found_game = True
            skipping_game = visitor.begin_game() is SKIP
            if not skipping_game:
                managed_headers = visitor.begin_headers()
                if not isinstance(managed_headers, Headers):
                    unmanaged_headers = Headers({})

        if not line.startswith("["):
            break

        if not skipping_game:
            tag_match = TAG_REGEX.match(line)
            if tag_match:
                visitor.visit_header(tag_match.group(1), tag_match.group(2))
                if unmanaged_headers is not None:
                    unmanaged_headers[tag_match.group(1)] = tag_match.group(2)
            else:
                break

        line = handle.readline()

    if not found_game:
        return None

    if not skipping_game:
        skipping_game = visitor.end_headers() is SKIP

    # Ignore single empty line after headers.
    if line.isspace():
        line = handle.readline()

    if not skipping_game:
        # Chess variant.
        headers = managed_headers if unmanaged_headers is None else unmanaged_headers
        assert headers is not None, "got neither managed nor unmanaged headers"
        try:
            VariantBoard = headers.variant()
        except ValueError as error:
            visitor.handle_error(error)
            VariantBoard = chess.Board

        # Initial position.
        fen = headers.get("FEN", VariantBoard.starting_fen)
        try:
            board_stack = [VariantBoard(fen, chess960=headers.is_chess960())]
        except ValueError as error:
            visitor.handle_error(error)
            skipping_game = True
        else:
            visitor.visit_board(board_stack[0])

    # Fast path: Skip entire game.
    if skipping_game:
        in_comment = False

        while line:
            if not in_comment:
                if line.isspace():
                    break
                elif line.startswith("%"):
                    line = handle.readline()
                    continue

            for match in SKIP_MOVETEXT_REGEX.finditer(line):
                token = match.group(0)
                if token == "{":
                    in_comment = True
                elif not in_comment and token == ";":
                    break
                elif token == "}":
                    in_comment = False

            line = handle.readline()

        visitor.end_game()
        return visitor.result()

    # Parse movetext.
    skip_variation_depth = 0
    while line:
        read_next_line = True

        # Ignore comments.
        if line.startswith("%") or line.startswith(";"):
            line = handle.readline()
            continue

        # An empty line means the end of a game.
        if line.isspace():
            visitor.end_game()
            return visitor.result()

        for match in MOVETEXT_REGEX.finditer(line):
            token = match.group(0)

            if token.startswith("{"):
                # Consume until the end of the comment.
                line = token[1:]
                comment_lines = []
                while line and "}" not in line:
                    comment_lines.append(line.rstrip())
                    line = handle.readline()
                end_index = line.find("}")
                comment_lines.append(line[:end_index])
                if "}" in line:
                    line = line[end_index:]
                else:
                    line = ""

                if not skip_variation_depth:
                    visitor.visit_comment("\n".join(comment_lines).strip())

                # Continue with the current or the next line.
                if line:
                    read_next_line = False
                break
            elif token == "(":
                if skip_variation_depth:
                    skip_variation_depth += 1
                elif board_stack[-1].move_stack:
                    if visitor.begin_variation() is SKIP:
                        skip_variation_depth = 1
                    else:
                        board = board_stack[-1].copy()
                        board.pop()
                        board_stack.append(board)
            elif token == ")":
                if skip_variation_depth:
                    skip_variation_depth -= 1
                if len(board_stack) > 1:
                    visitor.end_variation()
                    board_stack.pop()
            elif skip_variation_depth:
                continue
            elif token.startswith(";"):
                break
            elif token.startswith("$"):
                # Found a NAG.
                visitor.visit_nag(int(token[1:]))
            elif token == "?":
                visitor.visit_nag(NAG_MISTAKE)
            elif token == "??":
                visitor.visit_nag(NAG_BLUNDER)
            elif token == "!":
                visitor.visit_nag(NAG_GOOD_MOVE)
            elif token == "!!":
                visitor.visit_nag(NAG_BRILLIANT_MOVE)
            elif token == "!?":
                visitor.visit_nag(NAG_SPECULATIVE_MOVE)
            elif token == "?!":
                visitor.visit_nag(NAG_DUBIOUS_MOVE)
            elif token in ["1-0", "0-1", "1/2-1/2", "*"] and len(board_stack) == 1:
                visitor.visit_result(token)
            else:
                # Parse SAN tokens.
                try:
                    move = visitor.parse_san(board_stack[-1], token)
                except ValueError as error:
                    visitor.handle_error(error)
                    skip_variation_depth = 1
                else:
                    visitor.visit_move(board_stack[-1], move)
                    board_stack[-1].push(move)
                visitor.visit_board(board_stack[-1])

        if read_next_line:
            line = handle.readline()

    visitor.end_game()
    return visitor.result()
Example #32
0
def _forward(f: TextIO, n: int) -> str:
    for _ in range(n):
        line = f.readline()
    return line
Example #33
0
def read_nearby(file_handle: TextIO) -> List[Ticket]:
    _ = file_handle.readline()
    _ = file_handle.readline()
    return [parse_ticket(line) for line in file_handle]
Example #34
-1
def output_file(out: TextIO, fin: TextIO, keep_license: bool) -> None:
	skip = LICENSE_LINES
	if keep_license: skip = 0
	while True:
		line = fin.readline()
		if not line:
			break
		if skip:
			skip -= 1
			continue
		out.write(line)