Example #1
def transitions_filter(file: TextIO):
    file.seek(0)
    started = False
    line_reader = SimpleLineGenerator(file)
    try:
        for line in line_reader():
            line = line.strip()
            if line == '' or line[0] == "#":
                continue
            if line.lower() == "transitions":
                if started:
                    raise RuntimeError(
                        "Detected more than one 'TRANSITIONS' section. Aborting."
                    )
                else:
                    started = True
                continue
            if line.lower() == "lambdas":
                for skipped_line in line_reader():
                    if skipped_line.strip().lower() == "transitions":
                        if started:
                            raise RuntimeError(
                                "Detected more than one 'TRANSITIONS' section. Aborting."
                            )
                        else:
                            line_reader.hold_on()
                            break
            if started and line.lower() != "lambdas":
                yield line.replace(",", ' ').split()
    finally:
        if not started:
            raise RuntimeError("Cannot find 'TRANSITIONS' section. Aborting.")
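The snippet depends on a SimpleLineGenerator helper that is not shown. Below is a minimal sketch of what it might look like, assuming hold_on() simply asks for the last yielded line to be replayed on the next iteration (the class name comes from the snippet, but this implementation is an inference, not the original):

from typing import Optional, TextIO

class SimpleLineGenerator:
    """Hypothetical line reader over a shared file that can replay its last line."""

    def __init__(self, file: TextIO):
        self._file = file
        self._last_line: Optional[str] = None
        self._replay = False

    def hold_on(self):
        # Request that the next iteration re-yield the held line.
        self._replay = True

    def __call__(self):
        while True:
            if self._replay and self._last_line is not None:
                self._replay = False
                yield self._last_line
                continue
            line = self._file.readline()
            if not line:
                return
            self._last_line = line
            yield line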
Example #2
    def _new_game(self, game_id: str, infile: typing.TextIO):
        """Consumes lines describing game metadata from the event file.

        Args:
            game_id: Identifier for the game.
            infile: Open buffer reading the event file.

        Returns:
            None.
        """
        self.current_game = {"id": game_id}
        while True:
            prev_loc = infile.tell()
            line = infile.readline()
            if not line:
                raise Exception("Encountered EOF while parsing new game info")
            fields = line.strip().split(",")
            if fields[0] not in ("version", "info"):
                infile.seek(prev_loc)
                return
            if fields[0] == "info":
                field, value = fields[1:]
                self.current_game[field] = value
                if field == "visteam":
                    self.v_roster = data.get_roster(self.year, value)
                elif field == "hometeam":
                    self.h_roster = data.get_roster(self.year, value)
Example #3
def load(filestream: TextIO) -> Dict[str, Any]:
    """Load and parse a YAML-formatted file.

    :param filestream: The YAML file stream to load.

    :raises SnapcraftError: if loading didn't succeed.
    :raises LegacyFallback: if the project's base is not core22.
    """
    try:
        data = yaml.safe_load(filestream)
        build_base = utils.get_effective_base(
            base=data.get("base"),
            build_base=data.get("build-base"),
            project_type=data.get("type"),
            name=data.get("name"),
        )

        if build_base is None:
            raise errors.LegacyFallback("no base defined")
        if build_base != "core22":
            raise errors.LegacyFallback("base is not core22")
    except yaml.error.YAMLError as err:
        raise errors.SnapcraftError(
            f"snapcraft.yaml parsing error: {err!s}") from err

    filestream.seek(0)

    try:
        return yaml.load(filestream, Loader=_SafeLoader)
    except yaml.error.YAMLError as err:
        raise errors.SnapcraftError(
            f"snapcraft.yaml parsing error: {err!s}") from err
Example #4
    def _read_game(self, infile: typing.TextIO):
        """Read events and generate data until a new game is declared in the file.

        Args:
            infile: An open buffer reading the event file.

        Yields:
            A row of tabular data for every play in the game.
        """
        while True:
            prev_loc = infile.tell()
            line = infile.readline()
            if not line:
                return
            fields = line.strip().split(",")
            if fields[0] == "id":
                infile.seek(prev_loc)
                return
            if fields[0] in ["start", "sub"]:
                pid: str = fields[1]
                pos: int = int(fields[5])
                lineup = self.h_lineup if int(fields[3]) else self.v_lineup
                lineup[pos - 1] = pid
                continue
            if fields[0] == "play":
                self._process_play(*fields[1:])
                yield self._current_event
                self._current_event = None
Example #5
def get_file_size(file: TextIO) -> int:
    """Get size of file in bytes without altering file"""
    starting_pos = file.tell()
    file.seek(0, os.SEEK_END)
    file_size = file.tell()
    file.seek(starting_pos)  # restore the saved position
    return file_size
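A quick round-trip check with an in-memory stream (illustrative only; for a StringIO the "size" is a character count):

import io
import os  # get_file_size above relies on os.SEEK_END

buf = io.StringIO("hello\nworld\n")
buf.read(3)                      # move the cursor away from the start
assert get_file_size(buf) == 12
assert buf.tell() == 3           # the original position is restored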
Example #6
def get_first_chrom_name(fp: TextIO) -> str:
    # make sure file pointer is at beginning of file
    fp.seek(0)
    header = fp.readline().rstrip()
    if not header.startswith(">"):
        raise ValueError(f"Expected fasta file, but it did not start with '>'")
    return header[1:].split()[0]
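For instance, with an in-memory FASTA stream (illustrative only):

import io

fasta = io.StringIO(">chr1 Homo sapiens chromosome 1\nACGT\n")
assert get_first_chrom_name(fasta) == "chr1"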
Example #7
def scan_draws(fp: TextIO, config_dict: Dict, lineno: int) -> int:
    """
    Parse draws, check elements per draw, save num draws to config_dict.
    """
    draws_found = 0
    num_cols = len(config_dict['column_names'])
    cur_pos = fp.tell()
    line = fp.readline().strip()
    first_draw = None
    while len(line) > 0 and not line.startswith('#'):
        lineno += 1
        draws_found += 1
        data = line.split(',')
        if len(data) != num_cols:
            raise ValueError(
                'line {}: bad draw, expecting {} items, found {}'.format(
                    lineno, num_cols, len(line.split(','))))
        if first_draw is None:
            first_draw = np.array(data, dtype=np.float64)
        cur_pos = fp.tell()
        line = fp.readline().strip()
    config_dict['draws'] = draws_found
    config_dict['first_draw'] = first_draw
    fp.seek(cur_pos)
    return lineno
Example #8
    def _read_energy(f_object: TextIO):
        """
        Read the energy value from a force file.

        Parameters
        ----------
        f_object : TextIO
                File object from which to read the values
        Returns
        -------
        Energy value for that configuration : float
        """
        energy = None
        pattern = "Total FORCE_EVAL"
        for line in f_object:
            if re.search(pattern, line):
                energy = float(line.split()[-1])

        if energy is None:
            print("No energy value found - Are you sure the input file was correct? Check the optimization.out file "
                  "for more details.")
            sys.exit(1)
        else:
            f_object.seek(0)  # return to the start of the file.
            return energy
Example #9
    def _read_forces(f_object: TextIO, n_atoms: int):
        """
        Read in the forces from the text file.

        Parameters
        ----------
        f_object : TextIO
                file object from which to read data
        n_atoms : int
                Number of atoms in the file.

        Returns
        -------
        mean force value as a float : float
        """
        force = 0.0
        pattern_1 = "# Atom   Kind   Element          X              Y              Z"
        pattern_2 = "SUM OF ATOMIC FORCES"

        for i, line in enumerate(f_object):
            if re.search(pattern_1, line):
                start = i + 1
            elif re.search(pattern_2, line):
                stop = i - 1
        f_object.seek(0)
        for i, line in enumerate(f_object):
            if i < start:
                continue
            elif start <= i <= stop:
                force += np.linalg.norm(np.array(line.split())[3:].astype(float))
            else:
                break
        f_object.seek(0)
        return force/n_atoms
Example #10
def strip_vars(f: typing.TextIO) -> list:
    tmp_lst = []
    i = 0
    count_var_regex = r"\bcount\b"
    count_var_regex_sub = COUNT_REPLACEMENT_WORD
    f.seek(0)
    try:
        for line in f.readlines():
            # Changes var.count, but not var.count_of_consul, for example
            repl_count_var = re.sub(count_var_regex, count_var_regex_sub, line)
            tmp_lst.append(repl_count_var)
            if line.startswith(tuple(ignore)):
                # This assumes your .tf files have a contiguous block of variables
                tmp_lst.pop()
                break
        for line in "".join(tmp_lst).splitlines():
            if line.startswith("variable"):
                break
            while not line.startswith("variable"):
                i += 1
                break
        del tmp_lst[:i]
        # Remove trailing newline if it got pulled in
        if "".join(tmp_lst[-1:]).isspace():
            del tmp_lst[-1:]
        # And add a newline to the head for any existing entries
        if not "".join(tmp_lst[0]).isspace():
            tmp_lst.insert(0, "\n")
    except IndexError:
        if len(sys.argv) > 1:
            print("INFO: No variables found in " + f.name)
    return tmp_lst
Example #11
    def _detect_dialect(cls, csv_file: TextIO, dialect: Dialect) -> Dialect:
        number_of_rows = 1024
        start_of_file = 0
        if not dialect:
            dialect = Sniffer().sniff(csv_file.read(number_of_rows))
            csv_file.seek(start_of_file)
        return dialect
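The pattern here is to sniff a small sample and then rewind so the real CSV reader starts from the top. A standalone sketch of the same idea using only the standard library:

import io
from csv import Sniffer

sample = io.StringIO("a;b;c\n1;2;3\n")
dialect = Sniffer().sniff(sample.read(1024))
sample.seek(0)               # rewind so the reader sees the whole stream
print(dialect.delimiter)     # ';'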
Example #12
def get_file_size(f: typing.TextIO) -> int:
    """Gets file size. This function restores the file position."""
    restore = f.tell()
    f.seek(0, 2)  # whence=2 is os.SEEK_END
    ret = f.tell()
    f.seek(restore)
    return ret
Example #13
def get_imports(blueprint_file: typing.TextIO) -> dict:
    level = 0
    imports_token = None
    import_lines = {}
    blueprint_file.seek(0, 0)
    for t in yaml.scan(blueprint_file):
        if isinstance(t, (yaml.tokens.BlockMappingStartToken,
                          yaml.tokens.BlockSequenceStartToken,
                          yaml.tokens.FlowMappingStartToken,
                          yaml.tokens.FlowSequenceStartToken)):
            level += 1
        if isinstance(
                t, (yaml.tokens.BlockEndToken, yaml.tokens.FlowMappingEndToken,
                    yaml.tokens.FlowSequenceEndToken)):
            level -= 1

        if isinstance(t, yaml.tokens.ScalarToken):
            if level == 1 and t.value == 'imports':
                imports_token = t
                continue

            token_length = t.end_mark.index - t.start_mark.index

            if level >= 1 and imports_token and \
                    token_length < MAX_IMPORT_TOKEN_LENGTH:
                import_lines[t.value] = {
                    START_POS: t.start_mark.index,
                    END_POS: t.end_mark.index,
                }

        if isinstance(t, yaml.tokens.KeyToken) and imports_token:
            break

    return import_lines
Example #14
def main(csv_file: TextIO, output_dir: Path) -> int:
    output_dir.mkdir(exist_ok=True)

    dialect = csv.Sniffer().sniff(csv_file.read(1024 * 4))
    csv_file.seek(0)

    reader = csv.DictReader(csv_file, dialect=dialect)
    rows = list(reader)
    csv_file.close()

    def is_question(q: str) -> bool:
        return q not in ["Timestamp", "Email address"]

    questions = filter(is_question, rows[0].keys())

    data = {q: get_data(rows, q) for q in questions}
    max_n = max(max(v[1]) for v in data.values())
    print(max_n)
    y_ticks = np.arange(0, max_n + 2, step=2)

    for question, counter in data.items():
        filename = safe_filename(question) + ".png"
        filename = output_dir / filename
        create_graph(counter, question, filename, y_ticks=y_ticks)

    return 0
Example #15
def get_mzXMLs_from_pep_xml(pepxml_file: typing.TextIO):
    REC = re.compile(' base_name="(.+?)"')
    currpos = pepxml_file.tell()
    t = pepxml_file.read()
    pepxml_file.seek(currpos)
    paths = map(pathlib.Path, REC.findall(t))
    return [path.with_suffix(".mzXML") for path in paths if path.is_absolute()]
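An illustrative call with a fabricated pepXML fragment (on POSIX; the is_absolute() check filters out relative base names):

import io

pepxml = io.StringIO('<msms_run_summary base_name="/data/run1" raw_data=".mzXML"/>')
print(get_mzXMLs_from_pep_xml(pepxml))  # [PosixPath('/data/run1.mzXML')]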
Example #16
def one_file(fp: TextIO) -> None:
    lines = list(fp)
    new = lines[:]
    shebang_header = False

    if new[0].startswith("#!"):
        shebang_header = True
        new.pop(0)
        if not new[0].strip():
            new.pop(0)

    while new and new[0][0] == "#":
        new.pop(0)

    while new and new[0].strip() == "":
        new.pop(0)

    new.insert(0, LICENSE_NOTICE)
    if shebang_header:
        new.insert(0, SHEBANG_HEADER)
    data = "".join(new)

    fp.seek(0)
    fp.write(data)
    fp.truncate()
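Because the function rewrites the stream in place (seek, write, truncate), the underlying file must be opened readable and writable, e.g. mode "r+". A hedged usage sketch, with placeholder values for the module's LICENSE_NOTICE and SHEBANG_HEADER constants:

import io

LICENSE_NOTICE = "# Copyright (c) Example Corp.\n"   # placeholder values; the real
SHEBANG_HEADER = "#!/usr/bin/env python3\n"          # constants live in the source module

buf = io.StringIO("#!/usr/bin/env python3\n\n# old header\n\nprint('hi')\n")
one_file(buf)
print(buf.getvalue())  # shebang, then the license notice, then the code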
Example #17
    def __init__(self, file: TextIO):
        self.text: str = file.read()
        self.current_index: int = 0
        self.current_token: Optional[str] = None
        self.next_index: Optional[int] = None
        self.next_token: Optional[str] = None
        file.seek(0)
Example #18
    def from_file(cls, fp: TextIO, skip_line=2):
        id = fp.readline()
        if not id:  # readline() returns '' at EOF, never None
            raise RuntimeError('No more lines exist in {}'.format(fp))
        for i in range(skip_line):
            fp.readline()

        size = int(fp.readline())  # number of points; int() is safer than eval() here
        data = []

        for i in range(size):
            now_position = fp.tell()
            line = fp.readline()
            try:
                x, y = map(float, line.split())
                data.append([x, y])
            except Exception:
                logger.warning(
                    f"Expected point count in {fp} is {size} but only got {i} points"
                )
                # The data block holds fewer points than the declared size
                fp.seek(now_position)  # go back to the previous line
                size = i  # update the point count
                break

        return SinglePoint(size, data)
Example #19
def repair_depfile(depfile: TextIO, include_dirs: List[Path]) -> None:
    changes_made = False
    out = ""
    for line in depfile.readlines():
        if ":" in line:
            colon_pos = line.rfind(":")
            out += line[: colon_pos + 1]
            line = line[colon_pos + 1 :]

        line = line.strip()

        if line.endswith("\\"):
            end = " \\"
            line = line[:-1].strip()
        else:
            end = ""

        path = Path(line)
        if not path.is_absolute():
            changes_made = True
            path = resolve_include(path, include_dirs)
        out += f"    {path}{end}\n"

    # If any paths were changed, rewrite the entire file
    if changes_made:
        depfile.seek(0)
        depfile.write(out)
        depfile.truncate()
Example #20
    def from_mypy(self, file: TextIO) -> List[Issue]:
        buff = None
        items = []

        for line in file:
            line = line.strip()
            if RE_MYPY_LINE.match(line):
                if buff:
                    items.append(self.__parse_mypy_issue(buff))
                buff = []
            if buff is not None:
                buff.append(line)
        if buff is None:
            # mypy can return an error without line/column values
            file.seek(0)
            for line in file:
                m = RE_MYPY_LINE_WO_COORDINATES.match(line.strip())
                if not m:
                    continue
                items.append(
                    self.Issue(
                        path=m.group(1).strip(),
                        description=m.group(2).strip(),
                        rule=self._mypy_rules["[unknown]"],
                    ))
                self.logger.info(
                    f"detected an error without coordinates: {line}")

        if buff:
            items.append(self.__parse_mypy_issue(buff))
        return [i for i in items if i]
Example #21
def check_stream(
    input_stream: TextIO,
    show_diff: Union[bool, TextIO] = False,
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = False,
    **config_kwargs,
) -> bool:
    """Checks any imports within the provided code stream, returning `False` if any unsorted or
    incorrectly imports are found or `True` if no problems are identified.

    - **input_stream**: The stream of code with imports that need to be sorted.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
    TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - ****config_kwargs**: Any config modifications.
    """
    config = _config(path=file_path, config=config, **config_kwargs)

    changed: bool = sort_stream(
        input_stream=input_stream,
        output_stream=Empty,
        extension=extension,
        config=config,
        file_path=file_path,
        disregard_skip=disregard_skip,
    )
    printer = create_terminal_printer(color=config.color_output)
    if not changed:
        if config.verbose and not config.only_modified:
            printer.success(f"{file_path or ''} Everything Looks Good!")
        return True
    else:
        printer.error(f"{file_path or ''} Imports are incorrectly sorted and/or formatted.")
        if show_diff:
            output_stream = StringIO()
            input_stream.seek(0)
            file_contents = input_stream.read()
            sort_stream(
                input_stream=StringIO(file_contents),
                output_stream=output_stream,
                extension=extension,
                config=config,
                file_path=file_path,
                disregard_skip=disregard_skip,
            )
            output_stream.seek(0)

            show_unified_diff(
                file_input=file_contents,
                file_output=output_stream.read(),
                file_path=file_path,
                output=None if show_diff is True else cast(TextIO, show_diff),
                color_output=config.color_output,
            )
        return False
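This appears to be isort's check_stream (from isort.api). Assuming so, a minimal call looks like this; it returns False and prints a diff because the imports are out of order:

import io
from isort import api

ok = api.check_stream(io.StringIO("import sys\nimport os\n"), show_diff=True)
print(ok)  # False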
Example #22
def parse_conllu_plus_fields(
    in_file: T.TextIO,
    metadata_parsers: T.Optional[T.Dict[str, _MetadataParserType]] = None
) -> T.Optional[T.Sequence[str]]:
    pos = in_file.tell()

    # Get first line
    try:
        first_sentence = next(parse_sentences(in_file))
        first_line = first_sentence.split("\n")[0]
    except StopIteration:
        first_line = ""

    # parse_sentences moves the file cursor, so reset it here
    in_file.seek(pos)

    if not first_line.startswith("#"):
        return None

    tokenlist = parse_token_and_metadata(first_line,
                                         metadata_parsers=metadata_parsers)
    metadata = tokenlist.metadata

    fields = None
    if "global.columns" in metadata and metadata["global.columns"]:
        fields = [
            value.lower() for value in metadata["global.columns"].split(" ")
        ]

    return fields
Example #23
def _get_metadata(fp: TextIO) -> Tuple[int, Dict[str, str]]:
    """
    Load metadata from a fluke 985 tab-delimited data file.

    Parameters
    ----------
    fp : file-like object
        The data file.

    Returns
    -------
    last_md_line : int
        The index of the blank line between metadata and the table.

    metadata : dict
        A dictionary of metadata, including "Model Number" and others specified
        in the header.
    """
    metadata = {}
    last_md_line = 0
    fp.seek(0)
    for line_number in itertools.count(start=1):
        line = fp.readline().strip()
        if ':' not in line:
            last_md_line = line_number
            break

        key, value = line.split(':', 1)
        metadata[key.strip()] = value.strip()

    return last_md_line, metadata
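An illustrative run over a fabricated header (real inputs come from a Fluke 985 particle counter export):

import io

header = io.StringIO("Model Number: FLUKE-985\nSerial Number: 1234\n\ncol1\tcol2\n")
last_md_line, metadata = _get_metadata(header)
# last_md_line == 3 (the blank separator line)
# metadata == {'Model Number': 'FLUKE-985', 'Serial Number': '1234'}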
Example #24
def scan_config(fd: TextIO, config_dict: Dict[str, Any], lineno: int) -> int:
    """
    Scan initial stan_csv file comments lines and
    save non-default configuration information to config_dict.
    """
    cur_pos = fd.tell()
    line = fd.readline().strip()
    while len(line) > 0 and line.startswith('#'):
        lineno += 1
        if line.endswith('(Default)'):
            line = line.replace('(Default)', '')
        line = line.lstrip(' #\t')
        key_val = line.split('=')
        if len(key_val) == 2:
            if key_val[0].strip() == 'file' and not key_val[1].endswith('csv'):
                config_dict['data_file'] = key_val[1].strip()
            elif key_val[0].strip() != 'file':
                raw_val = key_val[1].strip()
                val: Union[int, float, str]
                try:
                    val = int(raw_val)
                except ValueError:
                    try:
                        val = float(raw_val)
                    except ValueError:
                        val = raw_val
                config_dict[key_val[0].strip()] = val
        cur_pos = fd.tell()
        line = fd.readline().strip()
    fd.seek(cur_pos)
    return lineno
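For example, with a fabricated Stan CSV prefix (note that '(Default)' entries are stored too, just with the marker stripped):

import io

fd = io.StringIO("# method = sample (Default)\n# num_samples = 2000\nlp__,theta\n")
config: dict = {}
lineno = scan_config(fd, config, lineno=0)
# lineno == 2; config == {'method': 'sample', 'num_samples': 2000}
# fd is left positioned at the 'lp__,theta' header line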
Example #25
def scan_sampling_iters(fd: TextIO, config_dict: Dict[str, Any],
                        lineno: int) -> int:
    """
    Parse sampling iteration, save number of iterations to config_dict.
    """
    draws_found = 0
    num_cols = len(config_dict['column_names'])
    cur_pos = fd.tell()
    line = fd.readline().strip()
    while len(line) > 0 and not line.startswith('#'):
        lineno += 1
        draws_found += 1
        data = line.split(',')
        if len(data) != num_cols:
            raise ValueError(
                'line {}: bad draw, expecting {} items, found {}\n'.format(
                    lineno, num_cols, len(line.split(','))) +
                'This error could be caused by running out of disk space.\n'
                'Try clearing up TEMP or setting output_dir to a path'
                ' on another drive.', )
        cur_pos = fd.tell()
        line = fd.readline().strip()
    config_dict['draws_sampling'] = draws_found
    fd.seek(cur_pos)
    return lineno
Example #26
def load(fp: t.TextIO) -> t.Union[NNF, And[Or[Var]]]:
    """Load a sentence from an open file.

    The format is automatically detected.
    """
    for line in fp:
        if line.startswith('c'):
            continue
        if line.startswith('p '):
            problem = line.split()
            if len(problem) < 2:
                raise DecodeError("Malformed problem line")
            fmt = problem[1]
            if 'sat' in fmt or 'SAT' in fmt:
                # problem[2] contains the number of variables
                # but that's currently not explicitly represented
                return _load_sat(fp)
            elif 'cnf' in fmt or 'CNF' in fmt:
                # problem[2] has the number of variables
                # problem[3] has the number of clauses
                return _load_cnf(fp)
            else:
                raise DecodeError("Unknown format '{}'".format(fmt))
        elif line.startswith('nnf '):
            # Might be a DSHARP output file
            from nnf import dsharp
            fp.seek(0)
            return dsharp.load(fp)
        else:
            raise DecodeError(
                "Couldn't find a problem line before an unknown kind of line")
    else:
        raise DecodeError(
            "Couldn't find a problem line before the end of the file")
Example #27
def countCharacters(file: typing.TextIO, background: str, delta_x: int,
                    delta_y: int, wrap: DIRECTION) -> int:
    count = 0
    x_position = 0
    lines = iter(file)
    line = next(lines).rstrip()
    while True:
        delta_lines = delta_y
        while delta_lines > 0:
            try:
                line = next(lines).rstrip()
                delta_lines -= 1
            except StopIteration:
                if wrap != 'y':
                    break
                file.seek(0)
        if delta_lines > 0:
            break
        x_position += delta_x
        if x_position >= len(line):
            if wrap != 'x':
                break
            x_position %= len(line)
        if line[x_position] != background:
            count += 1
    return count
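This is the classic slope-traversal puzzle: step delta_x right and delta_y down across a grid, counting non-background characters, optionally wrapping in one axis (DIRECTION is presumably a Literal of 'x'/'y'). A quick check on a tiny grid:

import io

grid = "..##.......\n#...#...#..\n.#....#..#.\n"
trees = countCharacters(io.StringIO(grid), background=".", delta_x=3, delta_y=1, wrap="x")
print(trees)  # 1: only row 2, column 6 holds a '#' on this slope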
Example #28
    def __init__(self,
                 json_file: TextIO,
                 close_fd: bool,
                 use_rate: bool = True):
        data = json.load(json_file)
        y_list = data["y_list"]
        total = data["total"]
        if use_rate:
            y_list = [cracked / total * 100 for cracked in y_list]
        if close_fd:
            json_file.close()
        elif json_file.seekable():
            json_file.seek(0)
        self.x_list = data["x_list"]
        self.y_list = y_list
        self.color = data['color']
        self.marker = data['marker']
        self.marker_size = data['marker_size']
        self.mark_every = data["mark_every"]
        self.line_width = data['line_width']
        if isinstance(data['line_style'], str):
            self.line_style = data['line_style']
        else:
            self.line_style = (data['line_style'][0],
                               tuple(data['line_style'][1]))
        self.label = data['label']
        self.text = data['label']
        self.show_text = data['show_text']
        self.text_x = data['text_x']
        self.text_y = data['text_y']
        self.text_fontsize = data['text_fontsize']
        self.text_color = data['text_color']
Example #29
def count_lines(f_: TextIO, chunksize=2 ** 12) -> int:
    count = 0
    f_.seek(0)
    chunk = f_.read(chunksize)
    while chunk:
        count += chunk.count(NEWLINE)
        chunk = f_.read(chunksize)
    return count
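NEWLINE is a module-level constant in the original; assuming it is simply "\n", the chunked counter can be verified with:

import io

NEWLINE = "\n"  # assumed value of the module constant

assert count_lines(io.StringIO("a\nb\nc\n")) == 3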
Example #30
def is_black_alpha_only(data: TextIO) -> bool:
    """Check if an image has only black pixels (with alpha)"""
    result = False
    with Image.open(data) as image:
        if image.mode == "RGBA":
            result = not any(p[:3] != (0, 0, 0) for p in list(image.getdata()))
    data.seek(0)
    return result
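Despite the TextIO annotation, Image.open really needs a binary stream; an in-memory check with Pillow (illustrative only):

import io
from PIL import Image

buf = io.BytesIO()
Image.new("RGBA", (2, 2), (0, 0, 0, 128)).save(buf, format="PNG")
buf.seek(0)
assert is_black_alpha_only(buf) is True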
Example #31
def dump_conditions(file: TextIO) -> None:
    """Dump docs for all the condition flags, results and metaconditions."""

    LOGGER.info('Dumping conditions...')

    # Delete existing data, after the marker.
    file.seek(0, io.SEEK_SET)

    prelude = []

    for line in file:
        if DOC_MARKER in line:
            break
        prelude.append(line)

    file.seek(0, io.SEEK_SET)
    file.truncate(0)

    if not prelude:
        # No marker, blank the whole thing.
        LOGGER.warning('No intro text before marker!')

    for line in prelude:
        file.write(line)
    file.write(DOC_MARKER + '\n\n')

    file.write(DOC_META_COND)

    ALL_META.sort(key=lambda i: i[1])  # Sort by priority
    for flag_key, priority, func in ALL_META:
        file.write('#### `{}` ({}):\n\n'.format(flag_key, priority))
        dump_func_docs(file, func)
        file.write('\n')

    for lookup, name in [
            (ALL_FLAGS, 'Flags'),
            (ALL_RESULTS, 'Results'),
            ]:
        print('<!------->', file=file)
        print('# ' + name, file=file)
        print('<!------->', file=file)

        lookup_grouped = defaultdict(list)  # type: Dict[str, List[Tuple[str, Tuple[str, ...], Callable]]]

        for flag_key, aliases, func in lookup:
            group = getattr(func, 'group', 'ERROR')
            if group is None:
                group = '00special'
            lookup_grouped[group].append((flag_key, aliases, func))

        # Collapse 1-large groups into Ungrouped.
        for group in list(lookup_grouped):
            if len(lookup_grouped[group]) < 2:
                lookup_grouped[''].extend(lookup_grouped[group])
                del lookup_grouped[group]

        if not lookup_grouped['']:
            del lookup_grouped['']

        for header_ind, (group, funcs) in enumerate(sorted(lookup_grouped.items())):
            if group == '':
                group = 'Ungrouped Conditions'

            if header_ind:
                # Not before the first one...
                print('---------\n', file=file)

            if group == '00special':
                print(DOC_SPECIAL_GROUP, file=file)
            else:
                print('### ' + group + '\n', file=file)

            LOGGER.info('Doing {} group...', group)

            for flag_key, aliases, func in funcs:
                print('#### `{}`:\n'.format(flag_key), file=file)
                if aliases:
                    print('**Aliases:** `' + '`, `'.join(aliases) + '`' + '  \n', file=file)
                dump_func_docs(file, func)
                file.write('\n')