Example #1
def test_parse_file(self):
    with tempfile.TemporaryFile('w+b', suffix='.beancount') as file:
        file.write(self.INPUT.encode('utf-8'))
        file.seek(0)
        entries, errors, _ = parser.parse_file(file)
        self.assertEqual(1, len(entries))
        self.assertEqual(0, len(errors))
Example #2
def test_parse_file(self):
    with tempfile.NamedTemporaryFile('w', suffix='.beancount') as file:
        file.write(self.INPUT)
        file.flush()
        entries, errors, _ = parser.parse_file(file.name)
        self.assertEqual(1, len(entries))
        self.assertEqual(0, len(errors))
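Examples #1 and #2 exercise the two input forms this API accepts: an open file object and a path string. A minimal sketch of the path form (the ledger filename here is hypothetical):

from beancount.parser import parser

# Parse a ledger from disk by path; returns (entries, errors, options_map).
entries, errors, options_map = parser.parse_file("ledger.beancount")
for error in errors:
    # Each error record carries source metadata and a message.
    print(error.message)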
Example #3
def parse(filename):
    """Parse the a ledger in debug mode.

    Run the parser on ledger FILENAME with debug mode active.

    """
    entries, errors, _ = parser.parse_file(filename, yydebug=1)
Example #4
def do_parse(filename, unused_args):
    """Run the parser in debug mode.

    Args:
      filename: A string, the Beancount input filename.
    """
    entries, errors, _ = parser.parse_file(filename, yydebug=1)
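Examples #3 and #4 both forward yydebug=1 to parse_file, which enables the generated parser's debug trace. A minimal sketch of the same call on a hypothetical path:

from beancount.parser import parser

# yydebug=1 makes the generated grammar print its parsing trace.
entries, errors, options_map = parser.parse_file("ledger.beancount", yydebug=1)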
Example #5
def export_v2_data(filename: str, output_filename: str,
                   num_directives: Optional[int]):
    """Export directives parsed from a ledger as text protos or a riegeli file."""
    if output_filename.endswith(".pbtxt"):
        output = open(output_filename, 'w')
        writer = None

        def write(message):
            print(message, file=output)
    else:
        output = open(output_filename, 'wb')
        writer = riegeli.RecordWriter(output)
        write = writer.write_message

    # Parse only; loader.load_file() would also run booking and validation.
    entries, errors, options_map = parser.parse_file(filename)
    entries = data.sorted(entries)

    if num_directives:
        entries = itertools.islice(entries, num_directives)
    for entry in entries:
        if isinstance(entry, data.Transaction):
            pbdir = convert_Transaction(entry)
        elif isinstance(entry, data.Open):
            pbdir = convert_Open(entry)
        elif isinstance(entry, data.Close):
            pbdir = convert_Close(entry)
        elif isinstance(entry, data.Commodity):
            pbdir = convert_Commodity(entry)
        elif isinstance(entry, data.Event):
            pbdir = convert_Event(entry)
        elif isinstance(entry, data.Note):
            pbdir = convert_Note(entry)
        elif isinstance(entry, data.Query):
            pbdir = convert_Query(entry)
        elif isinstance(entry, data.Price):
            pbdir = convert_Price(entry)
        elif isinstance(entry, data.Balance):
            pbdir = convert_Balance(entry)
        elif isinstance(entry, data.Pad):
            pbdir = convert_Pad(entry)
        else:
            pbdir = None

        if pbdir is not None:
            write("#---")
            write("# {}".format(pbdir.location.lineno))
            write("#")
            write(pbdir)
            write("")

    if hasattr(writer, "close"):
        writer.close()
    output.close()
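The if/elif ladder above maps each directive type to a converter; the same dispatch can be written as a type-keyed table. A minimal sketch reusing the example's convert_* helpers (exact-type lookup, whereas isinstance would also accept subclasses):

# Type-keyed dispatch equivalent to the if/elif chain in Example #5.
CONVERTERS = {
    data.Transaction: convert_Transaction,
    data.Open: convert_Open,
    data.Close: convert_Close,
    data.Commodity: convert_Commodity,
    data.Event: convert_Event,
    data.Note: convert_Note,
    data.Query: convert_Query,
    data.Price: convert_Price,
    data.Balance: convert_Balance,
    data.Pad: convert_Pad,
}

def convert_directive(entry):
    converter = CONVERTERS.get(type(entry))
    return converter(entry) if converter is not None else None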
Example #6
def test_parse_None(self):
    # None is treated as the empty string...
    entries, errors, _ = parser.parse_string(None)
    self.assertEqual(0, len(entries))
    self.assertEqual(0, len(errors))
    # ...however None is not a valid file-like object.
    with self.assertRaises(TypeError):
        entries, errors, _ = parser.parse_file(None)
Example #7
def render_file_context(entries, options_map, filename, lineno):
    """Render the context before and after a particular transaction is applied.

    Args:
      entries: A list of directives.
      options_map: A dict of options, as produced by the parser.
      filename: A string, the name of the file from which the transaction was parsed.
      lineno: An integer, the line number in the file the transaction was parsed from.
    Returns:
      A multiline string of text, which consists of the context before the
      transaction is applied, the transaction itself, and the context after it
      is applied. You can just print it; the text is in a form intended to be
      consumed by the user.
    """
    # Find the closest entry.
    closest_entry = data.find_closest(entries, filename, lineno)
    if closest_entry is None:
        raise SystemExit("No entry could be found before {}:{}".format(
            filename, lineno))

    # Run just the parser stage (no booking or interpolation, which would
    # remove the postings) on the input file to produce the corresponding
    # unbooked transaction, so that we can get the list of accounts.
    if path.exists(filename):
        parsed_entries, _, __ = parser.parse_file(filename)

        # Note: We cannot bisect as we cannot rely on sorting behavior from the parser.
        lineno = closest_entry.meta['lineno']
        closest_parsed_entries = [
            parsed_entry for parsed_entry in parsed_entries
            if parsed_entry.meta['lineno'] == lineno
        ]
        if len(closest_parsed_entries) != 1:
            # This is an internal error, this should never occur.
            raise RuntimeError(
                "Parsed entry corresponding to real entry not found in original filename."
            )
        closest_parsed_entry = next(iter(closest_parsed_entries))
    else:
        closest_parsed_entry = None

    return render_entry_context(entries, options_map, closest_entry,
                                closest_parsed_entry)
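A minimal sketch of driving render_file_context, assuming a hypothetical ledger path and line number; loader.load_file runs the full pipeline that produces the booked entries the function expects:

from beancount import loader

entries, errors, options_map = loader.load_file("ledger.beancount")
print(render_file_context(entries, options_map, "ledger.beancount", 42))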
Example #8
def parse_stdin(cls):
    entries, errors, _ = parser.parse_file("-")
    assert entries, "Empty entries: {}".format(entries)
    assert not errors, "Errors: {}".format(errors)
Example #9
def load_test_data(self, testset, kind):
    filename = self.generate_file_name(testset, kind)
    data, errors, _ = parser.parse_file(filename)
    assert not errors
    return data
Example #10
def _parse_recursive(sources, log_timings, encoding=None):
    """Parse Beancount input, run its transformations and validate it.

    Recursively parse a list of files or strings and their include files and
    return an aggregate of parsed directives, errors, and the top-level
    options-map. If the same file is being parsed twice, ignore it and issue an
    error.

    Args:
      sources: A list of (filename-or-string, is-filename) tuples, where the
        first element is either a filename or a string to be parsed directly,
        and the second is a boolean that is true if the first is a filename.
        Filenames must be absolute paths.
      log_timings: A function to write timings to, or None to remain quiet.
      encoding: A string or None, the encoding to decode the input with.
    Returns:
      A tuple of (entries, parse_errors, options_map).
    """
    assert isinstance(sources, list) and all(
        isinstance(el, tuple) for el in sources)

    # Current parse state.
    entries, parse_errors = [], []
    options_map = None

    # A stack of sources to be parsed.
    source_stack = list(sources)

    # A list of absolute filenames that have been parsed in the past, used to
    # detect and avoid duplicates (cycles).
    filenames_seen = set()

    with misc_utils.log_time('beancount.parser.parser', log_timings, indent=1):
        while source_stack:
            source, is_file = source_stack.pop(0)
            is_top_level = options_map is None

            # If the file is encrypted, read it in and process it as a string.
            if is_file:
                cwd = path.dirname(source)
                source_filename = source
                if encryption.is_encrypted_file(source):
                    source = encryption.read_encrypted_file(source)
                    is_file = False
            else:
                # If we're parsing a string, the CWD is the current process
                # working directory.
                cwd = os.getcwd()
                source_filename = None

            if is_file:
                # All filenames here must be absolute.
                assert path.isabs(source)
                filename = path.normpath(source)

                # Check for file previously parsed... detect duplicates.
                if filename in filenames_seen:
                    parse_errors.append(
                        LoadError(
                            data.new_metadata("<load>", 0),
                            'Duplicate filename parsed: "{}"'.format(filename),
                            None))
                    continue

                # Check for a file that does not exist.
                if not path.exists(filename):
                    parse_errors.append(
                        LoadError(data.new_metadata("<load>", 0),
                                  'File "{}" does not exist'.format(filename),
                                  None))
                    continue

                # Parse a file from disk directly.
                filenames_seen.add(filename)
                with misc_utils.log_time('beancount.parser.parser.parse_file',
                                         log_timings,
                                         indent=2):
                    (src_entries, src_errors,
                     src_options_map) = parser.parse_file(filename,
                                                          encoding=encoding)

                cwd = path.dirname(filename)
            else:
                # Encode the contents if necessary.
                if encoding:
                    if isinstance(source, bytes):
                        source = source.decode(encoding)
                    source = source.encode('ascii', 'replace')

                # Parse a string buffer from memory.
                with misc_utils.log_time(
                        'beancount.parser.parser.parse_string',
                        log_timings,
                        indent=2):
                    (src_entries, src_errors,
                     src_options_map) = parser.parse_string(
                         source, source_filename)

            # Merge the entries resulting from the parsed file.
            entries.extend(src_entries)
            parse_errors.extend(src_errors)

            # We need the options from the very top file only (the very
            # first file being processed). No merging of options should
            # occur.
            if is_top_level:
                options_map = src_options_map
            else:
                aggregate_options_map(options_map, src_options_map)

            # Add includes to the list of sources to process. chdir() because
            # glob resolves relative patterns against the current directory.
            include_expanded = []
            with file_utils.chdir(cwd):
                for include_filename in src_options_map['include']:
                    matched_filenames = glob.glob(include_filename,
                                                  recursive=True)
                    if matched_filenames:
                        include_expanded.extend(matched_filenames)
                    else:
                        parse_errors.append(
                            LoadError(
                                data.new_metadata("<load>", 0),
                                'File glob "{}" does not match any files'.
                                format(include_filename), None))
            for include_filename in include_expanded:
                if not path.isabs(include_filename):
                    include_filename = path.join(cwd, include_filename)
                include_filename = path.normpath(include_filename)

                # Add the include filenames to be processed later.
                source_stack.append((include_filename, True))

    # Make sure we have at least a dict of valid options.
    if options_map is None:
        options_map = options.OPTIONS_DEFAULTS.copy()

    # Save the set of parsed filenames in options_map.
    options_map['include'] = sorted(filenames_seen)

    return entries, parse_errors, options_map
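A minimal sketch of driving _parse_recursive directly, assuming a ledger at a hypothetical path; sources are (filename-or-string, is-filename) pairs, and filenames must be absolute:

import os.path

filename = os.path.abspath("main.beancount")
entries, parse_errors, options_map = _parse_recursive([(filename, True)], None)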
Example #11
def extract(self, file, existing_entries):
    entries, errors, options_map = parse_file(file.name)
    return entries