Code example #1 (score: 0)
File: list_assets.py — project: redstreet/beanlabs
def read_details(filename):
    """Read the (possibly encrypted) details file and index its contents.

    Args:
      filename: A string, the name of the details file to read.
    Returns:
      A pair of the Details object containing the database and a mapping of
      Beancount account name to its (institution, account) pair.
    Raises:
      AssertionError: If two accounts share the same Beancount account name.
    """
    # Read the details file, decrypting it if necessary.
    if encryption.is_encrypted_file(filename):
        contents = encryption.read_encrypted_file(filename)
    else:
        # BUGFIX: this previously opened 'args.details_filename', a name not
        # in scope in this function; it must use the 'filename' argument.
        with open(filename, 'rb') as infile:
            contents = infile.read()

    # Parse the ASCII (text-format) protobuf database.
    details = assets_pb2.Details()
    text_format.Merge(contents, details)

    # Build an index from Beancount account name to its (institution, account)
    # pair, asserting that account names are unique across institutions.
    mapping = {}
    for institution in details.institution:
        for acc in institution.account:
            assert acc.beancount not in mapping, (
                "Account name {} is not unique!".format(acc.beancount))
            mapping[acc.beancount] = (institution, acc)

    return details, mapping
Code example #2 (score: 0)
def load_encrypted_file(filename,
                        log_timings=None,
                        log_errors=None,
                        extra_validations=None,
                        dedent=False,
                        encoding=None):
    """Load an encrypted Beancount input file.

    Args:
      filename: The name of an encrypted file to be parsed.
      log_timings: See load_string().
      log_errors: See load_string().
      extra_validations: See load_string().
      dedent: See load_string().
      encoding: See load_string().
    Returns:
      A triple of (entries, errors, option_map) where "entries" is a date-sorted
      list of entries from the file, "errors" a list of error objects generated
      while parsing and validating the file, and "options_map", a dict of the
      options parsed from the file.
    """
    # Decrypt the file contents in memory and delegate parsing to
    # load_string(), which handles everything past decryption.
    contents = encryption.read_encrypted_file(filename)
    # BUGFIX: 'dedent' was accepted and documented but never forwarded to
    # load_string(); pass it through so the argument actually takes effect.
    return load_string(contents,
                       log_timings=log_timings,
                       log_errors=log_errors,
                       extra_validations=extra_validations,
                       dedent=dedent,
                       encoding=encoding)
Code example #3 (score: 0)
File: encryption_test.py — project: vitaminx/beancount
    def test_read_encrypted_file(self):
        """Encrypt a sample input file and verify the round-trip decryption."""
        target = path.join(self.tmpdir.name, 'test.beancount.asc')
        self.encrypt_as_file(INPUT, target)

        # Point GnuPG at the test keyring while decrypting.
        with test_utils.environ('GNUPGHOME', self.ringdir):
            decrypted = encryption.read_encrypted_file(target)
            self.assertEqual(INPUT, decrypted)
Code example #4 (score: 0)
    def test_read_encrypted_file(self):
        """Import test keys, encrypt a sample file on disk, and verify decryption."""
        # Load both halves of the test keypair into the keyring.
        for key_material in (TEST_PUBLIC_KEY, TEST_SECRET_KEY):
            self._run_gpg('--import', stdin=key_material.encode('ascii'))

        # Produce an ASCII-armored encrypted copy of the Beancount input.
        out, err = self._run_gpg('--recipient',
                                 'beancount-test',
                                 '--encrypt',
                                 '--output=-',
                                 stdin=INPUT.encode('utf8'))
        encrypted_file = path.join(self.tmpdir, 'test.beancount.asc')
        with open(encrypted_file, 'w') as encfile:
            encfile.write(out)

        # Decrypt via the library under the test keyring and compare with
        # the original plaintext.
        with test_utils.environ('GNUPGHOME', self.ringdir):
            self.assertEqual(
                INPUT, encryption.read_encrypted_file(encrypted_file))
Code example #5 (score: 0)
def _parse_recursive(sources, log_timings, encoding=None):
    """Parse Beancount input, run its transformations and validate it.

    Recursively parse a list of files or strings and their include files and
    return an aggregate of parsed directives, errors, and the top-level
    options-map. If the same file is being parsed twice, ignore it and issue an
    error.

    Args:
      sources: A list of (filename-or-string, is-filename) where the first
        element is a string, with either a filename or a string to be parsed directly,
        and the second argument is a boolean that is true if the first is a filename.
        You may provide a list of such arguments to be parsed. Filenames must be absolute
        paths.
      log_timings: A function to write timings to, or None, if it should remain quiet.
      encoding: A string or None, the encoding to decode the input filename with.
    Returns:
      A tuple of (entries, parse_errors, options_map).
    """
    assert isinstance(sources, list) and all(
        isinstance(el, tuple) for el in sources)

    # Current parse state.
    entries, parse_errors = [], []
    # options_map doubles as a sentinel: it stays None until the first source
    # has been parsed, which is how the top-level source is identified below.
    options_map = None

    # A stack of sources to be parsed.
    source_stack = list(sources)

    # A list of absolute filenames that have been parsed in the past, used to
    # detect and avoid duplicates (cycles).
    filenames_seen = set()

    with misc_utils.log_time('beancount.parser.parser', log_timings, indent=1):
        # Breadth-first over the include graph: pop(0) processes sources in
        # the order they were queued.
        while source_stack:
            source, is_file = source_stack.pop(0)
            is_top_level = options_map is None

            # If the file is encrypted, read it in and process it as a string.
            if is_file:
                cwd = path.dirname(source)
                source_filename = source
                if encryption.is_encrypted_file(source):
                    # Decrypt into memory and fall through to the string
                    # branch; source_filename is kept for error reporting.
                    source = encryption.read_encrypted_file(source)
                    is_file = False
            else:
                # If we're parsing a string, the CWD is the current process
                # working directory.
                cwd = os.getcwd()
                source_filename = None

            if is_file:
                # All filenames here must be absolute.
                assert path.isabs(source)
                filename = path.normpath(source)

                # Check for file previously parsed... detect duplicates.
                if filename in filenames_seen:
                    parse_errors.append(
                        LoadError(
                            data.new_metadata("<load>", 0),
                            'Duplicate filename parsed: "{}"'.format(filename),
                            None))
                    continue

                # Check for a file that does not exist.
                if not path.exists(filename):
                    parse_errors.append(
                        LoadError(data.new_metadata("<load>", 0),
                                  'File "{}" does not exist'.format(filename),
                                  None))
                    continue

                # Parse a file from disk directly.
                filenames_seen.add(filename)
                with misc_utils.log_time('beancount.parser.parser.parse_file',
                                         log_timings,
                                         indent=2):
                    (src_entries, src_errors,
                     src_options_map) = parser.parse_file(filename,
                                                          encoding=encoding)

                cwd = path.dirname(filename)
            else:
                # Encode the contents if necessary.
                if encoding:
                    # NOTE(review): this decodes with 'encoding' then
                    # re-encodes to ASCII with replacement characters, so any
                    # non-ASCII content is lost here — confirm this is the
                    # intended normalization for the parser.
                    if isinstance(source, bytes):
                        source = source.decode(encoding)
                    source = source.encode('ascii', 'replace')

                # Parse a string buffer from memory.
                with misc_utils.log_time(
                        'beancount.parser.parser.parse_string',
                        log_timings,
                        indent=2):
                    (src_entries, src_errors,
                     src_options_map) = parser.parse_string(
                         source, source_filename)

            # Merge the entries resulting from the parsed file.
            entries.extend(src_entries)
            parse_errors.extend(src_errors)

            # We need the options from the very top file only (the very
            # first file being processed). No merging of options should
            # occur.
            if is_top_level:
                options_map = src_options_map
            else:
                aggregate_options_map(options_map, src_options_map)

            # Add includes to the list of sources to process. chdir() for glob,
            # which uses it indirectly.
            include_expanded = []
            with file_utils.chdir(cwd):
                for include_filename in src_options_map['include']:
                    matched_filenames = glob.glob(include_filename,
                                                  recursive=True)
                    if matched_filenames:
                        include_expanded.extend(matched_filenames)
                    else:
                        # A glob that matches nothing is reported, not fatal.
                        parse_errors.append(
                            LoadError(
                                data.new_metadata("<load>", 0),
                                'File glob "{}" does not match any files'.
                                format(include_filename), None))
            for include_filename in include_expanded:
                # Resolve includes relative to the including file's directory.
                if not path.isabs(include_filename):
                    include_filename = path.join(cwd, include_filename)
                include_filename = path.normpath(include_filename)

                # Add the include filenames to be processed later.
                source_stack.append((include_filename, True))

    # Make sure we have at least a dict of valid options.
    if options_map is None:
        options_map = options.OPTIONS_DEFAULTS.copy()

    # Save the set of parsed filenames in options_map. This intentionally
    # replaces the raw 'include' globs with the resolved set of files parsed.
    options_map['include'] = sorted(filenames_seen)

    return entries, parse_errors, options_map