Beispiel #1
0
    def __make_cache(self, context):
        """Parse the ledger once and cache completion candidates.

        Populates self.attributes with sorted account, event, link,
        payee and tag candidates plus the commodities declared in the
        ledger's options.  No-op when beancount is not importable.
        """
        if not HAS_BEANCOUNT:
            return

        root = self.vim.eval("beancount#get_root()")
        directives, _, options = load_file(root)

        # Sets de-duplicate candidates across directives.
        found = {
            'accounts': set(),
            'events': set(),
            'links': set(),
            'payees': set(),
            'tags': set(),
        }

        for directive in directives:
            if isinstance(directive, data.Open):
                found['accounts'].add(directive.account)
            elif isinstance(directive, data.Transaction) and directive.payee:
                found['payees'].add(directive.payee)
            # Links/tags exist on several directive types; guard for absence.
            if getattr(directive, 'links', None):
                found['links'].update(directive.links)
            if getattr(directive, 'tags', None):
                found['tags'].update(directive.tags)
            if isinstance(directive, data.Event):
                found['events'].add(directive.type)

        self.attributes = {
            'accounts': sorted(found['accounts']),
            'events': sorted(found['events']),
            'commodities': options['commodities'],
            'links': sorted(found['links']),
            'payees': sorted(found['payees']),
            'tags': sorted(found['tags']),
        }
Beispiel #2
0
    def load_file(self):
        """Load self.beancount_file_path and compute things that are independent
        of how the entries might be filtered later"""
        # Parse the ledger; keep the full (unfiltered) entry list, the
        # loader errors and the beancount options map.
        self.all_entries, self.errors, self.options = \
            loader.load_file(self.beancount_file_path)
        self.price_map = prices.build_price_map(self.all_entries)
        self.account_types = options.get_account_types(self.options)

        self.title = self.options['title']
        # Number formatting follows the ledger's "render_commas" option.
        if self.options['render_commas']:
            self.format_string = '{:,f}'
            self.default_format_string = '{:,.2f}'
        else:
            self.format_string = '{:f}'
            self.default_format_string = '{:.2f}'

        # Aggregates over the full entry list (used e.g. to populate filters).
        self.active_years = list(getters.get_active_years(self.all_entries))
        self.active_tags = list(getters.get_all_tags(self.all_entries))
        self.active_payees = list(getters.get_all_payees(self.all_entries))

        self.queries = _filter_entries_by_type(self.all_entries, Query)

        # Realize the unfiltered entries into the account tree.
        self.all_root_account = realization.realize(self.all_entries,
                                                    self.account_types)
        self.all_accounts = _list_accounts(self.all_root_account)
        self.all_accounts_leaf_only = _list_accounts(
            self.all_root_account, leaf_only=True)

        self.sidebar_links = _sidebar_links(self.all_entries)

        # Apply the active filters first: Budgets below operates on the
        # filtered self.entries, not on self.all_entries.
        self._apply_filters()

        self.budgets = Budgets(self.entries)
        self.errors.extend(self.budgets.errors)
Beispiel #3
0
    def load_file(self, beancount_file_path=None):
        """Load self.beancount_file_path and compute things that are independent
        of how the entries might be filtered later"""
        # An explicit path argument overrides (and replaces) the stored one.
        if beancount_file_path:
            self.beancount_file_path = beancount_file_path

        self.all_entries, self.errors, self.options = \
            loader.load_file(self.beancount_file_path)
        self.price_map = prices.build_price_map(self.all_entries)
        self.account_types = options.get_account_types(self.options)

        self.title = self.options['title']
        # Number formatting follows the ledger's "render_commas" option.
        if self.options['render_commas']:
            self.format_string = '{:,f}'
            self.default_format_string = '{:,.2f}'
        else:
            self.format_string = '{:f}'
            self.default_format_string = '{:.2f}'
        self.dcontext = self.options['dcontext']

        # Aggregates over the full entry list (used e.g. to populate filters).
        self.active_years = list(getters.get_active_years(self.all_entries))
        self.active_tags = list(getters.get_all_tags(self.all_entries))
        self.active_payees = list(getters.get_all_payees(self.all_entries))

        self.queries = self._entries_filter_type(self.all_entries, Query)

        # Realize the unfiltered entries into the account tree.
        self.all_root_account = realization.realize(self.all_entries,
                                                    self.account_types)
        self.all_accounts = self._all_accounts()
        self.all_accounts_leaf_only = self._all_accounts(leaf_only=True)

        self._apply_filters()
Beispiel #4
0
    def load_file(self):
        """Load self.beancount_file_path and compute things that are independent
        of how the entries might be filtered later"""
        # Keep the raw source text of the top-level file around.
        with open(self.beancount_file_path, encoding='utf8') as f:
            self.source = f.read()

        self.entries, self._errors, self.options = loader.load_file(self.beancount_file_path)
        self.all_entries = self.entries
        self.price_map = prices.build_price_map(self.all_entries)

        self.title = self.options['title']

        # Flatten the loader's error objects into plain dicts for rendering.
        self.errors = []
        for error in self._errors:
            self.errors.append({
                'file': error.source['filename'],
                'line': error.source['lineno'],
                'error': error.message,
                'entry': error.entry  # TODO render entry
            })

        self.active_years = list(getters.get_active_years(self.all_entries))
        self.active_tags = list(getters.get_all_tags(self.all_entries))

        # Realize the entries into the account tree.
        self.account_types = options.get_account_types(self.options)
        self.real_accounts = realization.realize(self.entries, self.account_types)
        self.all_accounts = self._account_components()
Beispiel #5
0
def test_plugins(tmpdir):
    """End-to-end test of the fava link_statements and
    tag_discovered_documents plugins against a temporary document tree."""
    sample_folder = tmpdir.mkdir('fava_plugins')

    documents_folder = sample_folder.mkdir('documents')

    # Statements 1-3 live under Expenses:Foo; #3 is only "discovered"
    # (referenced by no transaction metadata).
    foo_folder = documents_folder.mkdir('Expenses').mkdir('Foo')
    sample_statement1 = foo_folder.join('2016-11-01 Test 1.pdf')
    sample_statement1.write('Hello World 1')
    sample_statement2 = foo_folder.join('2016-11-01 Test 2.pdf')
    sample_statement2.write('Hello World 2')
    sample_statement3 = foo_folder.join('2016-11-01 Test 3 discovered.pdf')
    sample_statement3.write('Hello World 3')

    assets_folder = documents_folder.mkdir('Assets').mkdir('Cash')
    sample_statement4 = assets_folder.join('2016-11-01 Test 4.pdf')
    sample_statement4.write('Hello World 4')
    sample_statement5 = assets_folder.join('Test 5.pdf')
    sample_statement5.write('Hello World 5')

    beancount_file = sample_folder.join('example.beancount')
    beancount_file.write(dedent("""
        option "title" "Test"
        option "operating_currency" "EUR"
        option "documents" "{}"

        plugin "fava.plugins.link_statements"
        plugin "fava.plugins.tag_discovered_documents"

        2016-10-31 open Expenses:Foo
        2016-10-31 open Assets:Cash

        2016-11-01 * "Foo" "Bar"
            statement: "{}"
            Expenses:Foo                100 EUR
            Assets:Cash

        2016-11-02 * "Foo" "Bar"
            statement: "documents/Expenses/Foo/2016-11-01 Test 1.pdf"
            statement-2: "documents/Assets/Cash/2016-11-01 Test 4.pdf"
            Expenses:Foo        100 EUR
            Assets:Cash

        2016-11-02 document Assets:Cash "documents/Assets/Cash/Test 5.pdf"
    """.format(documents_folder, sample_statement2)))

    entries, errors, options = load_file(str(beancount_file))

    assert len(errors) == 0
    assert len(entries) == 9

    # Entry indices follow directive order after plugin processing:
    # opens, transactions, then the document entries.
    assert 'statement' in entries[3].tags
    assert 'statement' in entries[4].tags
    assert 'statement' in entries[5].tags

    assert entries[2].links == entries[5].links
    assert entries[7].links == entries[3].links == entries[4].links

    assert 'discovered' in entries[6].tags
    assert entries[8].tags is None
Beispiel #6
0
def merge_and_store(*legerpaths, storePath=None, title='Merged'):
    """Merge the ledgers at *legerpaths* and store the merged result.

    When *storePath* is not given, the first input path is overwritten
    with the merged ledger.
    """
    if storePath is None:
        storePath = legerpaths[0]

    # Load each ledger; only the entries are needed (errors/options dropped).
    loaded = []
    for ledger_path in legerpaths:
        entries = loader.load_file(ledger_path)[0]
        loaded.append(entries)

    store_sorted_ledger(merge_ledgers(loaded), storePath, title=title)
Beispiel #7
0
    def load_beanfile(self, file_name, stop_on_error=False):
        """Load a beancount file and return its entries.

        Args:
          file_name: Path of the beancount input file.
          stop_on_error: If true, raise ValueError when the loader
            reports any errors; otherwise errors are only printed.

        Returns:
          The list of loaded directives.

        Raises:
          ValueError: If errors occurred and stop_on_error is true.
        """
        entries, errors, context = load_file(file_name)

        # BUG FIX: this branch was disabled with "and False", which made
        # stop_on_error dead code and silently discarded loader errors.
        if errors:
            printer.print_errors(errors, sys.stderr)
            if stop_on_error:
                raise ValueError()

        return entries
Beispiel #8
0
    def load_file(self):
        """(Re)load the ledger and derive the realized account tree."""
        path = self.beancount_file_path

        # Keep the raw source text of the top-level file around.
        with open(path, encoding='utf8') as source_file:
            self._source = source_file.read()

        self.entries, self._errors, self.options_map = loader.load_file(path)
        self.all_entries = self.entries

        self.account_types = options.get_account_types(self.options_map)
        self.real_accounts = realization.realize(
            self.entries, self.account_types)
Beispiel #9
0
    def test_raises(self, name):
        """The plugin reports exactly one error for an unlinked document
        while passing the entries through unchanged."""
        # Load a real fixture file to exercise the full loader path.
        orig_entries, orig_errors, _ = load_file(FIXTURES /
                                                 f"{name}.beancount")
        assert orig_errors == []

        entries, errors = plugin.unlinked_documents(orig_entries, {}, "ER-*")
        # Entries must be returned unmodified; only an error is added.
        assert entries == orig_entries
        assert len(errors) == 1
        assert errors[0].message == f"Missing {name} for link 'ER-some-id'"
Beispiel #10
0
def do_print_options(filename, *args):
    """Print out the actual options parsed from a file.

    Args:
      filename: A string, the beancount input filename to load.
      *args: Ignored.
    """
    _, __, options_map = loader.load_file(filename)
    for key, value in sorted(options_map.items()):
        print('{}: {}'.format(key, value))
Beispiel #11
0
def test_plugins(tmp_path: Path) -> None:
    """End-to-end test of fava's link_documents plugin on a temp tree."""
    # Create sample files
    expenses_foo = tmp_path / "documents" / "Expenses" / "Foo"
    expenses_foo.mkdir(parents=True)
    (expenses_foo / "2016-11-02 Test 1.pdf").touch()
    (expenses_foo / "2016-11-03 Test 2.pdf").touch()
    (expenses_foo / "2016-11-04 Test 3 discovered.pdf").touch()
    assets_cash = tmp_path / "documents" / "Assets" / "Cash"
    assets_cash.mkdir(parents=True)
    (assets_cash / "2016-11-05 Test 4.pdf").touch()
    (assets_cash / "Test 5.pdf").touch()

    # Relative paths are used inside the ledger to test both absolute
    # and relative document references.
    expenses_foo_rel = Path("documents") / "Expenses" / "Foo"
    assets_cash_rel = Path("documents") / "Assets" / "Cash"

    beancount_file = tmp_path / "example.beancount"
    beancount_file.write_text(
        dedent(f"""
        option "title" "Test"
        option "operating_currency" "EUR"
        option "documents" "{tmp_path / "documents"}"

        plugin "fava.plugins.link_documents"

        2016-10-30 open Expenses:Foo
        2016-10-31 open Assets:Cash

        2016-11-01 * "Foo" "Bar"
            document: "{expenses_foo / "2016-11-03 Test 2.pdf"}"
            document-2: "{assets_cash_rel / "2016-11-05 Test 4.pdf"}"
            Expenses:Foo                100 EUR
            Assets:Cash

        2016-11-07 * "Foo" "Bar"
            document: "{expenses_foo_rel / "2016-11-02 Test 1.pdf"}"
            document-2: "{assets_cash_rel / "2016-11-05 Test 4.pdf"}"
            Expenses:Foo        100 EUR
            Assets:Cash

        2016-11-06 document Assets:Cash "{assets_cash_rel / "Test 5.pdf"}"
        2017-11-06 balance Assets:Cash   -200 EUR
            document: "{assets_cash_rel / "Test 5.pdf"}"
        """.replace("\\", "\\\\")))

    entries, errors, _ = load_file(str(beancount_file))

    assert not errors
    assert len(entries) == 10

    # The plugin tags linking transactions with "linked".
    assert "linked" in entries[3].tags
    assert "linked" in entries[4].tags

    # Document can be linked twice
    assert len(entries[6].links) == 2
    assert entries[2].links == entries[4].links
    assert entries[8].links == entries[3].links
Beispiel #12
0
def main():
    """Load the ledger named on the command line and dump it.

    Entries are printed to stdout; the options map and any load errors
    go to stderr.
    """
    filename = sys.argv[1]
    entries, errors, options = loader.load_file(filename)

    for directive in entries:
        printer.print_entry(directive)

    print(options, file=sys.stderr)

    for err in errors:
        printer.print_error(err, file=sys.stderr)
Beispiel #13
0
    def test_match(self):
        """A transaction carrying both the tag and the account yields
        exactly one plugin error."""
        orig_entries, orig_errors, _ = load_file(FIXTURES / "single.beancount")
        assert orig_errors == []

        # The plugin receives its configuration serialized as a string.
        config = [("FOO", "Assets:Checking")]
        entries, errors = repel(orig_entries, {}, str(config))
        assert len(errors) == 1
        assert errors[0].message == (
            "The tag 'FOO' and the account 'Assets:Checking' "
            "should not occur in the same transaction.")
Beispiel #14
0
def load_file(*args, futures=False, **kwargs):
    """Wrap loader.load_file(), optionally hiding future transactions.

    Unless *futures* is true, transactions tagged 'future' are dropped
    from the returned entries; errors and options pass through
    unchanged.
    """
    entries, errors, options = loader.load_file(*args, **kwargs)

    if not futures:
        def keep(entry):
            # Non-transactions always stay; transactions stay only when
            # they do not carry the 'future' tag.
            if not isinstance(entry, Transaction):
                return True
            return 'future' not in entry.tags

        entries = [entry for entry in entries if keep(entry)]

    return entries, errors, options
    def __make_cache(self, context):
        """Parse the ledger and cache completion candidates.

        Each candidate is a {'word': ..., 'kind': ...} dict; the 'word'
        carries the syntax the completion should insert ('^' for links,
        '#' for tags, quotes for payees/events).  No-op when beancount
        is not importable.
        """
        if not HAS_BEANCOUNT:
            return

        entries, _, options = load_file(self.beancount_root)

        # Sets de-duplicate candidates collected across directives.
        accounts = set()
        events = set()
        links = set()
        payees = set()
        tags = set()

        for entry in entries:
            if isinstance(entry, data.Open):
                accounts.add(entry.account)
            elif isinstance(entry, data.Transaction):
                if entry.payee:
                    payees.add(entry.payee)
            # Links/tags exist on several directive types; guard for absence.
            if hasattr(entry, 'links') and entry.links:
                links.update(entry.links)
            if hasattr(entry, 'tags') and entry.tags:
                tags.update(entry.tags)
            if isinstance(entry, data.Event):
                events.add(entry.type)

        self.attributes = {
            'accounts': [{
                'word': x,
                'kind': 'account'
            } for x in sorted(accounts)],
            'events': [{
                'word': '"{}"'.format(x),
                'kind': 'event'
            } for x in sorted(events)],
            'commodities': [{
                'word': x,
                'kind': 'commodity'
            } for x in options['commodities']],
            'links': [{
                'word': '^' + w,
                'kind': 'link'
            } for w in sorted(links)],
            'payees': [{
                'word': '"{}"'.format(w),
                'kind': 'payee'
            } for w in sorted(payees)],
            'tags': [{
                'word': '#' + w,
                'kind': 'tag'
            } for w in sorted(tags)],
            'directives': [{
                'word': x,
                'kind': 'directive'
            } for x in DIRECTIVES],
        }
Beispiel #16
0
def main():
    """Load and validate a beancount file with the hardcore validations,
    printing errors to stderr; optionally drop into pdb on exceptions."""
    parser = argparse.ArgumentParser(description=__doc__)

    # The default input file may be supplied via the environment.
    default_file = os.environ.get(BEAN_FILE_ENV, None)

    parser.add_argument(
        '-e', '--bean',
        metavar=BEAN_FILE_ENV,
        default=default_file,
        required=False,
        type=str,
        dest='bean_file',
        help=f"Beancount file to read and verify. {'Default is '+ default_file if default_file else ''}"
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Print timings.'
    )
    parser.add_argument(
        '--pdb',
        action='store_true',
        help='Drop into a debugger on error'
    )
    parser.add_argument(
        '--logging-conf',
        type=str,
        default='./logging.yaml',
        help='logging.yaml file to use.  Default is ./logging.yaml'
    )
    args = parser.parse_args()

    logging_conf: Path = Path(args.logging_conf)

    logging_config(
        config_file=logging_conf,
        level=logging.DEBUG if args.verbose else logging.INFO
    )

    with misc_utils.log_time('beancount.loader (total)', logging.info):
        # Load up the file, print errors, checking and validation are invoked
        # automatically.
        try:
            entries, errors, _ = loader.load_file(
                args.bean_file,
                log_timings=logging.info,
                log_errors=sys.stderr,
                # Force slow and hardcore validations, just for check.
                extra_validations=validation.HARDCORE_VALIDATIONS)
        except Exception as exc:
            if args.pdb:
                # NOTE(review): after the debugger session, execution falls
                # through with 'entries' unbound; harmless here since the
                # loaded results are not used further.
                pdb.post_mortem(exc.__traceback__)
            else:
                raise
Beispiel #17
0
def do_display_context(filename, args):
    """Print out the precision inferred from the parsed numbers in the input file.

    Args:
      filename: A string, which consists in the filename.
      args: A tuple of the rest of arguments. Ignored.
    """
    # BUG FIX: the docstring claimed the first element of 'args' was read as
    # an integer, but 'args' is never used; the unused 'entries'/'errors'
    # unpacked names are discarded explicitly now.
    _, _, options_map = loader.load_file(filename)
    # Only the display context from the options map is needed.
    dcontext = options_map['dcontext']
    sys.stdout.write(str(dcontext))
Beispiel #18
0
def load_training_data(
        training_data: Union[_FileMemo, List[Transaction], str],
        known_account: Union[str, None] = None,
        existing_entries: Union[List[Tuple], None] = None) -> List[Transaction]:
    '''
    Loads training data
    :param training_data: The training data that shall be loaded.
        Can be provided as a string (the filename pointing to a beancount file),
        a _FileMemo instance,
        or a list of beancount entries
    :param known_account: Optional filter for the training data.
        If provided, the training data is filtered to only include transactions that involve the specified account.
    :param existing_entries: Optional existing entries to use instead of explicit training_data
    :return: Returns a list of beancount entries.
    '''
    # Fall back to the caller-supplied entries only when no explicit
    # training data was given.
    if not training_data and existing_entries:
        logger.debug("Using existing entries for training data")
        training_data = list(filter_txns(existing_entries))
    elif isinstance(training_data, _FileMemo):
        logger.debug(
            f"Reading training data from _FileMemo \"{training_data.name}\"..."
        )
        training_data, errors, _ = loader.load_file(training_data.name)
        # NOTE(review): assert is stripped under -O, so loader errors would
        # then pass silently -- consider raising instead.
        assert not errors
        training_data = filter_txns(training_data)
    elif isinstance(training_data, str):
        logger.debug(f"Reading training data from file \"{training_data}\"...")
        training_data, errors, _ = loader.load_file(training_data)
        assert not errors
        training_data = filter_txns(training_data)
    # NOTE(review): when training_data is already a list of entries it is
    # used as-is (not filtered to transactions) -- confirm callers pass
    # transactions only.
    logger.debug(f"Finished reading training data.")
    if known_account:
        training_data = [
            t for t in training_data
            # ...filtered because the training data must involve the account:
            if transaction_involves_account(t, known_account)
        ]
        logger.debug(
            f"After filtering for account {known_account}, "
            f"the training data consists of {len(training_data)} entries.")
    return training_data
Beispiel #19
0
    def test_merge_meta__errors(self):
        """A missing merge source file surfaces as exactly one loader error."""
        # Note: Use load_file() and a real file to try to tease reentrance bugs.
        with TmpFile('w') as topfile:
            topfile.write(textwrap.dedent("""
              plugin "beanlabs.plugins.merge_meta" "/path/to/file/that/doesnt/exist"

              2015-02-02 open Assets:Checking
                begin: 2015-01-01
            """))
            topfile.flush()
            entries, errors, options_map = loader.load_file(topfile.name)
            self.assertEqual(1, len(errors))
Beispiel #20
0
 def reload(self):
     """Re-read self.filename, discarding any in-progress transaction,
     and recompute the cached derived views (latest price entries and
     the realized account tree)."""
     if self.transaction is not None:
         self.log.warn("Discard transactions due to reload")
         self.log.warn(format_entry(self.transaction))
     entries, errors, options_map = loader.load_file(self.filename)
     # NOTE(review): assert is stripped under -O; consider raising on
     # loader errors instead.
     assert(len(errors) == 0)
     self.entries = entries
     self.options_map = options_map
     self.transaction = None
     # Commonly used transformations of the entries
     self.price_entries = prices.get_last_price_entries(entries, datetime.date.today())
     self.accounts = realization.realize(self.entries)
Beispiel #21
0
def _extract(ctx, src, output, existing, reverse, failfast, quiet):
    """Extract transactions from documents.

    Walk the SRC list of files or directories and extract the ledger
    entries from each file identified by one of the configured
    importers.  The entries are written to the specified output file
    or to the standard output in Beancount ledger format in sections
    associated to the source document.

    Exits the process with status 1 if any importer raised an error.
    """
    verbosity = -quiet
    log = utils.logger(verbosity, err=True)
    errors = exceptions.ExceptionsTrap(log)

    # Load the ledger, if one is specified.
    # Hooks still receive an empty list when no existing ledger is given.
    existing_entries = loader.load_file(existing)[0] if existing else []

    extracted = []
    for filename in _walk(src, log):
        # The trap records exceptions raised inside the 'with' body so one
        # bad document does not abort the whole walk.
        with errors:
            importer = identify.identify(ctx.importers, filename)
            if not importer:
                log('')  # Newline.
                continue

            # Signal processing of this document.
            log(' ...', nl=False)

            # Extract entries.
            entries = extract.extract_from_file(importer, filename,
                                                existing_entries)
            extracted.append((filename, entries))
            log(' OK', fg='green')

        if failfast and errors:
            break

    # Invoke hooks.
    hooks = [extract.find_duplicate_entries
             ] if ctx.hooks is None else ctx.hooks
    for func in hooks:
        extracted = func(extracted, existing_entries)

    # Reverse sort order, if requested.
    if reverse:
        for filename, entries in extracted:
            entries.reverse()

    # Serialize entries.
    extract.print_extracted_entries(extracted, output)

    if errors:
        sys.exit(1)
Beispiel #22
0
def main():
    """Print the transactions of one ledger that share no link with any
    transaction of a second ledger (set difference by link)."""
    optparser = argparse.ArgumentParser(description=__doc__)
    optparser.add_argument('filename', help='Transactions to be considered')
    optparser.add_argument('filename_diff', help='Transactions to be removed')

    optparser.add_argument('-q',
                           '--quiet',
                           action='store_true',
                           help="Don't print file or line numbers.")

    args = optparser.parse_args()

    # Parse the ledger files.
    entries, errors, options = loader.load_file(args.filename,
                                                log_errors=logging.error)
    entries_diff, errors_diff, options_diff = loader.load_file(
        args.filename_diff, log_errors=logging.error)

    # Create a mapping from links to lists of transactions to find.
    link_map = collections.defaultdict(list)
    for entry in data.filter_txns(entries_diff):
        for link in entry.links:
            link_map[link].append(entry)

    # Filter out the transactions.
    filtered_entries = []
    for entry in data.filter_txns(entries):
        for link in entry.links:
            if link in link_map:
                break
        else:
            # No link of this transaction appears in the diff ledger; keep it.
            filtered_entries.append(entry)

    # Print out something about each entry.
    for entry in filtered_entries:
        if not args.quiet:
            print()
            print('{}'.format(printer.render_source(entry.meta)))
            print()
        print(printer.format_entry(entry))
Beispiel #23
0
def main():
    """Extract trades from metadata-annotated postings and report on them.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = version.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount input filename')

    oparser = parser.add_argument_group('Outputs')
    oparser.add_argument(
        '-o',
        '--output',
        action='store',
        help="Filename to output results to (default goes to stdout)")
    oparser.add_argument('-f',
                         '--format',
                         default='text',
                         choices=['text', 'csv'],
                         help="Output format to render to (text, csv)")

    args = parser.parse_args()

    # Load the input file.
    entries, errors, options_map = loader.load_file(args.filename)

    # Get the list of trades.
    trades = extract_trades(entries)

    # Produce a table of all the trades.
    columns = ('units currency cost_currency '
               'buy_date buy_price sell_date sell_price pnl').split()
    header = [
        'Units', 'Currency', 'Cost Currency', 'Buy Date', 'Buy Price',
        'Sell Date', 'Sell Price', 'P/L'
    ]
    body = []
    for aug, red in trades:
        # A reducing posting's units are negative; flip the sign to report
        # the number of units sold.
        units = -red.posting.units.number
        buy_price = aug.posting.price.number
        sell_price = red.posting.price.number
        # Quantize P/L to the precision of the buy price.
        pnl = (units * (sell_price - buy_price)).quantize(buy_price)
        body.append([
            -red.posting.units.number, red.posting.units.currency,
            red.posting.price.currency,
            aug.txn.date.isoformat(), buy_price,
            red.txn.date.isoformat(), sell_price, pnl
        ])
    trades_table = table.Table(columns, header, body)

    # Render the table as text or CSV.
    # BUG FIX: the output file was previously opened without ever being
    # closed (and closing sys.stdout would be wrong); use a context manager
    # only for a file we opened ourselves.
    if args.output:
        with open(args.output, 'w') as outfile:
            table.render_table(trades_table, outfile, args.format)
    else:
        table.render_table(trades_table, sys.stdout, args.format)
Beispiel #24
0
def do_region(filename, args, conversion=None):
    """Print out a list of transactions in a region and balances.

    Args:
      filename: A string, which consists in the filename.
      args: A tuple of the rest of arguments. We're expecting the first argument
        to be a string which contains either a lineno integer or a
        (filename:)?lineno:lineno combination (which can be used if the location
        is not in the top-level file).
      conversion: A string, one of None, 'value', or 'cost'; if set, convert
        balances output to market value (or cost).
    """
    # Parse the arguments, get the line number.
    if len(args) != 1:
        raise SystemExit("Missing line number or link argument.")
    location_spec = args[0]

    # Load the input file.
    entries, errors, options_map = loader.load_file(filename)

    # Parse the argument as a line number or a
    # "<filename>:<lineno>:<lineno>" spec to pull context from, with
    # optional filename and optional last line number.
    #
    # If a filename is not provided, the ledger's top-level filename is used
    # (this is the common case). An explicit filename is used to get context
    # in included files.
    #
    # If a single line number is provided the closest transaction is
    # selected. If an internal of line numbers is provided, the list of all
    # transactions whose first line is inside the interval are selected.
    match = re.match(r"(?:(.+?):)?(\d+):(\d+)$", location_spec)
    if not match:
        raise SystemExit("Invalid line number or link format for region.")

    included_filename, first_line, last_line = match.groups()
    search_filename = (path.abspath(included_filename)
                       if included_filename else options_map['filename'])
    lineno = int(first_line)
    last_lineno = int(last_line)

    # Find all the entries in the region. (To be clear, this isn't like the
    # 'linked' command, none of the links are followed.)
    region_entries = [
        entry for entry in data.filter_txns(entries)
        if (entry.meta['filename'] == search_filename
            and lineno <= entry.meta['lineno'] <= last_lineno)
    ]

    # The price map is only needed when converting to market value.
    price_map = prices.build_price_map(
        entries) if conversion == 'value' else None
    render_mini_balances(region_entries, options_map, conversion, price_map)
Beispiel #25
0
 def test_load_file_with_nonexist_include(self):
     """An include of a missing file is reported as a load error, not a crash."""
     with test_utils.tempdir() as tmp:
         test_utils.create_temporary_files(
             tmp, {
                 'root.beancount':
                 """
               include "/some/file/that/does/not/exist.beancount"
             """
             })
         entries, errors, options_map = loader.load_file(
             path.join(tmp, 'root.beancount'))
         self.assertEqual(1, len(errors))
         self.assertTrue(re.search('does not exist', errors[0].message))
Beispiel #26
0
def main():
    """Command-line entry point: load a ledger and emit an XHTML report."""
    logging.basicConfig(
        level=logging.INFO, format='%(levelname)-8s: %(message)s')

    parser = version.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount input filename')
    args = parser.parse_args()

    # Load errors are logged rather than returned.
    entries, _, options_map = loader.load_file(
        args.filename, log_errors=logging.error)

    report = create_report(entries, options_map)
    sys.stdout.write(format_xhtml_report(report, options_map))
Beispiel #27
0
def get_postings(filename, account_regexp, tag=None):
    """Extract the TxnPostings whose account matches a regular expression.

    Args:
      filename: The beancount input filename to load.
      account_regexp: A regular expression matched (via re.match) against
        each posting's account name.
      tag: If set, additionally require the containing transaction to
        carry this tag.

    Returns:
      A list of data.TxnPosting pairs, one per matching posting.
    """
    # Named inner functions instead of assigned lambdas (PEP 8, E731);
    # behavior is unchanged.
    if tag:
        def match(entry, posting):
            return (re.match(account_regexp, posting.account)
                    and tag in entry.tags)
    else:
        def match(_, posting):
            return re.match(account_regexp, posting.account)

    entries, _, _ = loader.load_file(filename)
    return [
        data.TxnPosting(entry, posting)
        for entry in data.filter_txns(entries)
        for posting in entry.postings
        if match(entry, posting)
    ]
Beispiel #28
0
def main():
    """Generate a holdings report from a ledger and upload it as CSV,
    replacing the contents of an existing Google Drive document."""
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')

    parser = gdrive.get_argparser(description=__doc__.strip())

    parser.add_argument('filename', help="Beancount input file")
    parser.add_argument('docid', help="Document ID")

    parser.add_argument(
        '-o',
        '--output',
        action='store',
        default=datetime.date.today().strftime('beancount.%Y-%m-%d.pdf'),
        help="Where to write out the collated PDF file")

    args = parser.parse_args()

    # Load the file.
    entries, unused_errors, options_map = loader.load_file(
        args.filename, log_timings=logging.info, log_errors=sys.stderr)

    # Generate a report.
    holdings_list = holdings_reports.report_holdings(
        None,
        False,
        entries,
        options_map,
        aggregation_key=lambda holding: holding.currency)

    # Render the report to CSV in memory before uploading.
    oss = io.StringIO()
    table.table_to_csv(holdings_list, file=oss)
    csv_contents = oss.getvalue()

    # Connect, with authentication.
    # Check https://developers.google.com/drive/scopes for all available scopes.
    scopes = [
        'https://www.googleapis.com/auth/drive',
    ]
    http = gdrive.get_authenticated_http(" ".join(scopes), args)

    # Access the drive API.
    drive = gdrive.discovery.build('drive', 'v2', http=http)

    # Get the document and replace it.
    metadata = drive.files().get(fileId=args.docid).execute()
    upload = MediaInMemoryUpload(csv_contents.encode('utf-8'),
                                 mimetype=metadata['mimeType'])
    metadata = drive.files().update(fileId=args.docid,
                                    media_body=upload).execute()
    pprint(metadata)
Beispiel #29
0
    def load_file(self):
        """Load self.beancount_file_path and compute things that are independent
        of how the entries might be filtered later"""
        # use the internal function to disable cache
        if not self.is_encrypted:
            self.all_entries, self.errors, self.options = \
                loader._load([(self.beancount_file_path, True)],
                             None, None, None)
            include_path = os.path.dirname(self.beancount_file_path)
            # Watch included files and document directories for changes.
            self.watcher.update(self.options['include'], [
                os.path.join(include_path, path)
                for path in self.options['documents']])
        else:
            # Encrypted files go through the public loader (which handles
            # decryption); no file watching in that case.
            self.all_entries, self.errors, self.options = \
                loader.load_file(self.beancount_file_path)
        self.price_map = prices.build_price_map(self.all_entries)
        self.account_types = options.get_account_types(self.options)

        self.title = self.options['title']
        # Number formatting follows the ledger's "render_commas" option.
        if self.options['render_commas']:
            self._format_string = '{:,f}'
            self._default_format_string = '{:,.2f}'
        else:
            self._format_string = '{:f}'
            self._default_format_string = '{:.2f}'

        # Aggregates over the full entry list (used e.g. to populate filters).
        self.active_years = list(getters.get_active_years(self.all_entries))
        self.active_tags = list(getters.get_all_tags(self.all_entries))
        self.active_payees = list(getters.get_all_payees(self.all_entries))

        self.queries = _filter_entries_by_type(self.all_entries, Query)
        self.custom_entries = _filter_entries_by_type(self.all_entries, Custom)

        # Realize the unfiltered entries into the account tree.
        self.all_root_account = realization.realize(self.all_entries,
                                                    self.account_types)
        self.all_accounts = _list_accounts(self.all_root_account)
        self.all_accounts_active = _list_accounts(
            self.all_root_account, active_only=True)

        # Fava-specific options and budgets are declared via Custom entries.
        self.fava_options, errors = parse_options(self.custom_entries)
        self.errors.extend(errors)

        self.sidebar_links = _sidebar_links(self.custom_entries)

        self.upcoming_events = _upcoming_events(
            self.all_entries, self.fava_options['upcoming-events'])

        self.budgets, errors = parse_budgets(self.custom_entries)
        self.errors.extend(errors)

        self._apply_filters()
Beispiel #30
0
    def test_load_cache_moved_file(self):
        """Moving the top-level input file must invalidate the pickle cache."""
        # Create an initial set of files and load file, thus creating a cache.
        with test_utils.tempdir() as tmp:
            test_utils.create_temporary_files(
                tmp, {
                    'apples.beancount':
                    """
                  include "oranges.beancount"
                  2014-01-01 open Assets:Apples
                """,
                    'oranges.beancount':
                    """
                  2014-01-02 open Assets:Oranges
                """
                })
            top_filename = path.join(tmp, 'apples.beancount')
            entries, errors, options_map = loader.load_file(top_filename)
            self.assertFalse(errors)
            self.assertEqual(2, len(entries))
            self.assertEqual(1, self.num_calls)

            # Make sure the cache was created.
            self.assertTrue(
                path.exists(path.join(tmp, '.apples.beancount.picklecache')))

            # Check that it doesn't need refresh.
            self.assertFalse(loader.needs_refresh(options_map))

            # Move the input file.
            new_top_filename = path.join(tmp, 'bigapples.beancount')
            os.rename(top_filename, new_top_filename)

            # Check that it needs refresh.
            self.assertTrue(loader.needs_refresh(options_map))

            # Load the root file again, make sure the cache is being hit.
            entries, errors, options_map = loader.load_file(top_filename)
            self.assertEqual(2, self.num_calls)
Beispiel #31
0
def main():
    """Print every transaction that has at least one posting with a lot date."""
    import argparse
    import logging

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Ledger filename')
    args = parser.parse_args()

    entries, _, _ = loader.load_file(args.filename)
    for txn in entries:
        if not isinstance(txn, data.Transaction):
            continue
        # Keep only transactions where some posting carries a lot date.
        has_dated_lot = any(posting.position.lot.lot_date
                            for posting in txn.postings)
        if has_dated_lot:
            printer.print_entry(txn)
Beispiel #32
0
def main():
    """Load a ledger and run augmenting-lot detection over its transactions."""
    import argparse
    import logging

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    arg_parser = argparse.ArgumentParser(description=__doc__.strip())
    arg_parser.add_argument('filename', help='Beancount input filename')
    args = arg_parser.parse_args()

    entries, _, _ = loader.load_file(args.filename)
    transactions = [directive for directive in entries
                    if isinstance(directive, data.Transaction)]

    # detect_augmenting_lots(transactions)  # date-insensitive variant, unused
    detect_augmenting_lots_with_dates(transactions)
Beispiel #33
0
def load_journal_hashes(journal):
    """Collect the transaction hashes stored in a ledger's metadata.

    Loads the ledger and gathers the ``md5`` metadata value of every
    Transaction entry that has a non-empty one. This is required by the
    duplicate-detection algorithm.

    Args:
      journal: A string, path to the Beancount input file.
    Returns:
      A list of non-empty md5 hash strings, in entry order.
    """
    entries, _, _ = loader.load_file(journal)
    # Falsy values (missing key or empty string) are skipped, matching the
    # original append-only-if-truthy loop.
    return [entry.meta.get("md5")
            for entry in entries
            if isinstance(entry, Transaction) and entry.meta.get("md5")]
Beispiel #34
0
def directories(filename, dirs):
    """Validate a directory hierarchy against the ledger's account names.

    Read a ledger's list of account names and check that all the capitalized
    subdirectory names under the given roots match the account names.

    Args:
      filename: A string, the Beancount input filename.
      dirs: A list of strings, the names of root directories to validate
        against the accounts in the given ledger.
    """
    entries, _, __ = loader.load_file(filename)
    validate_directories(entries, dirs)
Beispiel #35
0
 def test_load_file_no_includes(self):
     """A single-file ledger loads cleanly and 'include' lists only itself."""
     with test_utils.tempdir() as tmp:
         test_utils.create_temporary_files(
             tmp, {
                 'apples.beancount':
                 """
               2014-01-01 open Assets:Apples
             """
             })
         entries, errors, options_map = loader.load_file(
             path.join(tmp, 'apples.beancount'))
         # No errors, and the options' include list names just the top file.
         self.assertEqual(0, len(errors))
         self.assertEqual(['apples.beancount'],
                          list(map(path.basename, options_map['include'])))
Beispiel #36
0
    def load_file(self) -> None:
        """Load the main file and all included files and set attributes.

        Populates the raw entry list and derived state: account types, price
        map, realized account tree, entries grouped by directive type,
        per-account metadata, Fava options, the file watcher (unencrypted
        files only), per-module state, and the filters; finally runs an
        initial filter pass.
        """
        # use the internal function to disable cache
        if not self._is_encrypted:
            # pylint: disable=protected-access
            self.all_entries, self.errors, self.options = loader._load(
                [(self.beancount_file_path, True)], None, None, None
            )
        else:
            # Encrypted files go through the public (caching) loader instead.
            self.all_entries, self.errors, self.options = loader.load_file(
                self.beancount_file_path
            )

        self.account_types = get_account_types(self.options)
        self.price_map = build_price_map(self.all_entries)
        self.all_root_account = realization.realize(
            self.all_entries, self.account_types
        )

        # Group directives by concrete type so later consumers can fetch all
        # Open/Close/Custom entries without rescanning the full entry list.
        entries_by_type: DefaultDict[
            Type[Directive], Entries
        ] = collections.defaultdict(list)
        for entry in self.all_entries:
            entries_by_type[type(entry)].append(entry)
        self.all_entries_by_type = entries_by_type

        # Record per-account metadata (from Open directives) and close dates
        # (from Close directives).
        self.accounts = AccountDict()
        for entry in entries_by_type[Open]:
            self.accounts.setdefault(
                cast(Open, entry).account
            ).meta = entry.meta
        for entry in entries_by_type[Close]:
            self.accounts.setdefault(
                cast(Close, entry).account
            ).close_date = entry.date

        # Fava-specific options live in Custom directives; option parsing can
        # itself produce errors, appended to the loader's error list.
        self.fava_options, errors = parse_options(
            cast(List[Custom], entries_by_type[Custom])
        )
        self.errors.extend(errors)

        if not self._is_encrypted:
            # Refresh the watcher with the current set of paths to monitor.
            self._watcher.update(*self.paths_to_watch())

        # Let each registered sub-module rebuild its state from the new file.
        for mod in MODULES:
            getattr(self, mod).load_file()

        self.filters = Filters(self.options, self.fava_options)

        self.filter(True)
Beispiel #37
0
    def load_file(self):
        """Load self.beancount_file_path and compute things that are independent
        of how the entries might be filtered later.

        Sets the entry list, price map, account types, number-format strings,
        active years/tags/payees, Query/Custom directive caches, the realized
        account tree, and all Custom-derived state (Fava options, sidebar
        links, upcoming events, budgets), then applies the current filters.
        """
        # use the internal function to disable cache
        if not self.is_encrypted:
            self.all_entries, self.errors, self.options = loader._load(
                [(self.beancount_file_path, True)], None, None, None
            )
            # Watch every included source file plus the configured document
            # folders (documents are relative to the main file's directory).
            include_path = os.path.dirname(self.beancount_file_path)
            self.watcher.update(
                self.options["include"], [os.path.join(include_path, path) for path in self.options["documents"]]
            )
        else:
            # Encrypted files go through the public (caching) loader instead.
            self.all_entries, self.errors, self.options = loader.load_file(self.beancount_file_path)
        self.price_map = prices.build_price_map(self.all_entries)
        self.account_types = options.get_account_types(self.options)

        self.title = self.options["title"]
        # Number formatting honors the ledger's "render_commas" option.
        if self.options["render_commas"]:
            self._format_string = "{:,f}"
            self._default_format_string = "{:,.2f}"
        else:
            self._format_string = "{:f}"
            self._default_format_string = "{:.2f}"

        self.active_years = list(getters.get_active_years(self.all_entries))
        self.active_tags = list(getters.get_all_tags(self.all_entries))
        self.active_payees = list(getters.get_all_payees(self.all_entries))

        # Query and Custom directives are extracted once for later use.
        self.queries = _filter_entries_by_type(self.all_entries, Query)
        self.custom_entries = _filter_entries_by_type(self.all_entries, Custom)

        self.all_root_account = realization.realize(self.all_entries, self.account_types)
        self.all_accounts = _list_accounts(self.all_root_account)
        self.all_accounts_active = _list_accounts(self.all_root_account, active_only=True)

        # Fava options, sidebar links and budgets are parsed from the Custom
        # directives; option/budget parsing can append further errors.
        self.fava_options, errors = parse_options(self.custom_entries)
        self.errors.extend(errors)

        self.sidebar_links = _sidebar_links(self.custom_entries)

        self.upcoming_events = _upcoming_events(self.all_entries, self.fava_options["upcoming-events"])

        self.budgets, errors = parse_budgets(self.custom_entries)
        self.errors.extend(errors)

        self._apply_filters()
Beispiel #38
0
    def load_file(self):
        """Load the main file and all included files and set attributes.

        Populates the entry list, account types, price map, realized account
        tree, entries grouped by directive type, per-account metadata, Fava
        options, per-module state, and the filter objects, then runs an
        initial forced filter pass.
        """
        # use the internal function to disable cache
        if not self._is_encrypted:
            # pylint: disable=protected-access
            self.all_entries, self.errors, self.options = \
                loader._load([(self.beancount_file_path, True)],
                             None, None, None)
            self.account_types = get_account_types(self.options)
            # Refresh the watcher with the current set of paths to monitor.
            self._watcher.update(*self.paths_to_watch())
        else:
            # Encrypted files go through the public (caching) loader instead.
            self.all_entries, self.errors, self.options = \
                loader.load_file(self.beancount_file_path)
            self.account_types = get_account_types(self.options)
        self.price_map = prices.build_price_map(self.all_entries)
        self.all_root_account = realization.realize(self.all_entries,
                                                    self.account_types)

        # Group directives by concrete type so later consumers can fetch all
        # Open/Close/Custom entries without rescanning the full entry list.
        entries_by_type = collections.defaultdict(list)
        for entry in self.all_entries:
            entries_by_type[type(entry)].append(entry)
        self.all_entries_by_type = entries_by_type

        # Record per-account metadata (from Open directives) and close dates
        # (from Close directives).
        self.accounts = _AccountDict()
        for entry in entries_by_type[Open]:
            self.accounts.setdefault(entry.account).meta = entry.meta
        for entry in entries_by_type[Close]:
            self.accounts.setdefault(entry.account).close_date = entry.date

        # Fava-specific options live in Custom directives; option parsing can
        # itself produce errors, appended to the loader's error list.
        self.fava_options, errors = parse_options(entries_by_type[Custom])
        self.errors.extend(errors)

        # Let each registered sub-module rebuild its state from the new file.
        for mod in MODULES:
            getattr(self, mod).load_file()

        self._filters = {
            'account': AccountFilter(self.options, self.fava_options),
            'filter': AdvancedFilter(self.options, self.fava_options),
            'time': TimeFilter(self.options, self.fava_options),
        }

        self.filter(True)
Beispiel #39
0
    def load_file(self):
        """Parse self.beancount_file_path and compute filter-independent state.

        Sets the entry lists, price map, account types, title, a list of
        plain-dict error records, and the active year/tag/payee lists, then
        applies the current filters.
        """
        self.entries, self._errors, self.options = loader.load_file(self.beancount_file_path)
        self.all_entries = self.entries
        self.price_map = prices.build_price_map(self.all_entries)
        self.account_types = options.get_account_types(self.options)

        self.title = self.options['title']

        # Flatten the loader's error objects into serializable dicts.
        self.errors = [{
            'file': err.source['filename'],
            'line': err.source['lineno'],
            'error': err.message,
        } for err in self._errors]

        self.active_years = list(getters.get_active_years(self.all_entries))
        self.active_tags = list(getters.get_all_tags(self.all_entries))
        self.active_payees = list(getters.get_all_payees(self.all_entries))
        self.apply_filters()
Beispiel #40
0
def test_plugins(tmpdir):
    """Exercise the link_documents and tag_discovered_documents plugins.

    Builds a temporary documents tree with five statement files, writes a
    ledger whose transactions reference some of them via ``document``
    metadata, loads it with both plugins active, and checks the tags and
    links on the resulting entries.
    """
    # pylint: disable=too-many-locals
    sample_folder = tmpdir.mkdir('fava_plugins')

    documents_folder = sample_folder.mkdir('documents')

    # Statements under Expenses:Foo; statement 3 is never referenced in the
    # ledger so it can only be picked up by discovery.
    foo_folder = documents_folder.mkdir('Expenses').mkdir('Foo')
    sample_statement1 = foo_folder.join('2016-11-01 Test 1.pdf')
    sample_statement1.write('Hello World 1')
    sample_statement1_short = os.path.join('documents', 'Expenses', 'Foo',
                                           '2016-11-01 Test 1.pdf')
    sample_statement2 = foo_folder.join('2016-11-01 Test 2.pdf')
    sample_statement2.write('Hello World 2')
    sample_statement3 = foo_folder.join('2016-11-01 Test 3 discovered.pdf')
    sample_statement3.write('Hello World 3')

    # Statements under Assets:Cash; statement 5 has no date in its name.
    assets_folder = documents_folder.mkdir('Assets').mkdir('Cash')
    sample_statement4 = assets_folder.join('2016-11-01 Test 4.pdf')
    sample_statement4_short = os.path.join('documents', 'Assets', 'Cash',
                                           '2016-11-01 Test 4.pdf')
    sample_statement4.write('Hello World 4')
    sample_statement5 = assets_folder.join('Test 5.pdf')
    sample_statement5_short = os.path.join('documents', 'Assets', 'Cash',
                                           'Test 5.pdf')
    sample_statement5.write('Hello World 5')

    # Ledger referencing statements 2 (absolute), 1, 4 and 5 (relative) via
    # document metadata, plus one explicit document directive.
    beancount_file = sample_folder.join('example.beancount')
    beancount_file.write(
        _format("""
        option "title" "Test"
        option "operating_currency" "EUR"
        option "documents" "{}"

        plugin "fava.plugins.link_documents"
        plugin "fava.plugins.tag_discovered_documents"

        2016-10-31 open Expenses:Foo
        2016-10-31 open Assets:Cash

        2016-11-01 * "Foo" "Bar"
            document: "{}"
            Expenses:Foo                100 EUR
            Assets:Cash

        2016-11-02 * "Foo" "Bar"
            document: "{}"
            document-2: "{}"
            Expenses:Foo        100 EUR
            Assets:Cash

        2016-11-02 document Assets:Cash "{}"
    """, (documents_folder, sample_statement2, sample_statement1_short,
          sample_statement4_short, sample_statement5_short)))

    entries, errors, _ = load_file(str(beancount_file))

    assert not errors
    # 2 open + 2 transactions + document entries generated by the plugins.
    assert len(entries) == 9

    # Entries connected through document metadata carry the 'linked' tag.
    # NOTE(review): indices depend on the loader's entry sort order — confirm
    # which entry each index refers to before relying on them elsewhere.
    assert 'linked' in entries[3].tags
    assert 'linked' in entries[4].tags
    assert 'linked' in entries[5].tags

    # Linked transactions and their documents share identical link sets.
    assert entries[2].links == entries[5].links
    assert entries[7].links == entries[3].links == entries[4].links

    # The unreferenced statement was auto-tagged 'discovered'; the explicit
    # document directive got no tags.
    assert 'discovered' in entries[6].tags
    assert not entries[8].tags