def load_training_data(training_data: Union[_FileMemo, List[Transaction], str],
                       known_account: Optional[str] = None,
                       existing_entries: Optional[List[Tuple]] = None) -> List[Transaction]:
    '''
    Loads training data.
    :param training_data: The training data to load. Can be provided as a
        string (the path to a beancount file), a _FileMemo instance, or a
        list of beancount entries.
    :param known_account: Optional filter. If provided, the training data is
        filtered to only include transactions that involve this account.
    :param existing_entries: Optional existing entries to use when no explicit
        training_data is given.
    :return: A list of beancount transactions.
    '''
    if not training_data and existing_entries:
        logger.debug("Using existing entries for training data")
        training_data = list(filter_txns(existing_entries))
    elif isinstance(training_data, _FileMemo):
        logger.debug(f"Reading training data from _FileMemo \"{training_data.name}\"...")
        training_data, errors, _ = loader.load_file(training_data.name)
        assert not errors
        # filter_txns returns a generator; materialize it so len() works below.
        training_data = list(filter_txns(training_data))
    elif isinstance(training_data, str):
        logger.debug(f"Reading training data from file \"{training_data}\"...")
        training_data, errors, _ = loader.load_file(training_data)
        assert not errors
        training_data = list(filter_txns(training_data))
    logger.debug("Finished reading training data.")
    if known_account:
        training_data = [t for t in training_data
                         # ...filtered because the training data must involve the account:
                         if transaction_involves_account(t, known_account)]
        logger.debug(f"After filtering for account {known_account}, "
                     f"the training data consists of {len(training_data)} entries.")
    return training_data
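A minimal usage sketch (the ledger path and account name are hypothetical; loader, logger, filter_txns, and transaction_involves_account are assumed to be in scope as in the function above):

# Hypothetical ledger path and account name, for illustration only.
txns = load_training_data("ledger.beancount", known_account="Assets:Checking")
print(f"Loaded {len(txns)} training transactions.")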
Example #2
def validate_coherent_cost(entries, unused_options_map):
    """Check that all currencies are either used at cost or not at all, but never both.

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
    Returns:
      A tuple of the original entries and a list of new errors, if any were
      found.
    """
    errors = []

    with_cost = {}
    without_cost = {}
    for entry in data.filter_txns(entries):
        for posting in entry.postings:
            target_set = without_cost if posting.cost is None else with_cost
            currency = posting.units.currency
            target_set.setdefault(currency, entry)

    for currency in set(with_cost) & set(without_cost):
        errors.append(
            CoherentCostError(
                without_cost[currency].meta,
                "Currency '{}' is used both with and without cost".format(
                    currency), with_cost[currency]))
        # Note: We really ought to include both of the first transactions here.

    return entries, errors
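To see the check fire, here is a hedged sketch with a ledger in which HOOL is held both with and without a cost (hypothetical accounts; CoherentCostError and the beancount imports used above are assumed to be in scope):

from beancount import loader

# Hypothetical ledger: HOOL appears once at cost and once without.
entries, errors, options_map = loader.load_string("""
2020-01-01 open Assets:Invest
2020-01-01 open Assets:Cash
2020-01-01 open Income:Gains

2020-02-01 * "Buy at cost"
  Assets:Invest   1 HOOL {100.00 USD}
  Assets:Cash

2020-03-01 * "Receive the same currency without cost"
  Assets:Invest    1 HOOL
  Income:Gains    -1 HOOL
""")
_, cost_errors = validate_coherent_cost(entries, options_map)
for error in cost_errors:
    print(error.message)  # Currency 'HOOL' is used both with and without cost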
Example #3
def MatchTrades(
        entries: List[data.Directive]) -> Tuple[data.Entries, BalanceDict]:
    # NOTE(blais): Eventually we ought to use the real functionality provided by
    # Beancount. Note that we could add extra data in the inventory in order to
    # keep track of the augmenting entries, and return it properly. This would
    # work and is doable in the pure Python code today.
    balances = collections.defaultdict(inventory.Inventory)
    positions = {}

    # Create new link sets in order to avoid mutating the inputs.
    entries = [(entry._replace(links=entry.links.copy()) if isinstance(
        entry, data.Transaction) else entry) for entry in entries]

    # Process all transactions, adding links in-place.
    for entry in data.filter_txns(entries):
        for posting in entry.postings:
            if posting.cost is None:
                continue
            pos, booking = balances[posting.account].add_position(posting)
            pos_key = (posting.account, posting.units.currency)
            if booking in {MatchResult.CREATED, MatchResult.AUGMENTED}:
                positions[pos_key] = entry
            elif booking == MatchResult.REDUCED:
                opening_entry = positions[pos_key]
                link = 'trade-{}'.format(uuid.uuid4().hex[-12:])
                opening_entry.links.add(link)
                entry.links.add(link)

    return entries, balances
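A usage sketch (the file name is hypothetical; loader, data, and the other imports used by MatchTrades, such as MatchResult, are assumed to be in scope):

# Hypothetical file of trades; print the trade links that were added.
entries, errors, options_map = loader.load_file("trades.beancount")
linked_entries, balances = MatchTrades(entries)
for entry in data.filter_txns(linked_entries):
    for link in entry.links:
        if link.startswith("trade-"):
            print(entry.date, link)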
Example #4
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount ledger filename')
    args, pipeline_args = parser.parse_known_args()

    # Read the ledger.
    logging.info("Reading ledger.")
    t1 = time.time()
    entries, errors, options_map = loader.load_file(args.filename)
    # beam.Row takes keyword arguments; the names become the schema fields
    # that the SQL below refers to.
    postings = (beam.Row(account=posting.account,
                         number=posting.units.number,
                         currency=posting.units.currency)
                for entry in data.filter_txns(entries)
                for posting in entry.postings)
    price_map = prices.build_price_map(entries)
    t2 = time.time()
    logging.info("Read ledger in %.1fsecs.", t2 - t1)

    with CreatePipeline(pipeline_args) as pipeline:
        _ = (pipeline
             | beam.Create(postings)
             | SqlTransform("""
                 SELECT account FROM PCOLLECTION
             """,
                            dialect="zetasql")
             | beam.Map(print))
Example #5
def region(filename, region, conversion):
    """Print out a list of transactions within REGION and compute balances.

    The REGION argument is either a start:end line-number pair or a
    filename:start:end triplet to indicate a region in a ledger file
    included from the main input file.

    """
    search_filename, first_lineno, last_lineno = region
    if search_filename is None:
        search_filename = filename

    entries, errors, options_map = loader.load_file(filename)

    # Find all the entries in the region. (To be clear, this isn't like the
    # 'linked' command, none of the links are followed.)
    region_entries = [
        entry for entry in data.filter_txns(entries)
        if (entry.meta['filename'] == search_filename
            and first_lineno <= entry.meta['lineno'] <= last_lineno)
    ]

    price_map = prices.build_price_map(
        entries) if conversion == 'value' else None
    render_mini_balances(region_entries, options_map, conversion, price_map)
Example #6
def txns_by_file(entries):
    """Group a list of transaction by origin file and sort them by line number.

    Args:
      entries: a list of directives. All non-transaction directives are ignored
    Returns:
      a dictionary mapping filenames to entries belonging to that file, sorted
      by line number

    """
    file_entries = {}  # return dict

    for entry in filter_txns(entries):  # group by file
        if 'filename' not in entry.meta:
            continue
        filename = entry.meta['filename']

        if filename not in file_entries:
            file_entries[filename] = []
        file_entries[filename].append(entry)

    for filename in file_entries:  # sort by line number
        file_entries[filename].sort(key=lambda e: e.meta['lineno'])

    return file_entries
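A usage sketch (hypothetical ledger path; loader and filter_txns are assumed to be imported):

entries, _, _ = loader.load_file("main.beancount")  # hypothetical path
for filename, txns in txns_by_file(entries).items():
    # txns is already sorted by line number, so first/last bound the range.
    print(f"{filename}: {len(txns)} transactions, "
          f"lines {txns[0].meta['lineno']}..{txns[-1].meta['lineno']}")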
Example #7
def main():
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount ledger filename.')
    parser.add_argument('account', help='Account name.')
    parser.add_argument('size', nargs='?', type=Decimal,
                        help='Size to match at highest cost.')
    args = parser.parse_args()

    entries, errors, options_map = loader.load_file(args.filename)

    # Accumulate lots from account.
    balance = inventory.Inventory()
    for entry in data.filter_txns(entries):
        for posting in entry.postings:
            if posting.account == args.account:
                balance.add_position(posting)

    # Book most expensive shares.
    remaining_size = args.size
    booked = []
    for pos in sorted(balance, key=lambda pos: pos.cost.number, reverse=True):
        print(pos)
        if remaining_size:
            booked_size = min(remaining_size, pos.units.number)
            booked.append((booked_size, pos))
            remaining_size -= booked_size

    # Print out booked ones, if a size was specified.
    if booked:
        print()
        print("Booked:")
        for booked_size, pos in booked:
            if booked_size != pos.units.number:
                pos = pos._replace(units=pos.units._replace(number=booked_size))
            print("  {:50}  {}".format(args.account, -pos))
Example #8
def balance_check(entries, options_map):
    errors = []
    tracking_accounts = set()
    for entry in entries:
        if isinstance(entry, Open):
            if entry.meta.get('tracking', False):
                tracking_accounts.add(entry.account)
    asum = Inventory()
    bsum = Inventory()
    for entry in filter_txns(entries):
        for posting in entry.postings:
            if posting.account in tracking_accounts:
                continue
            components = posting.account.split(':')
            if components[0] in ('Assets', 'Liabilities'):
                asum.add_position(posting)
            elif components[0] in ('Income', 'Expenses'):
                bsum.add_position(posting)
    csum = asum.reduce(convert.get_weight) + bsum.reduce(convert.get_weight)
    if not csum.is_small(interpolate.infer_tolerances({}, options_map)):
        errors.append(
            BudgetBalanceError(
                {
                    'filename': '<budget_balance_check>',
                    'lineno': 0
                },
                f"On-budget accounts and budget total do not match: {asum} vs {-bsum}",
                None))
    return entries, errors
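The check asserts that Assets and Liabilities postings balance against Income and Expenses postings, so any leakage would show up in csum; accounts whose Open directive carries tracking metadata are excluded from both sums. A sketch of how such an off-budget account might be declared (hypothetical account name, using beancount's boolean metadata syntax):

2020-01-01 open Assets:OffBudget:Brokerage
  tracking: TRUE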
Example #9
def expand_sales_legs(entries, account, start, end, calculate_commission):
    # Expand each of the sales legs.
    balances = collections.defaultdict(inventory.Inventory)
    sales = []
    for txn in data.filter_txns(entries):
        # If we got to the end of the period, bail out.
        if txn.date >= end:
            break

        # Accumulate the balances before the start date.
        if txn.date < start:
            for posting in txn.postings:
                if re.match(account, posting.account):
                    balance = balances[posting.account]
                    balance.add_position(posting)
            continue

        # Fallthrough: the transaction is inside [start, end). Process the
        # matching postings.

        # Find reducing postings (i.e., for each lot).
        txn_sales = []
        for posting in txn.postings:
            if re.match(account, posting.account):
                balance = balances[posting.account]
                reduced_position, booking = balance.add_position(posting)
                # Set the cost on the posting from the reduced position.
                # FIXME: Eventually that'll happen automatically during the full
                # booking stage.
                if booking == inventory.Booking.REDUCED:
                    posting = posting._replace(cost=reduced_position.cost)

                # If the postings don't have a reference number, ignore them.
                if 'ref' not in txn.meta:
                    continue

                if (posting.cost and posting.units.number < ZERO):
                    if not posting.price:
                        logging.error("Missing price on %s", posting)
                    txn_sales.append(data.TxnPosting(txn, posting))

        if txn_sales and calculate_commission:
            # Find total commission.
            for posting in txn.postings:
                if re.search('Commission', posting.account):
                    commission = posting.units.number
                    break
            else:
                commission = ZERO

            # Compute total number of units.
            tot_units = sum(sale.posting.units.number for sale, _ in txn_sales)

            # Assign a proportion of the commission to each of the sales by
            # inserting it into its posting metadata. This will be processed below.
            for sale, _ in txn_sales:
                fraction = sale.posting.units.number / tot_units
                sale.posting.meta['commission'] = fraction * commission

        sales.extend(txn_sales)
    return sales
Example #10
def split(entries, options_map, config_string=""):
    """
    Beancount plugin: Duplicates all entry postings over time at a fraction of their value.

    Args:
      entries: A list of directives. We're interested only in the Transaction instances.
      options_map: A parser options dict.
      config_string: A configuration string in JSON format given in source file.
    Returns:
      A tuple of entries and errors.
    """

    errors = []

    ## Parse config and set defaults
    config_obj = read_config(config_string)
    config = {
        # ALIASES_BEFORE  : config_obj.pop('aliases_before'  , ['splitBefore']),
        'aliases_after': config_obj.pop('aliases_after',
                                        ['splitAfter', 'split']),
        'alias_seperator': config_obj.pop('alias_seperator', '-'),
        'default_duration': config_obj.pop('default_duration', 'Month'),
        'default_step': config_obj.pop('default_step', 'Day'),
        'min_value': D(str(config_obj.pop('min_value', 0.05))),
        'max_new_tx': config_obj.pop('max_new_tx', 9999),
        'suffix': config_obj.pop('suffix', ' (split %d/%d)'),
        'tag': config_obj.pop('tag', 'splitted'),
    }

    newEntries = []
    trashbin = []
    for tx in filter_txns(entries):

        # Split at entry level only, so that it balances.

        # We are interested in only marked entries. TODO: ALIASES_BEFORE.
        params = extract_mark_tx(tx, config)
        if not params:
            continue

        # For selected entries add new entries.
        trashbin.append(tx)

        # Need to remove plugin metadata because otherwise new_whole_entries will copy it
        # to generated transactions, which is not the behaviour described in the docs.
        # TODO: Remove if alias is used as well. Should we just remove all metadata, even
        # that which is not associated with the plugin?  I guess the desired behaviour is
        # never specified anywhere.
        if 'split' in tx.meta:
            tx.meta.pop('split')

        newEntries = newEntries + new_whole_entries(
            tx, params, distribute_over_period, config)

    for trash in trashbin:
        entries.remove(trash)

    return entries + newEntries, errors
Example #11
def depreciate(entries, options_map, config_string=""):
    """
    Beancount plugin: Generates new entries to depreciate the target posting over a given period.

    Args:
      entries: A list of directives. We're interested only in the Transaction instances.
      options_map: A parser options dict.
      config_string: A configuration string in JSON format given in source file.
    Returns:
      A tuple of entries and errors.
    """

    errors = []

    ## Parse config and set defaults
    config_obj = read_config(config_string)
    config = {
      # aliases_before  : config_obj.pop('aliases_before'  , ['spreadBefore']),
        'aliases_after'   : config_obj.pop('aliases_after'   , ['deprAfter', 'depr']),
        'alias_seperator' : config_obj.pop('alias_seperator'  , '-'),
        'default_duration': config_obj.pop('default_duration', 'Year'),
        'default_step'    : config_obj.pop('default_step'    , 'Day'),
        'min_value' : D(str(config_obj.pop('min_value'       , 0.05))),
        'max_new_tx'      : config_obj.pop('max_new_tx'      , 9999),
        'suffix'          : config_obj.pop('suffix'          , ' (depr %d/%d)'),
        'tag'             : config_obj.pop('tag'             , 'depreciated'),
        'translations'    : {
            config_obj.pop('account_assets'  , 'Assets:Fixed')     : config_obj.pop('account_expenses', 'Expenses:Depreciation'),
            config_obj.pop('account_liab'    , 'Liabilities:Fixed'): config_obj.pop('account_income'  , 'Income:Appreciation'),
        },
    }

    newEntries = []
    for tx in filter_txns(entries):

        # Spread at posting level because not all account types may be eligible.
        selected_postings = []
        for i, posting in enumerate(tx.postings):
            # We are interested in only marked postings. TODO: ALIASES_BEFORE.
            params = extract_mark_posting(posting, config) \
                  or extract_mark_tx(tx, config) \
                  or False
            if not params:
                continue

            for translation in config['translations']:
                if posting.account[0:len(translation)] == translation:
                    new_account = config['translations'][translation] + posting.account[len(translation):]
                    selected_postings.append( (i, new_account, params, posting) )

        # The selected postings themselves are left unchanged; only new
        # entries are added.

        # For selected postings add new postings bundled into entries.
        if selected_postings:
            newEntries = newEntries + new_filtered_entries(
                tx, params, distribute_over_period, selected_postings, config)

    return entries + newEntries, errors
Example #12
def extract_transactions_for_account(entries: data.Entries,
                                     config: Investment) -> data.Entries:
    """Get the list of transactions affecting an investment account."""
    match_accounts = {config.asset_account}
    match_accounts.update(config.dividend_accounts)
    match_accounts.update(config.match_accounts)
    return [
        entry for entry in data.filter_txns(entries)
        if any(posting.account in match_accounts for posting in entry.postings)
    ]
Example #13
def find_similar_entries(entries,
                         source_entries,
                         comparator=None,
                         window_days=2):
    """Find which entries from a list are potential duplicates of a set.

    Note: If there are multiple entries from 'source_entries' matching an entry
    in 'entries', only the first match is returned. Note that this function
    could in theory decide to merge some of the imported entries with each
    other.

    Args:
      entries: The list of entries to classify as duplicate or not.
      source_entries: The list of entries against which to match. This is the
        previous, or existing set of entries to compare against. This may be null
        or empty.
      comparator: A functor used to establish the similarity of two entries.
      window_days: The number of days (inclusive) before or after to scan the
        entries to classify against.
    Returns:
      A list of pairs of entries (entry, source_entry) where entry is from
      'entries' and is deemed to be a duplicate of source_entry, from
      'source_entries'.
    """
    window_head = datetime.timedelta(days=window_days)
    window_tail = datetime.timedelta(days=window_days + 1)

    if comparator is None:
        comparator = SimilarityComparator()

    # For each of the new entries, look at existing entries at a nearby date.
    duplicates = []
    if source_entries is not None:
        for entry in data.filter_txns(entries):
            for source_entry in data.filter_txns(
                    data.iter_entry_dates(source_entries,
                                          entry.date - window_head,
                                          entry.date + window_tail)):
                if comparator(entry, source_entry):
                    duplicates.append((entry, source_entry))
                    break
    return duplicates
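A usage sketch (hypothetical file names; loader is assumed to be imported, and SimilarityComparator comes from the same module):

existing_entries, _, _ = loader.load_file("ledger.beancount")  # hypothetical
imported_entries, _, _ = loader.load_file("import.beancount")  # hypothetical
for entry, source_entry in find_similar_entries(imported_entries,
                                                existing_entries,
                                                window_days=2):
    print("Possible duplicate:", entry.date, entry.narration)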
Example #14
    def process_entries(self, imported_entries) -> List[Union[ALL_DIRECTIVES]]:
        """Process imported entries.

        Transactions might be modified, all other entries are left as is.

        Returns:
            The list of entries to be imported.
        """
        enhanced_transactions = self.process_transactions(
            list(filter_txns(imported_entries)))
        return merge_non_transaction_entries(imported_entries,
                                             enhanced_transactions)
Example #15
def main():
    optparser = argparse.ArgumentParser(description=__doc__)
    optparser.add_argument('filename', help='Transactions to be considered')
    optparser.add_argument('filename_diff', help='Transactions to be removed')

    optparser.add_argument('-q',
                           '--quiet',
                           action='store_true',
                           help="Don't print file or line numbers.")

    args = optparser.parse_args()

    # Parse the ledger files.
    entries, errors, options = loader.load_file(args.filename,
                                                log_errors=logging.error)
    entries_diff, errors_diff, options_diff = loader.load_file(
        args.filename_diff, log_errors=logging.error)

    # Create a mapping from links to lists of transactions to find.
    link_map = collections.defaultdict(list)
    for entry in data.filter_txns(entries_diff):
        for link in entry.links:
            link_map[link].append(entry)

    # Filter out the transactions.
    filtered_entries = []
    for entry in data.filter_txns(entries):
        for link in entry.links:
            if link in link_map:
                break
        else:
            filtered_entries.append(entry)

    # Print out something about each entry.
    for entry in filtered_entries:
        if not args.quiet:
            print()
            print('{}'.format(printer.render_source(entry.meta)))
            print()
        print(printer.format_entry(entry))
Example #16
def recur(entries, options_map, config_string=""):
    """
    Beancount plugin: Duplicates all entry postings over time.

    Args:
      entries: A list of directives. We're interested only in the Transaction instances.
      options_map: A parser options dict.
      config_string: A configuration string in JSON format given in source file.
    Returns:
      A tuple of entries and errors.
    """

    errors = []

    ## Parse config and set defaults
    config_obj = read_config(config_string)
    config = {
        # ALIASES_BEFORE  : config_obj.pop('aliases_before'  , ['recurBefore']),
        'aliases_after'   : config_obj.pop('aliases_after'   , ['recurAfter', 'recur']),
        'alias_seperator' : config_obj.pop('alias_seperator' , '-'),
        'default_duration': config_obj.pop('default_duration', 'inf'),
        'default_step'    : config_obj.pop('default_step'    , 'Day'),
        'min_value' : D(str(config_obj.pop('min_value'       , 0.05))),
        'max_new_tx'      : config_obj.pop('max_new_tx'      , 9999),
        'suffix'          : config_obj.pop('suffix'          , ' (recur %d/%d)'),
        'tag'             : config_obj.pop('tag'             , 'recurred'),
    }

    newEntries = []
    trashbin = []
    for tx in filter_txns(entries):

        # Recur at entry level only, so that it balances.

        # We are interested in only marked entries. TODO: ALIASES_BEFORE.
        params = extract_mark_tx(tx, config)
        if not params:
            continue

        # For selected entries add new entries.
        trashbin.append(tx)

        if 'recur' in tx.meta:
            tx.meta.pop('recur')

        newEntries = newEntries + new_whole_entries(tx, params, dublicate_over_period, config)

    for trash in trashbin:
        entries.remove(trash)

    return entries + newEntries, errors
Example #17
def do_region(filename, args, conversion=None):
    """Print out a list of transactions in a region and balances.

    Args:
      filename: A string, the name of the ledger file.
      args: A tuple of the remaining arguments. We expect the first argument
        to be a string containing either a lineno integer or a
        (filename:)?lineno:lineno combination (which can be used if the
        location is not in the top-level file).
      conversion: A string, one of None, 'value', or 'cost'; if set, convert
        balances output to market value (or cost).
    """
    # Parse the arguments, get the line number.
    if len(args) != 1:
        raise SystemExit("Missing line number or link argument.")
    location_spec = args[0]

    # Load the input file.
    entries, errors, options_map = loader.load_file(filename)

    # Parse the argument as a line number or a
    # "<filename>:<lineno>:<lineno>" spec to pull context from, with
    # optional filename and optional last line number.
    #
    # If a filename is not provided, the ledger's top-level filename is used
    # (this is the common case). An explicit filename is used to get context
    # in included files.
    #
    # If a single line number is provided, the closest transaction is
    # selected. If an interval of line numbers is provided, all the
    # transactions whose first line is inside the interval are selected.
    match = re.match(r"(?:(.+?):)?(\d+):(\d+)$", location_spec)
    if not match:
        raise SystemExit("Invalid line number or link format for region.")

    included_filename, first_line, last_line = match.groups()
    search_filename = (path.abspath(included_filename)
                       if included_filename else options_map['filename'])
    lineno = int(first_line)
    last_lineno = int(last_line)

    # Find all the entries in the region. (To be clear, this isn't like the
    # 'linked' command, none of the links are followed.)
    region_entries = [
        entry for entry in data.filter_txns(entries)
        if (entry.meta['filename'] == search_filename
            and lineno <= entry.meta['lineno'] <= last_lineno)
    ]

    price_map = prices.build_price_map(
        entries) if conversion == 'value' else None
    render_mini_balances(region_entries, options_map, conversion, price_map)
Example #18
def get_postings(filename, account_regexp, tag=None):
    if tag:
        match = lambda entry, posting: (re.match(
            account_regexp, posting.account) and tag in entry.tags)
    else:
        match = lambda _, posting: (re.match(account_regexp, posting.account))

    entries, _, _ = loader.load_file(filename)
    txn_postings = [
        data.TxnPosting(entry, posting) for entry in data.filter_txns(entries)
        for posting in entry.postings if match(entry, posting)
    ]
    return txn_postings
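A usage sketch (hypothetical ledger path, account pattern, and tag; data.TxnPosting exposes the txn and posting fields used below):

for txn_posting in get_postings("ledger.beancount", r"Expenses:Travel",
                                tag="vacation"):  # hypothetical inputs
    print(txn_posting.txn.date,
          txn_posting.posting.account,
          txn_posting.posting.units)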
Example #19
    def test_find_similar_entries__multiple_matches(self, entries, _, __):
        """
            plugin "beancount.plugins.auto_accounts"

            2016-02-01 * "A"
              Assets:Account1    10.00 USD
              Assets:Account2   -10.00 USD

            2016-02-02 * "B"
              Assets:Account1    10.00 USD
              Assets:Account2   -10.00 USD

            2016-02-03 * "C"
              Assets:Account1    10.00 USD
              Assets:Account2   -10.00 USD

            2016-02-04 * "D"
              Assets:Account1    10.00 USD
              Assets:Account2   -10.00 USD

            2016-02-05 * "D"
              Assets:Account1    10.00 USD
              Assets:Account2   -10.00 USD
        """
        # Test it with a single entry.
        new_entries = list(data.filter_txns(entries))[2:3]
        duplicates = similar.find_similar_entries(new_entries,
                                                  entries,
                                                  window_days=1)
        self.assertEqual(1, len(duplicates))
        self.assertEqual(new_entries[0], duplicates[0][0])

        # Test it with multiple duplicate entries.
        new_entries = list(data.filter_txns(entries))[1:4]
        duplicates = similar.find_similar_entries(new_entries,
                                                  entries,
                                                  window_days=1)
        self.assertEqual(len(new_entries), len(duplicates))
Example #20
    def load_training_data(self, existing_entries):
        """Load training data, i.e., a list of Beancount entries."""
        training_data = existing_entries or []
        training_data = list(filter_txns(training_data))
        length_all = len(training_data)
        training_data = [
            txn for txn in training_data if self.training_data_filter(txn)
        ]
        logger.debug(
            "Filtered training data to %s of %s entries.",
            len(training_data),
            length_all,
        )
        self.training_data = training_data
Example #21
def split(entries, options_map, config_string=""):
    """
    Beancount plugin: Duplicates all entry postings over time at a fraction of their value.

    Args:
      entries: A list of directives. We're interested only in the Transaction instances.
      options_map: A parser options dict.
      config_string: A configuration string in JSON format given in source file.
    Returns:
      A tuple of entries and errors.
    """

    errors = []

    ## Parse config and set defaults
    config_obj = read_config(config_string)
    config = {
        # ALIASES_BEFORE  : config_obj.pop('aliases_before'  , ['splitBefore']),
        'aliases_after'   : config_obj.pop('aliases_after'   , ['splitAfter', 'split']),
        'alias_seperator' : config_obj.pop('alias_seperator' , '-'),
        'default_duration': config_obj.pop('default_duration', 'Month'),
        'default_step'    : config_obj.pop('default_step'    , 'Day'),
        'min_value' : D(str(config_obj.pop('min_value'       , 0.05))),
        'max_new_tx'      : config_obj.pop('max_new_tx'      , 9999),
        'suffix'          : config_obj.pop('suffix'          , ' (split %d/%d)'),
        'tag'             : config_obj.pop('tag'             , 'splitted'),
    }

    newEntries = []
    trashbin = []
    for tx in filter_txns(entries):

        # Split at entry level only, so that it balances.

        # We are interested in only marked entries. TODO: ALIASES_BEFORE.
        params = extract_mark_tx(tx, config)
        if not params:
            continue

        # For selected entries add new entries.
        trashbin.append(tx)
        newEntries = newEntries + new_whole_entries(tx, params, distribute_over_period, config)

    for trash in trashbin:
        entries.remove(trash)

    return entries + newEntries, errors
Example #22
    def test_unmergeable(self, entries, errors, __):
        """
            plugin "beancount.plugins.auto_accounts"

            2015-01-01 *
              Expenses:Restaurant    1.11 USD
              Equity:Other

            2015-01-02 * "Account"
              Expenses:Grocery       1.11 USD
              Equity:Other

            2015-01-03 * "Currency"
              Expenses:Restaurant    1.11 CAD
              Equity:Other

            2015-01-04 * "Cost"
              Expenses:Restaurant    1.11 USD {5 ECUS}
              Equity:Other

            2015-01-05 * "Price"
              Expenses:Restaurant    1.11 USD @ 10 ECUS
              Equity:Other

            2015-01-05 * "Flag"
              ! Expenses:Restaurant  1.11 USD
              Equity:Other

        """
        txn = next(data.filter_txns(entries))
        merged_entry = compress.merge(entries, txn)

        self.assertEqualEntries(
            """

          2015-01-01 *
            Equity:Other            -1.11 CAD
            Equity:Other           -16.65 ECUS
            Equity:Other            -3.33 USD
            Expenses:Grocery         1.11 USD
            Expenses:Restaurant      1.11 CAD
            Expenses:Restaurant      1.11 USD @ 10 ECUS
            Expenses:Restaurant      1.11 USD {5 ECUS, 2015-01-04}
            Expenses:Restaurant      1.11 USD
            ! Expenses:Restaurant    1.11 USD

        """, [merged_entry])
Example #23
def merge(entries, prototype_txn):
    """Merge the postings of a list of Transactions into a single one.

    Merge postings the given entries into a single entry with the Transaction
    attributes of the prototype. Return the new entry. The combined list of
    postings are merged if everything about the postings is the same except the
    number.

    Args:
      entries: A list of directives.
      prototype_txn: A Transaction which is used to create the compressed
          Transaction instance. Its list of postings is ignored.
    Returns:
      A new Transaction instance which contains all the postings from the input
      entries merged together.

    """
    # Aggregate the postings together. This is a mapping of numberless postings
    # to their number of units.
    postings_map = collections.defaultdict(Decimal)
    for entry in data.filter_txns(entries):
        for posting in entry.postings:
            # We strip the number off the posting to act as an aggregation key.
            key = data.Posting(posting.account,
                               Amount(None, posting.units.currency),
                               posting.cost, posting.price, posting.flag, None)
            postings_map[key] += posting.units.number

    # Create a new transaction with the aggregated postings.
    new_entry = data.Transaction(prototype_txn.meta, prototype_txn.date,
                                 prototype_txn.flag, prototype_txn.payee,
                                 prototype_txn.narration, data.EMPTY_SET,
                                 data.EMPTY_SET, [])

    # Sort for at least some stability of output.
    sorted_items = sorted(postings_map.items(),
                          key=lambda item:
                          (item[0].account, item[0].units.currency, item[1]))

    # Issue the merged postings.
    for posting, number in sorted_items:
        units = Amount(number, posting.units.currency)
        new_entry.postings.append(
            data.Posting(posting.account, units, posting.cost, posting.price,
                         posting.flag, posting.meta))

    return new_entry
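A hedged sketch that merges two same-day charges into one entry (the Decimal, Amount, and data imports used by merge are assumed to be in scope; the first transaction serves as the prototype for date, payee, and narration):

from beancount import loader
from beancount.parser import printer

entries, _, _ = loader.load_string("""
2021-04-01 open Expenses:Coffee
2021-04-01 open Liabilities:Card

2021-04-02 * "Cafe" "Espresso"
  Expenses:Coffee   3.00 USD
  Liabilities:Card

2021-04-02 * "Cafe" "Refill"
  Expenses:Coffee   2.50 USD
  Liabilities:Card
""")
txns = list(data.filter_txns(entries))
print(printer.format_entry(merge(txns, txns[0])))
# Expenses:Coffee aggregates to 5.50 USD; Liabilities:Card to -5.50 USD.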
Example #24
def validate(entries, options_map, raw_rules):
    """Traverse all entries and ensure each of them satisfies RULES checks.

    Args:
      entries: a list of directives
      options_map: an options map (unused)
      raw_rules: a string of Python source defining the rules, evaluated with
        eval
    Returns:
      a tuple of the original entries and a list of new errors, if any

    """
    errors = []
    rules = compile_rules(eval(raw_rules))

    for entry in filter_txns(entries):
        errors.extend(validate_txn(entry, rules))

    return entries, errors
Example #25
    def test_transfer_lots(self, entries, errors, _):
        """
        plugin "beanlabs.plugins.transfer_lots" "transfer"

        2020-01-01 open Assets:Bank:Checking
        2020-01-01 open Assets:Coinbase  "FIFO"
        2020-01-01 open Assets:Binance

        2020-12-25 * "Fill up account with crypto"
          Assets:Coinbase         0.2 BTC {15000 USD}
          Assets:Coinbase         0.3 BTC {16000 USD}
          Assets:Coinbase         0.1 BTC {17000 USD}
          Assets:Coinbase         0.4 BTC {18000 USD, "wow"}
          Assets:Bank:Checking

        2020-12-26 * "Transfer lots" #transfer
          Assets:Coinbase        -0.4 BTC {}
          Assets:Binance

        2020-12-26 * "Transfer the special lot" #transfer
          Assets:Coinbase        -0.4 BTC {"wow"}
          Assets:Binance
        """
        self.assertEqualEntries(
            """

        2020-12-25 * "Fill up account with crypto"
          Assets:Coinbase            0.2 BTC {15000 USD, 2020-12-25}
          Assets:Coinbase            0.3 BTC {16000 USD, 2020-12-25}
          Assets:Coinbase            0.1 BTC {17000 USD, 2020-12-25}
          Assets:Coinbase            0.4 BTC {18000 USD, 2020-12-25, "wow"}
          Assets:Bank:Checking  -16700.0 USD

        2020-12-26 * "Transfer lots" #transfer
          Assets:Coinbase  -0.2 BTC {15000 USD, 2020-12-25}
          Assets:Coinbase  -0.2 BTC {16000 USD, 2020-12-25}
          Assets:Binance    0.2 BTC {15000 USD, 2020-12-25}
          Assets:Binance    0.2 BTC {16000 USD, 2020-12-25}

        2020-12-26 * "Transfer the special lot" #transfer
          Assets:Coinbase  -0.4 BTC {18000 USD, 2020-12-25, "wow"}
          Assets:Binance    0.4 BTC {18000 USD, 2020-12-25, "wow"}

        """, list(data.filter_txns(entries)))
    def test_selection(self, entries, errors, _):
        """
          plugin "beancount.plugins.currency_accounts" ""

          2018-01-01 open Assets:Checking
          2018-01-01 open Income:Salary

          2018-03-01 * ""
            Assets:Checking    100.00 USD
            Income:Salary     -100.00 USD

          2018-03-02 * "" #processed
            Assets:Checking    1200.00 CAD
            Income:Salary     -1000.00 USD @ 1.2 CAD
        """
        self.assertFalse(errors)
        for entry in data.filter_txns(entries):
            self.assertTrue(
                (currency_accounts.META_PROCESSED in entry.meta) == (
                    'processed' in entry.tags))
Example #27
    def test_merge_price(self, entries, errors, __):
        """
            plugin "beancount.plugins.auto_accounts"

            2011-05-17 * "Something"
              Expenses:Restaurant   1.11 USD @ 100 ECUS
              Assets:Other

            2011-05-18 * "Something Else"
              Expenses:Restaurant   1.22 USD @ 100 ECUS
              Assets:Other
        """
        txn = next(data.filter_txns(entries))
        merged_entry = compress.merge(entries, txn)
        self.assertEqualEntries("""

            2011-05-17 * "Something"
              Expenses:Restaurant   2.33 USD @ 100 ECUS
              Assets:Other       -233.00 ECUS

        """, [merged_entry])
Example #28
def convert(entries, output_format="ledger", dcontext=None, config={}):
    """
    Convert beancount entries to ledger output
    """

    if not dcontext:
        dcontext = display_context.DisplayContext()
        for entry in filter_txns(entries):
            for posting in entry.postings:
                if posting.units is None:
                    continue
                if (posting.meta and "__automatic__" in posting.meta
                        and "__residual__" not in posting.meta):
                    continue
                dcontext.update(posting.units.number, posting.units.currency)

    if output_format == "hledger":
        printer = HLedgerPrinter(dcontext=dcontext, config=config)
    else:
        printer = LedgerPrinter(dcontext=dcontext, config=config)
    return "\n".join(map_data(printer(entry), config) for entry in entries)
Example #29
def infer_investments_configuration(entries: data.Entries,
                                    account_list: List[Account],
                                    out_config: InvestmentConfig):
    """Infer a reasonable configuration for input."""

    all_accounts = set(getters.get_account_open_close(entries))

    for account in account_list:
        aconfig = out_config.investment.add()
        aconfig.currency = accountlib.leaf(account)
        aconfig.asset_account = account

        regexp = re.compile(
            re.sub(r"^[A-Z][^:]+:", "[A-Z][A-Za-z0-9]+:", account) +
            ":Dividends?")
        for maccount in filter(regexp.match, all_accounts):
            aconfig.dividend_accounts.append(maccount)

        match_accounts = set()
        match_accounts.add(aconfig.asset_account)
        match_accounts.update(aconfig.dividend_accounts)
        match_accounts.update(aconfig.match_accounts)

        # Figure out the total set of accounts seen in those transactions.
        cash_accounts = set()
        for entry in data.filter_txns(entries):
            if any(posting.account in match_accounts
                   for posting in entry.postings):
                for posting in entry.postings:
                    if (posting.account == aconfig.asset_account
                            or posting.account in aconfig.dividend_accounts
                            or posting.account in aconfig.match_accounts):
                        continue
                    if (re.search(r":(Cash|Checking|Receivable|GSURefund)$",
                                  posting.account) or re.search(
                                      r"Receivable|Payable", posting.account)
                            or re.match(r"Income:.*:(Match401k)$",
                                        posting.account)):
                        cash_accounts.add(posting.account)
        aconfig.cash_accounts.extend(cash_accounts)
Example #30
def main():
    date_parser = lambda s: dateutil.parser.parse(s).date()
    parser = argparse.ArgumentParser(description=__doc__.strip())

    parser.add_argument('-s',
                        '--start-date',
                        action='store',
                        type=date_parser,
                        help="Start date to timespan to filter")
    parser.add_argument('-e',
                        '--end-date',
                        action='store',
                        type=date_parser,
                        help="End date of timespan to filter")

    parser.add_argument('filename', help='Beancount ledger file')
    args = parser.parse_args()

    entries, _, options_map = loader.load_file(args.filename)

    if args.start_date:
        entries = (entry for entry in entries if entry.date >= args.start_date)
    if args.end_date:
        entries = (entry for entry in entries if entry.date < args.end_date)

    balances = collections.defaultdict(inventory.Inventory)
    rows = [('open_date', 'open_posting', 'close_date', 'close_posting',
             'close_price')]
    for entry in data.filter_txns(entries):
        for posting in entry.postings:
            account_balance = balances[posting.account]
            closing = position.get_position(posting)
            price = posting.price
            opening, booking = account_balance.add_position(posting)
            if posting.cost is not None and booking == inventory.MatchResult.REDUCED:
                rows.append(
                    (opening.cost.date, opening, entry.date, closing, price))

    table = petl.wrap(rows)
    print(table.lookallstr())
Example #31
    def test_simple(self, entries, _, __):
        """
            plugin "beancount.plugins.auto_accounts"

            2016-01-03 * "Base reservation" ^base
              Expenses:Alcohol     20.00 USD
              Expenses:Tips         1.03 USD
              Assets:Other

            2016-01-03 * "Similar amount within bounds" ^in-bounds
              Expenses:Alcohol     20.99 USD
              Assets:Other

            2016-01-03 * "Similar amount out of bounds" ^out-bounds
              Expenses:Alcohol     21.00 USD
              Assets:Other

            2016-01-06 * "Date too far" ^too-late
              Expenses:Alcohol     20.00 USD
              Expenses:Tips         1.03 USD
              Assets:Other

            2016-01-03 * "Non-overlapping accounts" ^non-accounts
              Expenses:Alcohol     20.00 USD
              Expenses:Tips         1.03 USD
              Assets:SomethingElse
        """
        txns = list(data.filter_txns(entries))

        def compare(expected, link1, link2):
            self.assertEqual(
                expected,
                self.comparator(
                    next(txn for txn in txns if link1 in txn.links),
                    next(txn for txn in txns if link2 in txn.links)))

        compare(True, 'base', 'base')
        compare(True, 'base', 'in-bounds')
        compare(False, 'base', 'out-bounds')
        compare(False, 'base', 'too-late')
        compare(False, 'base', 'non-accounts')
Example #32
def check(entries, options_map):
    errors = []

    for entry in data.filter_txns(entries):
        positivePortfolioSums = defaultdict(Decimal)
        negativePortfolioSums = defaultdict(Decimal)
        for posting in entry.postings:
            if posting.meta and 'portfolio_check_weight' in posting.meta:
                weight = Decimal(posting.meta['portfolio_check_weight'])
            else:
                weight = round(convert.get_weight(posting).number, 2)
            account = posting.account
            portfolio = account.split(':')[1]
            if weight > 0:
                positivePortfolioSums[portfolio] += weight
            else:
                negativePortfolioSums[portfolio] += weight

        portfolios = set(
            list(positivePortfolioSums.keys()) +
            list(negativePortfolioSums.keys()))
        weight = None
        for portfolio in portfolios:
            positiveWeight = positivePortfolioSums[portfolio]
            negativeWeight = -negativePortfolioSums[portfolio]
            if (not isclose(positiveWeight, negativeWeight, abs_tol=0.05)):
                errors.append(
                    NonZeroWeightPerPortfolio(
                        entry.meta,
                        f'Weights for portfolio {portfolio} don\'t equal zero {positiveWeight} != {-negativeWeight}',
                        entry))
            if weight and weight != positiveWeight and 'skip_cross_portfolio_check' not in entry.meta:
                errors.append(
                    DifferentWeightPerPortfolio(
                        entry.meta, 'Not all portfolios have the same weight',
                        entry))
            weight = positiveWeight

    return entries, errors
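A hedged sketch of a transaction this check accepts, assuming an account layout in which the second account component names the portfolio (the convert, isclose, and defaultdict imports used by check are assumed to be in scope):

from beancount import loader

entries, _, options_map = loader.load_string("""
2021-01-01 open Assets:Retirement:Cash
2021-01-01 open Assets:Retirement:Stock

2021-02-01 * "Rebalance"
  Assets:Retirement:Stock    10 VTI {100.00 USD}
  Assets:Retirement:Cash   -1000.00 USD
""")
_, check_errors = check(entries, options_map)
assert not check_errors  # +1000.00 and -1000.00 cancel within 'Retirement'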