def is_valid_document(self, file_path):
    """Check if file_path is the path to a document.

    A path is valid when it occurs in one of the Document entries, or when
    a Transaction entry carries it in a "statement" metadata field.
    """
    document_entries = misc_utils.filter_type(self.entries, Document)
    if any(doc.filename == file_path for doc in document_entries):
        return True
    transactions = misc_utils.filter_type(self.entries, Transaction)
    return any(
        "statement" in txn.meta and txn.meta["statement"] == file_path
        for txn in transactions)
def postings(self):
    """All postings contained in some transaction."""
    collected = []
    for transaction in filter_type(self.entries, Transaction):
        collected.extend(transaction.postings)
    return collected
def render_htmldiv(self, entries, errors, options_map, file):
    """Render the table of Document entries, or a placeholder if empty."""
    document_entries = list(misc_utils.filter_type(entries, data.Document))
    if not document_entries:
        file.write("<p>(No documents.)</p>")
        return
    journal_html.html_entries_table(file, document_entries, self.formatter)
def load_file(self):
    """Extract sidebar links and upcoming events from the ledger."""
    all_entries = self.ledger.all_entries
    custom_entries = list(filter_type(all_entries, Custom))
    self.sidebar_links = _sidebar_links(custom_entries)
    max_delta = self.ledger.fava_options['upcoming-events']
    self.upcoming_events = _upcoming_events(all_entries, max_delta)
def load_file(self): """Load the main file and all included files and set attributes.""" # use the internal function to disable cache if not self._is_encrypted: # pylint: disable=protected-access self.all_entries, self.errors, self.options = \ loader._load([(self.beancount_file_path, True)], None, None, None) include_path = os.path.dirname(self.beancount_file_path) self._watcher.update(self.options['include'], [ os.path.join(include_path, path) for path in self.options['documents'] ]) else: self.all_entries, self.errors, self.options = \ loader.load_file(self.beancount_file_path) self.price_map = prices.build_price_map(self.all_entries) self.account_types = get_account_types(self.options) self.all_root_account = realization.realize(self.all_entries, self.account_types) if self.options['render_commas']: self._format_string = '{:,f}' self._default_format_string = '{:,.2f}' else: self._format_string = '{:f}' self._default_format_string = '{:.2f}' self.fava_options, errors = parse_options( filter_type(self.all_entries, Custom)) self.errors.extend(errors) for mod in MODULES: getattr(self, mod).load_file() self.filter(True)
def payee_accounts(self, payee):
    """Rank accounts for the given payee."""
    ranker = ExponentialDecayRanker(self.accounts)
    transactions = filter_type(self.ledger.all_entries, Transaction)
    for txn in (t for t in transactions if t.payee == payee):
        for posting in txn.postings:
            ranker.update(posting.account, txn.date)
    return ranker.sort()
def render_htmldiv(self, entries, errors, options_map, file):
    """Render the subset of transactions that include a conversion."""
    conversion_entries = []
    for txn in misc_utils.filter_type(entries, data.Transaction):
        if data.transaction_has_conversion(txn):
            conversion_entries.append(txn)
    journal_html.html_entries_table(
        file, conversion_entries, self.formatter, render_postings=True)
def events(self, event_type=None):
    """List events (possibly filtered by type).

    Args:
        event_type: Optional event type to restrict the result to.

    Returns:
        A list of Event entries.  Always a list: the previous version
        returned a lazy ``filter`` object when ``event_type`` was given,
        which could only be consumed once and was inconsistent with the
        unfiltered branch.
    """
    events = list(filter_type(self.entries, Event))
    if event_type:
        return [event for event in events if event.type == event_type]
    return events
def events(self, event_type=None):
    """List events (possibly filtered by type).

    Args:
        event_type: Optional event type to restrict the result to.

    Returns:
        A list of Event entries.  Materialized as a list in both branches;
        the previous version returned a one-shot ``filter`` iterator when
        ``event_type`` was given, a type inconsistent with the other branch.
    """
    events = list(filter_type(self.entries, Event))
    if event_type:
        return [event for event in events if event.type == event_type]
    return events
def events(self, event_type=None):
    """List events (possibly filtered by type)."""
    all_events = list(filter_type(self.entries, Event))
    if not event_type:
        return all_events
    return [event for event in all_events if event.type == event_type]
def is_valid_document(self, file_path):
    """Check if the given file_path is present in one of the Document
    entries or in a "statement"-metadata in a Transaction entry.

    :param file_path: A path to a file.
    :return: True when the file_path is referred to in a Document entry,
             False otherwise.
    """
    # Return as soon as a match is found: the old code scanned every entry
    # while carrying an is_present flag and compared it with `== False`,
    # which PEP 8 discourages.
    for entry in misc_utils.filter_type(self.entries, Document):
        if entry.filename == file_path:
            return True
    for entry in misc_utils.filter_type(self.entries, Transaction):
        if 'statement' in entry.meta and entry.meta['statement'] == file_path:
            return True
    return False
def is_valid_document(self, file_path):
    """Check if the given file_path is present in one of the Document
    entries or in a "statement"-metadata in a Transaction entry.

    :param file_path: A path to a file.
    :return: True when the file_path is referred to in a Document entry,
             False otherwise.
    """
    # Short-circuit on the first match; the previous version iterated all
    # entries, tracked a flag, and used the un-Pythonic `is_present == False`.
    for entry in misc_utils.filter_type(self.entries, Document):
        if entry.filename == file_path:
            return True
    for entry in misc_utils.filter_type(self.entries, Transaction):
        if 'statement' in entry.meta and entry.meta['statement'] == file_path:
            return True
    return False
def document_path(self, file_path):
    """Return the absolute path for file_path if it is a valid document.

    A path is valid when it occurs in one of the Document entries; raises
    FavaFileNotFoundException otherwise.
    """
    documents = misc_utils.filter_type(self.entries, Document)
    if any(document.filename == file_path for document in documents):
        return self.abs_path(file_path)
    raise FavaFileNotFoundException()
def load_file(self):
    """Collect sidebar links and upcoming events; warn on missing options."""
    all_entries = self.ledger.all_entries
    custom_entries = list(filter_type(all_entries, Custom))
    self.sidebar_links = _sidebar_links(custom_entries)
    max_delta = self.ledger.fava_options['upcoming-events']
    self.upcoming_events = _upcoming_events(all_entries, max_delta)
    if not self.ledger.options['operating_currency']:
        error = FavaError(
            None,
            'No operating currency specified. '
            'Please add one to your beancount file.',
            None)
        self.ledger.errors.append(error)
def is_document_path(self, path):
    """Check if file at path is a document.

    Raises:
        FavaAPIException: If ``path`` is not the path of one of the
            documents.
    """
    documents = filter_type(self.entries, Document)
    if any(entry.filename == path for entry in documents):
        return
    raise FavaAPIException(
        'File "{}" not found in document entries.'.format(path))
def test_filter_type(self):
    # pylint: disable=invalid-name
    class A:
        pass

    class B:
        pass

    class C:
        pass

    # Instantiate a mixed sequence and check that only A instances survive.
    instances = [cls() for cls in (A, B, A, A, C, B, C, A)]
    filtered = misc_utils.filter_type(instances, A)
    self.assertEqual([A, A, A, A], [type(obj) for obj in filtered])
def doc(filename=None):
    "Serve static filenames for documents directives."
    filename = '/' + filename

    # Check that there is a document directive that has this filename.
    # This is for security; we don't want to be able to serve just any file.
    matches = (entry.filename == filename
               for entry in misc_utils.filter_type(app.entries, data.Document))
    if not any(matches):
        raise bottle.HTTPError(404, "Not found")

    # Just serve the file ourselves.
    return bottle.static_file(path.basename(filename), path.dirname(filename))
def document_path(self, file_path):
    """Get absolute path of a document.

    Returns:
        The absolute path of `file_path` if it points to a document.

    Raises:
        FavaFileNotFoundException: If `path` is not the path to one of the
            documents.
    """
    for document in misc_utils.filter_type(self.entries, Document):
        if document.filename != file_path:
            continue
        return self.abs_path(file_path)
    raise FavaFileNotFoundException()
def document_path(self, path):
    """Get absolute path of a document.

    Returns:
        The absolute path of ``path`` if it points to a document.

    Raises:
        FavaAPIException: If ``path`` is not the path of one of the
            documents.
    """
    known = (entry.filename for entry in filter_type(self.entries, Document))
    if path in known:
        return path
    raise FavaAPIException(
        'File "{}" not found in document entries.'.format(path))
def _upcoming_events(entries, max_delta):
    """Parse entries for upcoming events.

    Args:
        entries: A list of entries.
        max_delta: Number of days that should be considered.

    Returns:
        A list of the Events in entries that are less than `max_delta`
        days away.
    """
    today = datetime.date.today()
    return [
        event for event in filter_type(entries, Event)
        if 0 <= (event.date - today).days < max_delta
    ]
def interval_totals(self, interval, accounts):
    """Renders totals for account (or accounts) in the intervals.

    Args:
        interval: A string for the interval.
        accounts: A single account (str) or a tuple of accounts.
    """
    for begin, end in pairwise(self.ledger.interval_ends(interval)):
        balance = Inventory()
        interval_entries = iter_entry_dates(self.ledger.entries, begin, end)
        for txn in filter_type(interval_entries, Transaction):
            matching = (p for p in txn.postings
                        if p.account.startswith(accounts))
            for posting in matching:
                balance.add_position(posting)
        yield {
            'begin_date': begin,
            'totals': _inventory_cost_or_value(balance, end),
            'budgets': self.ledger.budgets.calculate(accounts[0], begin, end),
        }
def interval_totals(self, interval, accounts):
    """Renders totals for account (or accounts) in the intervals.

    Args:
        interval: An interval.
        accounts: A single account (str) or a tuple of accounts.
    """
    for begin, end in pairwise(self.ledger.interval_ends(interval)):
        balance = CounterInventory()
        interval_entries = iter_entry_dates(self.ledger.entries, begin, end)
        for txn in filter_type(interval_entries, Transaction):
            for posting in txn.postings:
                if not posting.account.startswith(accounts):
                    continue
                balance.add_position(posting)
        yield {
            'date': begin,
            'balance': cost_or_value(balance, end),
            'budgets': self.ledger.budgets.calculate_children(
                accounts, begin, end),
        }
def interval_totals(self, interval, accounts):
    """Renders totals for account (or accounts) in the intervals.

    Args:
        interval: An interval.
        accounts: A single account (str) or a tuple of accounts.
    """
    for begin, end in pairwise(self.ledger.interval_ends(interval)):
        balance = CounterInventory()
        interval_entries = iter_entry_dates(self.ledger.entries, begin, end)
        for txn in filter_type(interval_entries, Transaction):
            matching = (p for p in txn.postings
                        if p.account.startswith(accounts))
            for posting in matching:
                balance.add_position(posting)
        budgets = self.ledger.budgets.calculate_children(accounts, begin, end)
        yield {
            "date": begin,
            "balance": cost_or_value(balance, end),
            "budgets": budgets,
        }
def load_file(self):
    """Build ranked completion lists (accounts, currencies, payees)."""
    all_entries = self.ledger.all_entries
    self.tags = getters.get_all_tags(all_entries)
    self.years = list(getters.get_active_years(all_entries))[::-1]

    account_ranker = ExponentialDecayRanker(
        self.list_accounts(active_only=True))
    currency_ranker = ExponentialDecayRanker(
        self.ledger.options['commodities'])
    payee_ranker = ExponentialDecayRanker()

    for txn in filter_type(all_entries, Transaction):
        date = txn.date
        if txn.payee:
            payee_ranker.update(txn.payee, date)
        for posting in txn.postings:
            account_ranker.update(posting.account, date)
            currency_ranker.update(posting.units.currency, date)
            cost = posting.cost
            if cost:
                currency_ranker.update(cost.currency, date)

    self.accounts = account_ranker.sort()
    self.currencies = currency_ranker.sort()
    self.payees = payee_ranker.sort()
def load_file(self):
    """Collect all Query entries from the ledger."""
    self.queries = [
        entry for entry in filter_type(self.ledger.all_entries, Query)
    ]
def load_file(self):
    """Parse budgets from Custom entries and record any parse errors."""
    custom_entries = filter_type(self.ledger.all_entries, Custom)
    self.budget_entries, errors = parse_budgets(custom_entries)
    self.ledger.errors.extend(errors)
def documents(self):
    """All currently filtered documents."""
    return [entry for entry in filter_type(self.entries, Document)]
def execute_select(query, entries, options_map):
    """Given a compiled select statement, execute the query.

    Args:
      query: An instance of a query_compile.Query
      entries: A list of directives.
      options_map: A parser's option_map.
    Returns:
      A pair of:
        result_types: A list of (name, data-type) item pairs.
        result_rows: A list of ResultRow tuples of length and types described
          by 'result_types'.
    """
    # Figure out the result types that describe what we return.
    result_types = [Column(target.name, target.c_expr.dtype)
                    for target in query.c_targets
                    if target.name is not None]

    # Pre-compute lists of the expressions to evaluate.
    group_indexes = (set(query.group_indexes)
                     if query.group_indexes is not None
                     else query.group_indexes)

    # Indexes of the columns for result rows and order rows.
    result_indexes = [index
                      for index, c_target in enumerate(query.c_targets)
                      if c_target.name]
    order_spec = query.order_spec

    # Figure out if we need to compute balance.
    uses_balance = any(uses_balance_column(c_expr)
                       for c_expr in itertools.chain(
                           [c_target.c_expr for c_target in query.c_targets],
                           [query.c_where] if query.c_where else []))

    context = create_row_context(entries, options_map)

    # Filter the entries using the FROM clause.
    filt_entries = (filter_entries(query.c_from, entries, options_map, context)
                    if query.c_from is not None
                    else entries)

    # Dispatch between the non-aggregated queries and aggregated queries.
    c_where = query.c_where
    rows = []

    # Precompute a list of expressions to be evaluated.
    c_target_exprs = [c_target.c_expr for c_target in query.c_targets]

    if query.group_indexes is None:
        # This is a non-aggregated query.

        # Iterate over all the postings once.
        for entry in misc_utils.filter_type(filt_entries, data.Transaction):
            context.entry = entry
            for posting in entry.postings:
                context.posting = posting
                if c_where is None or c_where(context):
                    # Compute the balance.
                    if uses_balance:
                        context.balance.add_position(posting)

                    # Evaluate all the values.
                    values = [c_expr(context) for c_expr in c_target_exprs]
                    rows.append(values)
    else:
        # This is an aggregated query.

        # Precompute lists of non-aggregate and aggregate expressions to
        # evaluate. For aggregate targets, we hunt down the aggregate
        # sub-expressions to evaluate, to avoid recursion during iteration.
        c_nonaggregate_exprs = []
        c_aggregate_exprs = []
        for index, c_expr in enumerate(c_target_exprs):
            if index in group_indexes:
                c_nonaggregate_exprs.append(c_expr)
            else:
                _, aggregate_exprs = query_compile.get_columns_and_aggregates(c_expr)
                c_aggregate_exprs.extend(aggregate_exprs)
        # Note: it is possible that there are no aggregates to compute here. You could
        # have all columns be non-aggregates and group-by the entire list of columns.

        # Pre-allocate handles in aggregation nodes.
        allocator = Allocator()
        for c_expr in c_aggregate_exprs:
            c_expr.allocate(allocator)

        # Iterate over all the postings to evaluate the aggregates.
        agg_store = {}
        for entry in misc_utils.filter_type(filt_entries, data.Transaction):
            context.entry = entry
            for posting in entry.postings:
                context.posting = posting
                if c_where is None or c_where(context):
                    # Compute the balance.
                    if uses_balance:
                        context.balance.add_position(posting)

                    # Compute the non-aggregate expressions.
                    row_key = tuple(c_expr(context)
                                    for c_expr in c_nonaggregate_exprs)

                    # Get an appropriate store for the unique key of this row.
                    try:
                        store = agg_store[row_key]
                    except KeyError:
                        # This is a row; create a new store.
                        store = allocator.create_store()
                        for c_expr in c_aggregate_exprs:
                            c_expr.initialize(store)
                        agg_store[row_key] = store

                    # Update the aggregate expressions.
                    for c_expr in c_aggregate_exprs:
                        c_expr.update(store, context)

        # Iterate over all the aggregations.
        for key, store in agg_store.items():
            key_iter = iter(key)
            values = []

            # Finalize the store.
            for c_expr in c_aggregate_exprs:
                c_expr.finalize(store)
            context.store = store

            # Merge grouped key values with finalized aggregate values, in
            # target order.
            for index, c_expr in enumerate(c_target_exprs):
                if index in group_indexes:
                    value = next(key_iter)
                else:
                    value = c_expr(context)
                values.append(value)

            # Skip row if HAVING clause expression is false.
            if query.having_index is not None:
                if not values[query.having_index]:
                    continue

            rows.append(values)

    # Order results if requested.
    if order_spec is not None:
        # Process the order-by clauses grouped by their ordering direction.
        for reverse, spec in itertools.groupby(reversed(order_spec),
                                               key=operator.itemgetter(1)):
            indexes = reversed([i[0] for i in spec])
            # The rows may contain None values: nullitemgetter()
            # replaces these with a special value that compares
            # smaller than anything else.
            rows.sort(key=nullitemgetter(*indexes), reverse=reverse)

    # Convert results into list of tuples.
    rows = [tuple(row[i] for i in result_indexes) for row in rows]

    # Apply distinct.
    if query.distinct:
        rows = list(misc_utils.uniquify(rows))

    # Apply limit.
    if query.limit is not None:
        rows = rows[:query.limit]

    return result_types, rows
def pad(entries, options_map):
    """Insert transaction entries to fulfill a subsequent balance check.

    Synthesize and insert Transaction entries right after Pad entries in order
    to fulfill checks in the padded accounts. Returns a new list of entries.
    Note that this doesn't pad across parent-child relationships, it is a very
    simple kind of pad. (I have found this to be sufficient in practice, and
    simpler to implement and understand.)

    Furthermore, this pads for a single currency only, that is, balance checks
    are specified only for one currency at a time, and pads will only be
    inserted for those currencies.

    Args:
      entries: A list of directives.
      options_map: A parser options dict.
    Returns:
      A new list of directives, with Pad entries inserted, and a list of new
      errors produced.
    """
    pad_errors = []

    # Find all the pad entries and group them by account.
    pads = list(misc_utils.filter_type(entries, data.Pad))
    pad_dict = misc_utils.groupby(lambda x: x.account, pads)

    # Partially realize the postings, so we can iterate them by account.
    by_account = realization.postings_by_account(entries)

    # A dict of pad -> list of entries to be inserted.
    new_entries = {id(pad): [] for pad in pads}

    # Process each account that has a padding group.
    for account_, pad_list in sorted(pad_dict.items()):

        # Last encountered / currency active pad entry.
        active_pad = None

        # Gather all the postings for the account and its children.
        postings = []
        is_child = account.parent_matcher(account_)
        for item_account, item_postings in by_account.items():
            if is_child(item_account):
                postings.extend(item_postings)
        postings.sort(key=data.posting_sortkey)

        # A set of currencies already padded so far in this account.
        padded_lots = set()

        # Running balance for the account, updated as we replay the postings
        # in sorted order.
        pad_balance = inventory.Inventory()
        for entry in postings:
            assert not isinstance(entry, data.Posting)
            if isinstance(entry, data.TxnPosting):
                # This is a transaction; update the running balance for this
                # account.
                pad_balance.add_position(entry.posting.position)
            elif isinstance(entry, data.Pad):
                if entry.account == account_:
                    # Mark this newly encountered pad as active and allow all
                    # lots to be padded heretofore.
                    active_pad = entry
                    padded_lots = set()
            elif isinstance(entry, data.Balance):
                check_amount = entry.amount

                # Compare the current balance amount to the expected one from
                # the check entry. IMPORTANT: You need to understand that this
                # does not check a single position, but rather checks that the
                # total amount for a particular currency (which itself is
                # distinct from the cost).
                balance_amount = pad_balance.get_units(check_amount.currency)
                diff_amount = amount.amount_sub(balance_amount, check_amount)

                # Use the specified tolerance or automatically infer it.
                tolerance = balance.get_tolerance(entry, options_map)

                if abs(diff_amount.number) > tolerance:
                    # The check fails; we need to pad.

                    # Pad only if pad entry is active and we haven't already
                    # padded that lot since it was last encountered.
                    if active_pad and (check_amount.currency not in padded_lots):

                        # Note: we decide that it's an error to try to pad
                        # positions at cost; we check here that all the existing
                        # positions with that currency have no cost.
                        positions = [pos
                                     for pos in pad_balance.get_positions()
                                     if pos.lot.currency == check_amount.currency]
                        for position_ in positions:
                            if position_.lot.cost is not None:
                                pad_errors.append(
                                    PadError(entry.meta,
                                             ("Attempt to pad an entry with cost for "
                                              "balance: {}".format(pad_balance)),
                                             active_pad))

                        # Thus our padding lot is without cost by default.
                        lot = position.Lot(check_amount.currency, None, None)
                        diff_position = position.Position(
                            lot, check_amount.number - balance_amount.number)

                        # Synthesize a new transaction entry for the difference.
                        narration = ('(Padding inserted for Balance of {} for '
                                     'difference {})').format(check_amount,
                                                              diff_position)
                        new_entry = data.Transaction(
                            active_pad.meta.copy(), active_pad.date,
                            flags.FLAG_PADDING, None, narration, None, None, [])
                        new_entry.postings.append(
                            data.Posting(active_pad.account, diff_position,
                                         None, None, None))
                        new_entry.postings.append(
                            data.Posting(active_pad.source_account,
                                         -diff_position, None, None, None))

                        # Save it for later insertion after the active pad.
                        new_entries[id(active_pad)].append(new_entry)

                        # Fixup the running balance.
                        position_, _ = pad_balance.add_position(diff_position)
                        if position_.is_negative_at_cost():
                            raise ValueError(
                                "Position held at cost goes negative: {}".format(position_))

                # Mark this lot as padded. Further checks should not pad this lot.
                padded_lots.add(check_amount.currency)

    # Insert the newly created entries right after the pad entries that created them.
    padded_entries = []
    for entry in entries:
        padded_entries.append(entry)
        if isinstance(entry, data.Pad):
            entry_list = new_entries[id(entry)]
            if entry_list:
                padded_entries.extend(entry_list)
            else:
                # Generate errors on unused pad entries.
                pad_errors.append(
                    PadError(entry.meta, "Unused Pad entry", entry))

    return padded_entries, pad_errors
def execute_query(query, entries, options_map):
    """Given a compiled select statement, execute the query.

    Args:
      query: An instance of a query_compile.Query
      entries: A list of directives.
      options_map: A parser's option_map.
    Returns:
      A pair of:
        result_types: A list of (name, data-type) item pairs.
        result_rows: A list of ResultRow tuples of length and types described
          by 'result_types'.
    """
    # Figure out the result types that describe what we return.
    result_types = [(target.name, target.c_expr.dtype)
                    for target in query.c_targets
                    if target.name is not None]

    # Create a class for each final result.
    # pylint: disable=invalid-name
    ResultRow = collections.namedtuple(
        'ResultRow',
        [target.name for target in query.c_targets if target.name is not None])

    # Pre-compute lists of the expressions to evaluate.
    group_indexes = (set(query.group_indexes)
                     if query.group_indexes is not None
                     else query.group_indexes)

    # Indexes of the columns for result rows and order rows.
    result_indexes = [index
                      for index, c_target in enumerate(query.c_targets)
                      if c_target.name]
    order_indexes = query.order_indexes

    # Figure out if we need to compute balance.
    uses_balance = any(
        uses_balance_column(c_expr)
        for c_expr in itertools.chain(
            [c_target.c_expr for c_target in query.c_targets],
            [query.c_where] if query.c_where else []))

    context = create_row_context(entries, options_map)

    # Filter the entries using the FROM clause.
    filt_entries = (filter_entries(query.c_from, entries, options_map, context)
                    if query.c_from is not None
                    else entries)

    # Dispatch between the non-aggregated queries and aggregated queries.
    c_where = query.c_where
    schwartz_rows = []

    # Precompute a list of expressions to be evaluated.
    c_target_exprs = [c_target.c_expr for c_target in query.c_targets]

    if query.group_indexes is None:
        # This is a non-aggregated query.

        # Iterate over all the postings once and produce schwartzian rows.
        for entry in misc_utils.filter_type(filt_entries, data.Transaction):
            context.entry = entry
            for posting in entry.postings:
                context.posting = posting
                if c_where is None or c_where(context):
                    # Compute the balance.
                    if uses_balance:
                        context.balance.add_position(posting)

                    # Evaluate all the values.
                    values = [c_expr(context) for c_expr in c_target_exprs]

                    # Compute result and sort-key objects.
                    result = ResultRow._make(values[index]
                                             for index in result_indexes)
                    sortkey = row_sortkey(order_indexes, values, c_target_exprs)
                    schwartz_rows.append((sortkey, result))
    else:
        # This is an aggregated query.

        # Precompute lists of non-aggregate and aggregate expressions to
        # evaluate. For aggregate targets, we hunt down the aggregate
        # sub-expressions to evaluate, to avoid recursion during iteration.
        c_nonaggregate_exprs = []
        c_aggregate_exprs = []
        for index, c_expr in enumerate(c_target_exprs):
            if index in group_indexes:
                c_nonaggregate_exprs.append(c_expr)
            else:
                _, aggregate_exprs = query_compile.get_columns_and_aggregates(
                    c_expr)
                c_aggregate_exprs.extend(aggregate_exprs)
        # Note: it is possible that there are no aggregates to compute here. You could
        # have all columns be non-aggregates and group-by the entire list of columns.

        # Pre-allocate handles in aggregation nodes.
        allocator = Allocator()
        for c_expr in c_aggregate_exprs:
            c_expr.allocate(allocator)

        # Iterate over all the postings to evaluate the aggregates.
        agg_store = {}
        for entry in misc_utils.filter_type(filt_entries, data.Transaction):
            context.entry = entry
            for posting in entry.postings:
                context.posting = posting
                if c_where is None or c_where(context):
                    # Compute the balance.
                    if uses_balance:
                        context.balance.add_position(posting)

                    # Compute the non-aggregate expressions.
                    row_key = tuple(c_expr(context)
                                    for c_expr in c_nonaggregate_exprs)

                    # Get an appropriate store for the unique key of this row.
                    try:
                        store = agg_store[row_key]
                    except KeyError:
                        # This is a row; create a new store.
                        store = allocator.create_store()
                        for c_expr in c_aggregate_exprs:
                            c_expr.initialize(store)
                        agg_store[row_key] = store

                    # Update the aggregate expressions.
                    for c_expr in c_aggregate_exprs:
                        c_expr.update(store, context)

        # Iterate over all the aggregations to produce the schwartzian rows.
        for key, store in agg_store.items():
            key_iter = iter(key)
            values = []

            # Finalize the store.
            for c_expr in c_aggregate_exprs:
                c_expr.finalize(store)
            context.store = store

            # Merge grouped key values with finalized aggregate values, in
            # target order.
            for index, c_expr in enumerate(c_target_exprs):
                if index in group_indexes:
                    value = next(key_iter)
                else:
                    value = c_expr(context)
                values.append(value)

            # Compute result and sort-key objects.
            result = ResultRow._make(values[index]
                                     for index in result_indexes)
            sortkey = row_sortkey(order_indexes, values, c_target_exprs)
            schwartz_rows.append((sortkey, result))

    # Order results if requested.
    if order_indexes is not None:
        schwartz_rows.sort(key=operator.itemgetter(0),
                           reverse=(query.ordering == 'DESC'))

    # Extract final results, in sorted order at this point.
    result_rows = [x[1] for x in schwartz_rows]

    # Apply distinct.
    if query.distinct:
        result_rows = list(misc_utils.uniquify(result_rows))

    # Apply limit.
    if query.limit is not None:
        result_rows = result_rows[:query.limit]

    # Flatten inventories if requested.
    if query.flatten:
        result_types, result_rows = flatten_results(result_types, result_rows)

    return (result_types, result_rows)
def postings(self):
    """All postings contained in some transaction."""
    return [
        posting
        for txn in filter_type(self.entries, Transaction)
        for posting in txn.postings
    ]