Example #1
def compare_entries(entries1, entries2):
    """Compare two lists of entries. This is used for testing.

    The entries are compared with disregard for their file location.

    Args:
      entries1: A list of directives of any type.
      entries2: Another list of directives of any type.
    Returns:
      A tuple of (same, missing1, missing2), where the fields are:
        same: A boolean, true if all the entries are equal.
        missing1: A list of directives from 'entries1' not found in
          'entries2'.
        missing2: A list of directives from 'entries2' not found in
          'entries1'.
    Raises:
      ValueError: If a duplicate entry is found.
    """
    hashes1, errors1 = hash_entries(entries1)
    hashes2, errors2 = hash_entries(entries2)
    keys1 = set(hashes1.keys())
    keys2 = set(hashes2.keys())

    if errors1 or errors2:
        error = (errors1 + errors2)[0]
        raise ValueError(str(error))

    same = keys1 == keys2
    missing1 = data.sorted([hashes1[key] for key in keys1 - keys2])
    missing2 = data.sorted([hashes2[key] for key in keys2 - keys1])
    return (same, missing1, missing2)
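This appears to be beancount.core.compare.compare_entries (v2). A minimal usage sketch, assuming Beancount v2 is installed and using a made-up two-line ledger:

from beancount import loader
from beancount.core import compare

entries, _, _ = loader.load_string("""
2023-01-01 open Assets:Cash
2023-01-01 open Expenses:Food
""")

# Compare the full list against a copy missing its last entry.
same, missing1, missing2 = compare.compare_entries(entries, entries[:-1])
print(same)            # False
print(len(missing1))   # 1 -- the entry dropped from the second list
print(len(missing2))   # 0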
Example #2
def compare_entries(entries1: List[Directive], entries2: List[Directive]):
    """Compare two lists of entries by content hash, ignoring metadata."""
    # Map each content hash to one representative entry, for reporting.
    hashes1 = {
        hash_entry(entry, exclude_meta=True): entry for entry in entries1}
    hashes2 = {
        hash_entry(entry, exclude_meta=True): entry for entry in entries2}

    # Count hashes per list so duplicates are compared by multiplicity
    # (a plain set would collapse duplicate entries).
    keys1 = Counter(hash_entry(entry, exclude_meta=True) for entry in entries1)
    keys2 = Counter(hash_entry(entry, exclude_meta=True) for entry in entries2)

    same = keys1 == keys2
    missing1 = data.sorted([hashes1[key] for key in keys1 - keys2])
    missing2 = data.sorted([hashes2[key] for key in keys2 - keys1])
    return same, missing1, missing2
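This variant differs from Example #1 by using Counter instead of set, so duplicate entries are compared by multiplicity. Counter subtraction keeps only positive counts, which is what makes keys1 - keys2 behave like a duplicate-aware set difference; a small standalone illustration:

from collections import Counter

keys1 = Counter({"h1": 2, "h2": 1})   # "h1" appears twice in entries1
keys2 = Counter({"h1": 1})

# Counter subtraction drops zero and negative counts: one surplus "h1"
# and the "h2" remain.
print(keys1 - keys2)   # Counter({'h1': 1, 'h2': 1})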
Example #3
def do_missing_open(filename, args):
    """Print out Open directives that are missing for the given input file.

    This can be useful during demos in order to quickly generate all the
    required Open directives without having to type them manually.

    Args:
      filename: A string, the name of the Beancount input file.
      args: A tuple of the remaining arguments. We expect the first argument
        to be an integer in string form.
    """
    from beancount.parser import printer
    from beancount.core import data
    from beancount.core import getters
    from beancount import loader

    entries, errors, options_map = loader.load_file(filename)

    # Get accounts usage and open directives.
    first_use_map, _ = getters.get_accounts_use_map(entries)
    open_close_map = getters.get_account_open_close(entries)

    new_entries = []
    for account, first_use_date in first_use_map.items():
        if account not in open_close_map:
            new_entries.append(
                data.Open(data.new_metadata(filename, 0), first_use_date,
                          account, None, None))

    dcontext = options_map['dcontext']
    printer.print_entries(data.sorted(new_entries), dcontext)
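A hedged sketch of the two getters used above (beancount v2): the first maps account to the date of its first use in a transaction, the second maps account to its (Open, Close) pair; accounts in the first map but not the second are exactly the ones missing an Open directive (the tiny ledger is made up):

from beancount import loader
from beancount.core import getters

entries, _, _ = loader.load_string("""
2023-01-01 open Assets:Cash
2023-02-01 * "Groceries"
  Assets:Cash      -10.00 USD
  Expenses:Food     10.00 USD
""")
first_use_map, _ = getters.get_accounts_use_map(entries)
open_close_map = getters.get_account_open_close(entries)
print(sorted(set(first_use_map) - set(open_close_map)))  # ['Expenses:Food']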
Example #4
def write_transactions_by_type(output_signatures: str,
                               account_data: AccountData,
                               dcontext: display_context.DisplayContext):
    """Write files of transactions by signature, for debugging."""

    # Build signature map.
    signature_map = collections.defaultdict(list)
    for accdata in account_data:
        for entry in accdata.transactions:
            signature_map[entry.meta['signature']].append(entry)

    # Render them to files, for debugging.
    os.makedirs(output_signatures, exist_ok=True)
    for sig, sigentries in signature_map.items():
        sigentries = data.sorted(sigentries)

        filename = "{}.org".format(sig)
        with open(path.join(output_signatures, filename), "w") as catfile:
            fprint = partial(print, file=catfile)
            fprint(
                ";; -*- mode: beancount; coding: utf-8; fill-column: 400 -*-")

            description = get_description(sig) or "?"
            fprint("description: {}".format(description))
            fprint("number_entries: {}".format(len(sigentries)))
            fprint()

            epr = printer.EntryPrinter(dcontext=dcontext,
                                       stringify_invalid_types=True)
            for entry in sigentries:
                fprint(epr(entry))
                fprint()
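The fprint = partial(print, file=...) idiom above binds the destination stream once so the rest of the function can call it exactly like print. A standalone, stdlib-only illustration:

from functools import partial
import io

buf = io.StringIO()
fprint = partial(print, file=buf)     # every fprint() call writes to buf
fprint(";; header line")
fprint("number_entries: {}".format(3))
fprint()                              # blank separator line, as in the example
print(buf.getvalue(), end="")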
Example #5
def extract(file, account_name, flag, currency):
    ofx = OfxParser.parse(strio(file.contents()))
    account = ofx.account
    statement = account.statement
    assert statement.currency.lower() == currency.lower(), (
        statement.currency + " != " + currency
    )
    ledger = []
    # create transactions
    for transaction in statement.transactions:
        units = Amount(transaction.amount, currency)
        posting = data.Posting(account_name, units, None, None, None, None)
        ref = data.new_metadata(file.name, 0)
        entry = data.Transaction(
            ref,
            transaction.date.date(),
            flag,
            titlecase(transaction.payee),
            transaction.memo,
            data.EMPTY_SET,
            data.EMPTY_SET,
            [posting],
        )
        ledger.append(entry)
    ledger = data.sorted(ledger)
    # Make a final balance assertion, if available.
    b = balance(file, account_name, currency, statement, ledger)
    if b is not None:
        ledger.append(b)
    return ledger
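A minimal sketch of the directive construction this importer performs, assuming Beancount v2 is installed (account names and values are made up): build one Posting and one Transaction by hand, then sort and print.

import datetime
from decimal import Decimal
from beancount.core import data
from beancount.core.amount import Amount
from beancount.parser import printer

meta = data.new_metadata("example.ofx", 0)
posting = data.Posting("Assets:Checking", Amount(Decimal("25.00"), "USD"),
                       None, None, None, None)
txn = data.Transaction(meta, datetime.date(2023, 1, 15), "*",
                       "Coffee Shop", "Morning coffee",
                       data.EMPTY_SET, data.EMPTY_SET, [posting])
for entry in data.sorted([txn]):
    printer.print_entry(entry)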
Example #6
def excludes_entries(subset_entries, entries):
    """Check that a list of entries does not appear in another list.

    Args:
      subset_entries: The set of entries to look for in 'entries'.
      entries: The larger list of entries that should not include 'subset_entries'.
    Returns:
      A boolean and a list of entries that are not supposed to appear.
    Raises:
      ValueError: If a duplicate entry is found.
    """
    subset_hashes, subset_errors = hash_entries(subset_entries,
                                                exclude_meta=True)
    subset_keys = set(subset_hashes.keys())
    hashes, errors = hash_entries(entries, exclude_meta=True)
    keys = set(hashes.keys())

    if subset_errors or errors:
        error = (subset_errors + errors)[0]
        raise ValueError(str(error))

    intersection = keys.intersection(subset_keys)
    excludes = not bool(intersection)
    extra = data.sorted([subset_hashes[key] for key in intersection])
    return (excludes, extra)
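This matches beancount.core.compare.excludes_entries (v2). A short usage sketch, assuming Beancount v2 is installed (the ledger is made up):

from beancount import loader
from beancount.core import compare

entries, _, _ = loader.load_string("""
2023-01-01 open Assets:Cash
2023-01-01 open Expenses:Food
""")

# The one-entry subset *is* contained in the full list, so excludes is
# False and the overlapping entry is returned.
excludes, extra = compare.excludes_entries(entries[:1], entries)
print(excludes, len(extra))   # False 1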
Example #7
def export_v2_data(filename: str, output_filename: str,
                   num_directives: Optional[int]):
    if output_filename.endswith(".pbtxt"):
        output = open(output_filename, 'w')
        writer = None

        def write(message):
            print(message, file=output)
    else:
        output = open(output_filename, 'wb')
        writer = riegeli.RecordWriter(output)
        write = writer.write_message

    #entries, errors, options_map = loader.load_file(filename)
    entries, errors, options_map = parser.parse_file(filename)
    entries = data.sorted(entries)

    if num_directives:
        entries = itertools.islice(entries, num_directives)
    for entry in entries:
        if isinstance(entry, data.Transaction):
            pbdir = convert_Transaction(entry)
        elif isinstance(entry, data.Open):
            pbdir = convert_Open(entry)
        elif isinstance(entry, data.Close):
            pbdir = convert_Close(entry)
        elif isinstance(entry, data.Commodity):
            pbdir = convert_Commodity(entry)
        elif isinstance(entry, data.Event):
            pbdir = convert_Event(entry)
        elif isinstance(entry, data.Note):
            pbdir = convert_Note(entry)
        elif isinstance(entry, data.Query):
            pbdir = convert_Query(entry)
        elif isinstance(entry, data.Price):
            pbdir = convert_Price(entry)
        elif isinstance(entry, data.Balance):
            pbdir = convert_Balance(entry)
        elif isinstance(entry, data.Pad):
            pbdir = convert_Pad(entry)
        else:
            pbdir = None

        if pbdir is not None:
            write("#---")
            write("# {}".format(pbdir.location.lineno))
            write("#")
            write(pbdir)
            write("")

        # Debugging aid: flip to True to dump each entry as it is converted.
        if False:
            print('-' * 80)
            printer.print_entry(entry)
            print(pbdir)
            print()

    if hasattr(writer, "close"):
        writer.close()
    output.close()
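The long isinstance chain above can also be written as a dispatch table keyed on the directive type. A sketch under the same assumptions (the convert_* converters are the hypothetical functions from the example, stubbed out here):

from beancount.core import data

# Stubs standing in for the convert_* functions used above.
def convert_Transaction(entry): ...
def convert_Open(entry): ...
# ... one stub per directive type.

CONVERTERS = {
    data.Transaction: convert_Transaction,
    data.Open: convert_Open,
    # data.Close: convert_Close, and so on for the other types.
}

def convert(entry):
    converter = CONVERTERS.get(type(entry))
    return converter(entry) if converter is not None else None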
Example #8
def extract(soup, filename, acctid_regexp, account, flag, balance_type):
    """Extract transactions from an OFX file.

    Args:
      soup: A BeautifulSoup root node.
      filename: A string, the name of the OFX file the soup was parsed from.
      acctid_regexp: A regular expression string matching the account we're interested in.
      account: An account string onto which to post the amounts found in the file.
      flag: A single-character string.
      balance_type: An enum of type BalanceType.
    Returns:
      A sorted list of entries.
    """
    new_entries = []
    counter = itertools.count()
    for acctid, currency, transactions, balance in find_statement_transactions(
            soup):
        if not re.match(acctid_regexp, acctid):
            continue

        # Create Transaction directives.
        stmt_entries = []
        for stmttrn in transactions:
            entry = build_transaction(stmttrn, flag, account, currency)
            entry = entry._replace(
                meta=data.new_metadata(filename, next(counter)))
            stmt_entries.append(entry)
        stmt_entries = data.sorted(stmt_entries)
        new_entries.extend(stmt_entries)

        # Create a Balance directive.
        if balance and balance_type is not BalanceType.NONE:
            date, number = balance
            if balance_type is BalanceType.LAST and stmt_entries:
                date = stmt_entries[-1].date

            # The Balance assertion occurs at the beginning of the date, so move
            # it to the following day.
            date += datetime.timedelta(days=1)

            meta = data.new_metadata(filename, next(counter))
            balance_entry = data.Balance(meta, date, account,
                                         amount.Amount(number, currency), None,
                                         None)
            new_entries.append(balance_entry)

    return data.sorted(new_entries)
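A minimal sketch of the date shift applied above (assumes Beancount v2; the account and amount are made up): a Balance assertion holds at the beginning of its date, so an end-of-day statement balance is asserted on the following day.

import datetime
from decimal import Decimal
from beancount.core import amount, data
from beancount.parser import printer

stmt_date = datetime.date(2023, 3, 31)   # statement closing date
meta = data.new_metadata("statement.ofx", 0)
entry = data.Balance(meta, stmt_date + datetime.timedelta(days=1),
                     "Assets:Checking",
                     amount.Amount(Decimal("1234.56"), "USD"), None, None)
printer.print_entry(entry)   # 2023-04-01 balance Assets:Checking  1234.56 USD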
Example #9
def main():
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument('infile',
                           type=argparse.FileType('r'),
                           help='Filename or "-" for stdin')
    args = argparser.parse_args()

    # Read input from stdin or a given filename.
    entries, errors, options = loader.load_string(args.infile.read())

    # Print out sorted entries.
    for entry in data.sorted(entries):
        printer.print_entry(entry)
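A sketch of what data.sorted guarantees (beancount v2): entries are ordered by date, then by a per-type precedence (Open and Balance sort before Transactions on the same day, Document and Close after), then by source line number. The tiny ledger is made up:

from beancount import loader
from beancount.core import data

entries, _, _ = loader.load_string("""
2023-01-05 * "Same-day transaction"
  Assets:Cash     -5.00 USD
  Expenses:Misc    5.00 USD
2023-01-05 open Assets:Cash
2023-01-05 open Expenses:Misc
""")
print([type(e).__name__ for e in data.sorted(entries)])
# ['Open', 'Open', 'Transaction']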
Example #10
    def extract(self, file, existing_entries):
        csvfile = open(file=file.name, encoding='windows_1252')
        reader = csv.reader(csvfile, delimiter=';')
        meta = data.new_metadata(file.name, 0)
        entries = []

        for row in reader:

            try:
                book_date, text, credit, debit, val_date, balance = tuple(row)
                book_date = datetime.strptime(book_date, '%Y-%m-%d').date()
                if credit:
                    amount = data.Amount(Decimal(credit), self.currency)
                elif debit:
                    amount = data.Amount(Decimal(debit), self.currency)
                else:
                    amount = None
                if balance:
                    balance = data.Amount(Decimal(balance), self.currency)
                else:
                    balance = None
            except Exception as e:
                logging.debug(e)
            else:
                logging.debug((book_date, text, amount, val_date, balance))
                posting = data.Posting(self.account, amount, None, None, None,
                                       None)
                entry = data.Transaction(meta, book_date, '*', '', text,
                                         data.EMPTY_SET, data.EMPTY_SET,
                                         [posting])
                entries.append(entry)
                # Only add a balance assertion on the first day of the
                # following month (start of month).
                book_date = book_date + timedelta(days=1)
                if balance and book_date.day == 1:
                    entry = data.Balance(meta, book_date, self.account,
                                         balance, None, None)
                    entries.append(entry)

        csvfile.close()
        entries = data.sorted(entries)
        return entries
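A hedged aside: importers like this often use beancount.core.number.D instead of Decimal directly, since D tolerates the thousands separators that appear in CSV amounts (assumes Beancount v2):

from beancount.core.number import D

# D strips commas that would make Decimal() raise InvalidOperation.
print(D("1,234.56"))   # Decimal('1234.56')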
Example #11
def missing_open(filename):
    """Print Open directives missing in FILENAME.

    This can be useful during demos in order to quickly generate all the
    required Open directives without having to type them manually.

    """
    entries, errors, options_map = loader.load_file(filename)

    # Get accounts usage and open directives.
    first_use_map, _ = getters.get_accounts_use_map(entries)
    open_close_map = getters.get_account_open_close(entries)

    new_entries = []
    for account, first_use_date in first_use_map.items():
        if account not in open_close_map:
            new_entries.append(
                data.Open(data.new_metadata(filename, 0), first_use_date, account,
                          None, None))

    dcontext = options_map['dcontext']
    printer.print_entries(data.sorted(new_entries), dcontext)
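A minimal sketch of the Open construction in the loop above, assuming Beancount v2 (account and date are made up); the two None fields are the currency constraint and the booking method:

import datetime
from beancount.core import data
from beancount.parser import printer

meta = data.new_metadata("ledger.beancount", 0)
entry = data.Open(meta, datetime.date(2023, 2, 1), "Expenses:Food", None, None)
printer.print_entry(entry)   # 2023-02-01 open Expenses:Food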
Example #12
def includes_entries(subset_entries, entries):
    """Check if a list of entries is included in another list.

    Args:
      subset_entries: The set of entries to look for in 'entries'.
      entries: The larger list of entries that could include 'subset_entries'.
    Returns:
      A boolean and a list of missing entries.
    Raises:
      ValueError: If a duplicate entry is found.
    """
    subset_hashes, subset_errors = hash_entries(subset_entries)
    subset_keys = set(subset_hashes.keys())
    hashes, errors = hash_entries(entries)
    keys = set(hashes.keys())

    if subset_errors or errors:
        error = (subset_errors + errors)[0]
        raise ValueError(str(error))

    includes = subset_keys.issubset(keys)
    missing = data.sorted([subset_hashes[key] for key in subset_keys - keys])
    return (includes, missing)
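A sketch of the hash_entries helper these comparison functions rely on (beancount.core.compare, v2): it returns a dict mapping a stable content hash to each entry, plus a list of errors for duplicate entries. Assumes Beancount v2 is installed:

from beancount import loader
from beancount.core import compare

entries, _, _ = loader.load_string("2023-01-01 open Assets:Cash\n")
hashes, errors = compare.hash_entries(entries)
print(len(hashes), errors)   # 1 []  -- one unique entry, no duplicates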
Example #13
 def test_sort(self):
     entries = self.create_sort_data()
     sorted_entries = data.sorted(entries)
     self.check_sorted(sorted_entries)
Example #14
 def print(self, output):
     printer.print_entries(
         list(chain(self.entries, self.pad_balances,
                    bc.sorted(self.transactions))),
         file=output)
Example #15
def write_returns_html(
        dirname: str,
        pricer: returnslib.Pricer,
        account_data: List[AccountData],
        title: str,
        end_date: Date,
        target_currency: Optional[Currency] = None) -> str:
    """Write out returns report to a directory with files in it."""

    logging.info("Writing returns dir for %s: %s", title, dirname)
    os.makedirs(dirname, exist_ok=True)
    with open(path.join(dirname, "index.html"), "w") as indexfile:
        fprint = partial(print, file=indexfile)
        fprint(RETURNS_TEMPLATE_PRE.format(style=STYLE, title=title))

        if not target_currency:
            cost_currencies = set(r.cost_currency for r in account_data)
            target_currency = cost_currencies.pop()
            assert not cost_currencies, (
                "Incompatible cost currencies {} for accounts {}".format(
                    cost_currencies,
                    ",".join([r.account for r in account_data])))

        # TODO(blais): Prices should be plotted separately, by currency.
        # fprint("<h2>Prices</h2>")
        # pairs = set((r.currency, r.cost_currency) for r in account_data)
        # plots = plot_prices(dirname, pricer.price_map, pairs)
        # for _, filename in sorted(plots.items()):
        #     fprint('<img src={} style="width: 100%"/>'.format(filename))

        fprint("<h2>Cash Flows</h2>")

        cash_flows = returnslib.truncate_and_merge_cash_flows(
            pricer, account_data, None, end_date)
        returns = returnslib.compute_returns(cash_flows, pricer,
                                             target_currency, end_date)

        transactions = data.sorted(
            [txn for ad in account_data for txn in ad.transactions])

        # Note: This is where the vast majority of the time is spent.
        plots = plot_flows(dirname, pricer.price_map, cash_flows, transactions,
                           returns.total)
        fprint('<img src={} style="width: 100%"/>'.format(plots["flows"]))
        fprint('<img src={} style="width: 100%"/>'.format(plots["cumvalue"]))

        fprint("<h2>Returns</h2>")
        fprint(
            render_table(Table(["Total", "Ex-Div", "Div"],
                               [[returns.total, returns.exdiv, returns.div]]),
                         floatfmt="{:.2%}"))

        # Compute table of returns over intervals.
        table = compute_returns_table(pricer, target_currency, account_data,
                                      get_calendar_intervals(TODAY))
        fprint("<p>", render_table(table, floatfmt="{:.1%}", classes=["full"]),
               "</p>")

        table = compute_returns_table(pricer, target_currency, account_data,
                                      get_cumulative_intervals(TODAY))
        fprint("<p>", render_table(table, floatfmt="{:.1%}", classes=["full"]),
               "</p>")

        fprint('<h2 class="new-page">Accounts</h2>')
        fprint("<p>Cost Currency: {}</p>".format(target_currency))
        accounts_df = get_accounts_table(account_data)
        fprint(accounts_df.to_html())

        fprint('<h2 class="new-page">Cash Flows</h2>')
        df = investments.cash_flows_to_table(cash_flows)
        fprint(df.to_html())

        fprint(RETURNS_TEMPLATE_POST)

    return indexfile.name
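A tiny standalone sketch of the target-currency inference above: pop the lone cost currency from the set and assert that no others remain (the value is made up):

cost_currencies = {"USD"}        # e.g. {r.cost_currency for r in account_data}
target_currency = cost_currencies.pop()
assert not cost_currencies, "Incompatible cost currencies"
print(target_currency)           # USD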
Example #16
def process_args():
    """Process the arguments. This also initializes the logging module.

    Returns:
      A tuple of:
        args: The argparse receiver of command-line arguments.
        jobs: A list of DatedPrice job objects.
        entries: A list of all the parsed entries.
    """
    parser = version.ArgumentParser(
        description=beancount.prices.__doc__.splitlines()[0])

    # Input sources or filenames.
    parser.add_argument(
        'sources',
        nargs='+',
        help=('A list of filenames (or source "module/symbol", if -e is '
              'specified) from which to create a list of jobs.'))

    parser.add_argument(
        '-e',
        '--expressions',
        '--expression',
        action='store_true',
        help=('Interpret the arguments as "module/symbol" source strings.'))

    # Regular options.
    parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        help=("Print out progress log. Specify twice for debugging info."))

    parser.add_argument(
        '-d',
        '--date',
        action='store',
        type=date_utils.parse_date_liberally,
        help=("Specify the date for which to fetch the prices."))

    parser.add_argument(
        '-i',
        '--inactive',
        action='store_true',
        help=
        ("Select all commodities from input files, not just the ones active on the date"
         ))

    parser.add_argument(
        '-u',
        '--undeclared',
        action='store',
        help=
        ("Include commodities viewed in the file even without a "
         "corresponding Commodity directive, from this default source. "
         "The currency name itself is used as the lookup symbol in this default source."
         ))

    parser.add_argument(
        '-c',
        '--clobber',
        action='store_true',
        help=
        ("Do not skip prices which are already present in input files; fetch them anyway."
         ))

    parser.add_argument(
        '-a',
        '--all',
        action='store_true',
        help=("A shorthand for --inactive, --undeclared, --clobber."))

    parser.add_argument(
        '-s',
        '--swap-inverted',
        action='store_true',
        help=
        ("For inverted sources, swap currencies instead of inverting the rate. "
         "For example, if fetching the rate for CAD from 'USD:google/^CURRENCY:USDCAD' "
         "results in 1.25, by default we would output \"price CAD  0.8000 USD\". "
         "Using this option we would instead output \" price USD   1.2500 CAD\"."
         ))

    parser.add_argument(
        '-n',
        '--dry-run',
        action='store_true',
        help=
        ("Don't actually fetch the prices, just print the list of the ones to be fetched."
         ))

    # Caching options.
    cache_group = parser.add_argument_group('cache')
    cache_filename = path.join(tempfile.gettempdir(),
                               "{}.cache".format(path.basename(sys.argv[0])))
    cache_group.add_argument(
        '--cache',
        dest='cache_filename',
        action='store',
        default=cache_filename,
        help="Enable the cache and with the given cache name.")
    cache_group.add_argument('--no-cache',
                             dest='cache_filename',
                             action='store_const',
                             const=None,
                             help="Disable the price cache.")

    cache_group.add_argument('--clear-cache',
                             action='store_true',
                             help="Clear the cache prior to startup")

    args = parser.parse_args()

    verbose_levels = {
        None: logging.WARN,
        0: logging.WARN,
        1: logging.INFO,
        2: logging.DEBUG
    }
    logging.basicConfig(level=verbose_levels[args.verbose],
                        format='%(levelname)-8s: %(message)s')

    if args.all:
        args.inactive = args.clobber = True
        args.undeclared = DEFAULT_SOURCE

    # Setup for processing.
    setup_cache(args.cache_filename, args.clear_cache)

    # Get the list of DatedPrice jobs to get from the arguments.
    logging.info("Processing at date: %s", args.date or datetime.date.today())
    jobs = []
    all_entries = []
    dcontext = None
    if args.expressions:
        # Interpret the arguments as price sources.
        for source_str in args.sources:
            psources = []
            try:
                psource_map = find_prices.parse_source_map(source_str)
            except ValueError:
                extra = "; did you provide a filename?" if path.exists(
                    source_str) else ''
                msg = ('Invalid source "{{}}"{}. '.format(extra) +
                       'Supported format is "CCY:module/SYMBOL"')
                parser.error(msg.format(source_str))
            else:
                for currency, psources in psource_map.items():
                    jobs.append(
                        find_prices.DatedPrice(psources[0].symbol, currency,
                                               args.date, psources))
    else:
        # Interpret the arguments as Beancount input filenames.
        for filename in args.sources:
            if not path.exists(filename) or not path.isfile(filename):
                parser.error('File does not exist: "{}"; '
                             'did you mean to use -e?'.format(filename))
                continue
            logging.info('Loading "%s"', filename)
            entries, errors, options_map = loader.load_file(
                filename, log_errors=sys.stderr)
            if dcontext is None:
                dcontext = options_map['dcontext']
            jobs.extend(
                find_prices.get_price_jobs_at_date(entries, args.date,
                                                   args.inactive,
                                                   args.undeclared))
            all_entries.extend(entries)

    return args, jobs, data.sorted(all_entries), dcontext
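A hedged aside on the --expressions mode above: parse_source_map (from beancount.prices.find_prices, the same module the snippet calls) parses the "CCY:module/SYMBOL" source strings. Assumes Beancount v2 is installed; the symbol is made up:

from beancount.prices import find_prices

# 'yahoo' resolves to the beancount.prices.sources.yahoo module.
psource_map = find_prices.parse_source_map("USD:yahoo/AAPL")
print(psource_map)
# e.g. {'USD': [PriceSource(module=<module '...sources.yahoo'>,
#               symbol='AAPL', invert=False)]}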
Example #17
def main():
    argparser = argparse.ArgumentParser()
    ameritrade.add_args(argparser)

    argparser.add_argument('-i',
                           '--ignore-errors',
                           dest='raise_error',
                           action='store_false',
                           default=True,
                           help="Raise an error on unhandled messages")
    argparser.add_argument(
        '-J',
        '--debug-file',
        '--json',
        action='store',
        help="Debug filename where to strore al the raw JSON")
    argparser.add_argument(
        '-j',
        '--debug-transaction',
        action='store',
        type=int,
        help="Process a single transaction and print debugging data about it.")

    argparser.add_argument('-e',
                           '--end-date',
                           action='store',
                           help="Period of end date minus one year.")

    argparser.add_argument('-B',
                           '--no-booking',
                           dest='booking',
                           action='store_false',
                           default=True,
                           help="Do booking to resolve lots.")

    argparser.add_argument('-l',
                           '--ledger',
                           action='store',
                           help=("Beancount ledger to remove already imported "
                                 "transactions (optional)."))

    argparser.add_argument(
        '-g',
        '--group-by-underlying',
        action='store_true',
        help=("Group the transaction output by corresponding "
              "underlying. This is great for options."))

    args = argparser.parse_args()

    # Open a connection and figure out the main account.
    api = ameritrade.open(ameritrade.config_from_args(args))
    accountId = utils.GetMainAccount(api)
    positions = utils.GetPositions(api, accountId)

    # Fetch transactions.
    # Note that the following arguments are also honored:
    #   endDate=datetime.date.today().isoformat())
    #   startDate='2014-01-01',
    #   endDate='2015-01-01')
    if args.end_date:
        end_date = parser.parse(args.end_date).date()
        start_date = end_date - datetime.timedelta(days=364)
        start = start_date.isoformat()
        end = end_date.isoformat()
    else:
        start = end = None
    txns = api.GetTransactions(accountId=accountId,
                               startDate=start,
                               endDate=end)
    if isinstance(txns, dict):
        pprint(txns, sys.stderr)
        return
    txns.reverse()

    # Optionally write out the raw original content downloaded to a file.
    if args.debug_file:
        with open(args.debug_file, 'w') as ofile:
            ofile.write(pformat(txns))

    # Process each of the transactions.
    entries = []
    balances = collections.defaultdict(inventory.Inventory)
    commodities = {}
    for txn in txns:
        if args.debug_transaction:
            if txn['transactionId'] != args.debug_transaction:
                continue
            pprint(txn)

        # print('{:30} {}'.format(txn['type'], txn['description'])); continue
        dispatch_entries = RunDispatch(txn, balances, commodities,
                                       args.raise_error)
        if dispatch_entries:
            entries.extend(dispatch_entries)

            # Update a balance account of just the units.
            #
            # This is only here so that the options removal can figure out which
            # side is the reduction side and what sign to use on the position
            # change. Ideally the API would provide a side indication and we
            # wouldn't have to maintain any state at all. {492fa5292636}
            for entry in data.filter_txns(dispatch_entries):
                for posting in entry.postings:
                    balance = balances[posting.account]
                    if posting.units is not None:
                        balance.add_amount(posting.units)

    # Add a final balance entry.
    balance_entry = CreateBalance(api, accountId)
    if balance_entry:
        entries.append(balance_entry)

    if args.booking:
        # Book the entries.
        entries, balance_errors = booking.book(entries,
                                               OPTIONS_DEFAULTS.copy())
        if balance_errors:
            printer.print_errors(balance_errors)

        # Remove dates on reductions when they have no prices. This is an
        # artifact of not being able to pass in prior balance state to the
        # booking code, which we will fix in v3.
        entries = RemoveDateReductions(entries)

        # Match up the trades we can in this subset of the history and pair them up
        # with a common random id.
        entries, balances = MatchTrades(entries)

        # Add zero prices for expired options for which we still have non-zero
        # positions.
        entries.extend(GetExpiredOptionsPrices(positions, balances))

    # If a Beancount ledger has been specified, open it, read it in, and remove
    # all the transactions up to the latest one with the transaction id (as a
    # link) that's present in the ledger.
    if args.ledger:
        ledger_entries, _, __ = loader.load_file(args.ledger)

        # Find the date of the last transaction in the ledger with a TD
        # transaction id, and the full set of links to remove.
        links = set()
        last_date = None
        for entry in data.filter_txns(ledger_entries):
            for link in (entry.links or {}):
                if re.match(r"td-\d{9,}", link):
                    links.add(link)
                    last_date = entry.date

        # Remove all the transactions already present in the ledger.
        #
        # Also remove all the non-transactions with a date before that of the
        # last linked one that was found. This allows us to remove Price and
        # Commodity directives. (In v3, when links are available on every
        # directive, we can just use the links.)
        entries = [
            entry for entry in entries
            if ((isinstance(entry, data.Transaction) and not entry.links
                 & links) or
                (not isinstance(entry, data.Transaction) and
                 (last_date is None or entry.date >= last_date)))
        ]

    if args.group_by_underlying:
        # Group the transactions by their underlying, with org-mode separators.
        groups = GroupByUnderlying(entries)
        for (has_option, currency), group_entries in sorted(groups.items()):
            header = currency or "General"
            if has_option:
                header = "Options: {}".format(header)
            print("** {}".format(header))
            print()
            printer.print_entries(data.sorted(group_entries), file=sys.stdout)
            print()
            print()
    else:
        # Render all the entries chronologically (the default).
        sentries = SortCommodityFirst(entries)
        printer.print_entries(sentries, file=sys.stdout)
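A standalone sketch of the link-based dedup step above (pure stdlib, made-up link ids): collect the td- links already present in the ledger, then drop new transactions whose links intersect them.

import re

ledger_links = {"td-123456789", "unrelated-link"}
existing = {link for link in ledger_links if re.match(r"td-\d{9,}", link)}

new_txn_links = [{"td-123456789"}, {"td-987654321"}, set()]
fresh = [links for links in new_txn_links if not links & existing]
print(fresh)   # [{'td-987654321'}, set()]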