示例#1
0
def main():
    """Parse the command line, resolve the requested subcommand and run it.

    Commands are discovered dynamically: a command 'foo-bar' maps to a
    module-level function named 'do_foo_bar'.
    """
    command_lines = ['  {:24}: {}'.format(*pair) for pair in get_commands()]
    commands_doc = 'Available Commands:\n' + '\n'.join(command_lines)

    argparser = version.ArgumentParser(
        description=__doc__,
        # RawTextHelpFormatter preserves the newlines in the epilog.
        formatter_class=argparse.RawTextHelpFormatter,
        epilog=commands_doc)
    argparser.add_argument('command', action='store',
                           help="The command to run.")
    argparser.add_argument('filename', nargs='?',
                           help='Beancount input filename.')
    argparser.add_argument('rest', nargs='*', help='All remaining arguments.')
    opts = argparser.parse_args()

    # Map the command name to its 'do_*' handler in this module's globals.
    handler_name = "do_{}".format(opts.command.replace('-', '_'))
    handler = globals().get(handler_name)
    if handler is None:
        argparser.error("Invalid command name: '{}'".format(opts.command))
    else:
        handler(opts.filename, opts.rest)
示例#2
0
def main():
    """Main web service runner. This runs the event loop and blocks indefinitely."""
    parser = version.ArgumentParser(description=__doc__.strip())
    add_web_arguments(parser)
    run_app(parser.parse_args())
    def test_version(self):
        """version.ArgumentParser must subclass argparse's and support --version."""
        parser = version.ArgumentParser()
        self.assertIsInstance(parser, argparse.ArgumentParser)

        # --version prints the version string to stdout and exits the process,
        # so capture stdout and expect a SystemExit.
        with test_utils.capture('stdout') as stdout:
            with self.assertRaises(SystemExit):
                parser.parse_args(['--version'])
        self.assertRegex(stdout.getvalue(), r'Beancount \d+\.\d+\.\d+')
示例#4
0
def main():
    """Extract trades from metadata-annotated postings and report on them.

    Loads the given Beancount file, pairs augmenting and reducing postings
    into trades, and renders a P/L table either to stdout or to the file
    given with --output, in text or CSV format.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = version.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount input filename')

    oparser = parser.add_argument_group('Outputs')
    oparser.add_argument(
        '-o',
        '--output',
        action='store',
        help="Filename to output results to (default goes to stdout)")
    oparser.add_argument('-f',
                         '--format',
                         default='text',
                         choices=['text', 'csv'],
                         help="Output format to render to (text, csv)")

    args = parser.parse_args()

    # Load the input file.
    entries, errors, options_map = loader.load_file(args.filename)

    # Get the list of trades.
    trades = extract_trades(entries)

    # Produce a table of all the trades.
    columns = ('units currency cost_currency '
               'buy_date buy_price sell_date sell_price pnl').split()
    header = [
        'Units', 'Currency', 'Cost Currency', 'Buy Date', 'Buy Price',
        'Sell Date', 'Sell Price', 'P/L'
    ]
    body = []
    for aug, red in trades:
        # The reducing posting carries a negative number of units; negate it
        # so the position size is reported as a positive quantity.
        units = -red.posting.units.number
        buy_price = aug.posting.price.number
        sell_price = red.posting.price.number
        # Quantize the P/L to the same number of digits as the buy price.
        pnl = (units * (sell_price - buy_price)).quantize(buy_price)
        body.append([
            units, red.posting.units.currency,
            red.posting.price.currency,
            aug.txn.date.isoformat(), buy_price,
            red.txn.date.isoformat(), sell_price, pnl
        ])
    trades_table = table.Table(columns, header, body)

    # Render the table as text or CSV. Close the stream only if we opened it;
    # never close sys.stdout.
    if args.output:
        with open(args.output, 'w') as outfile:
            table.render_table(trades_table, outfile, args.format)
    else:
        table.render_table(trades_table, sys.stdout, args.format)
示例#5
0
def main():
    """Load a Beancount file, build the report and write it to stdout as XHTML."""
    logging.basicConfig(level=logging.INFO, format='%(levelname)-8s: %(message)s')
    argparser = version.ArgumentParser(description=__doc__.strip())
    argparser.add_argument('filename', help='Beancount input filename')
    args = argparser.parse_args()

    # Errors are routed to the logging module rather than collected.
    entries, _, options_map = loader.load_file(args.filename,
                                               log_errors=logging.error)

    report = create_report(entries, options_map)
    sys.stdout.write(format_xhtml_report(report, options_map))
示例#6
0
def main():
    """Align a Beancount file's amounts and write the formatted result.

    Reads the input from a file (or stdin when the filename is missing or
    '-'), aligns the contents, and writes the result to --output or stdout.
    Returns 0 on success.
    """
    parser = version.ArgumentParser(description=__doc__.strip())

    parser.add_argument('filename', nargs='?', help='Beancount filename')

    parser.add_argument('-o',
                        '--output',
                        action='store',
                        help="Output file (stdout if not specified)")

    parser.add_argument(
        '-w',
        '--prefix-width',
        action='store',
        type=int,
        help=("Use this prefix width instead of determining an optimal "
              "value automatically"))

    parser.add_argument(
        '-W',
        '--num-width',
        action='store',
        type=int,
        help=("Use this width to render numbers instead of determining "
              "an optimal value"))

    parser.add_argument('-c',
                        '--currency-column',
                        action='store',
                        type=int,
                        help=("Align currencies in this column."))

    opts = parser.parse_args()

    # Read the original contents. Use a context manager so the file is closed
    # even if reading fails, and never close sys.stdin.
    if opts.filename in (None, '-'):
        contents = sys.stdin.read()
    else:
        with open(opts.filename) as infile:
            contents = infile.read()

    # Align the contents.
    formatted_contents = align_beancount(contents, opts.prefix_width,
                                         opts.num_width, opts.currency_column)

    # Make sure not to open the output file until we've passed out sanity
    # checks. We want to allow overwriting the input file, but want to avoid
    # losing it in case of errors!
    if opts.output:
        with open(opts.output, 'w') as outfile:
            outfile.write(formatted_contents)
    else:
        sys.stdout.write(formatted_contents)

    return 0
示例#7
0
    def scrape(self, filename, extra_args=None):
        """Run the web app on an example file and scrape its pages.

        Args:
          filename: An example file name, relative to the repository's
            'examples' directory.
          extra_args: An optional list of extra command-line argument strings.
        """
        # Resolve the example file against the repository root.
        abs_filename = path.join(test_utils.find_repository_root(__file__),
                                 'examples', filename)

        argparser = version.ArgumentParser()
        web.add_web_arguments(argparser)
        argv = [abs_filename, '--quiet',
                '--port', str(test_utils.get_test_port())]
        argv.extend(extra_args or [])
        webargs = argparser.parse_args(argv)

        web.scrape_webapp(webargs, self.check_page_okay, self.ignore_regexp)
示例#8
0
    def from_args(cls, argv=None, **kwds):
        """A convenience method used to create an instance from arguments.

        This creates an instance of the report with default arguments. This is a
        convenience that may be used for tests. Our actual script uses subparsers
        and invokes add_args() and creates an appropriate instance directly.

        Args:
          argv: A list of strings, command-line arguments to use to construct the report.
          kwds: A dict of other keyword arguments to pass to the report's constructor.
        Returns:
          A new instance of the report.
        """
        argparser = version.ArgumentParser()
        cls.add_args(argparser)
        parsed_args = argparser.parse_args(argv or [])
        return cls(parsed_args, argparser, **kwds)
def main():
    """Load a Beancount file with hardcore validations and report errors.

    Returns:
      1 if any errors were found, 0 otherwise, so the exit status can be
      used in a shell conditional.
    """
    parser = version.ArgumentParser(description=__doc__)

    parser.add_argument('filename', help='Beancount input filename.')

    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        help='Print timings.')

    # Note: These are useful during development. We need to devise a global
    # mechanism that will work from all the invocation programs, embedded in the
    # loader.
    parser.add_argument('-C', '--no-cache',
                        action='store_false',
                        dest='use_cache',
                        default=True,
                        help='Disable the cache from the command-line.')
    parser.add_argument('--cache-filename',
                        action='store',
                        help='Override the name of the cache')

    opts = parser.parse_args()

    if opts.verbose:
        logging.basicConfig(level=logging.INFO,
                            format='%(levelname)-8s: %(message)s')

    # Override loader caching setup if disabled or if the filename is
    # overridden.
    if not opts.use_cache or opts.cache_filename:
        loader.initialize(opts.use_cache, opts.cache_filename)

    with misc_utils.log_time('beancount.loader (total)', logging.info):
        # Loading runs checking and validation automatically; errors are
        # printed to stderr as they are found.
        entries, errors, _ = loader.load_file(
            opts.filename,
            log_timings=logging.info,
            log_errors=sys.stderr,
            # Force slow and hardcore validations, just for check.
            extra_validations=validation.HARDCORE_VALIDATIONS)

    # Signal failure through the exit code if anything went wrong.
    return 1 if errors else 0
示例#10
0
def main():
    """Generate tutorial outputs for every report command in COMMANDS.

    Runs each report command against the given Beancount file, writing its
    stdout to '<name>.output' under the output directory and, if anything was
    printed on stderr, to '<name>.errors'. Raises RuntimeError if a command
    exits with a non-zero code. Returns 0 on success.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = version.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount filename')
    parser.add_argument('output_directory',
                        help='Output directory for the tutorial files')
    args = parser.parse_args()

    for report_name, command_template in COMMANDS:
        logging.info('Generating %s: %s', report_name, command_template)
        # Compute the destination filenames once per report.
        output_filename = path.join(args.output_directory,
                                    '{}.output'.format(report_name))
        errors_filename = path.join(args.output_directory,
                                    '{}.errors'.format(report_name))
        command = command_template.format(args.filename)
        # Drop the program name; report.main() only takes the arguments.
        command_args = command.split()[1:]
        with test_utils.capture('stdout') as stdout:
            with test_utils.capture('stderr') as stderr:
                try:
                    returncode = report.main(command_args)
                except SystemExit as exc:
                    returncode = exc.code
                if returncode != 0:
                    with open(errors_filename) as efile:
                        errors = efile.read()
                    raise RuntimeError(
                        "Error running '{}': exit with {}; errors: {}".format(
                            command, returncode, errors))

        # NOTE(blais): This could be removed by improving the capture() function.
        with open(output_filename, 'w') as outfile:
            outfile.write(stdout.getvalue())

        if stderr.getvalue():
            with open(errors_filename, 'w') as outfile:
                outfile.write(stderr.getvalue())

    return 0
示例#11
0
def get_list_report_string(only_report=None):
    """Return a formatted string for the list of supported reports.

    Args:
      only_report: A string, the name of a single report to produce the help
        for. If not specified, list all the available reports.
    Returns:
      A help string, or None, if 'only_report' was provided and is not a valid
      report name.
    """
    oss = io.StringIO()
    matched = 0
    for report_class in get_all_reports():
        # Skip reports that don't match the requested name, if one was given.
        if only_report and only_report not in report_class.names:
            continue

        # Flatten the docstring's whitespace and wrap it as an indented
        # paragraph.
        flat_doc = re.sub(' +', ' ',
                          ' '.join(report_class.__doc__.splitlines()))
        description = textwrap.fill(flat_doc,
                                    initial_indent="    ",
                                    subsequent_indent="    ",
                                    width=80)

        # Build a parser to accumulate the report's arguments.
        parser = version.ArgumentParser()
        report_class.add_args(parser)

        # Get the list of supported formats.
        ## formats = report_class.get_supported_formats()

        oss.write('{}:\n{}\n'.format(','.join(report_class.names), description))
        matched += 1

    return oss.getvalue() if matched else None
def create_legacy_arguments_parser(description: str, run_func: callable):
    """Create an arguments parser for all the ingestion bean-tools.

    Args:
      description: The program description string.
      run_func: A callable to run the particular command; it is stored as the
        'command' default on the parser so the caller can dispatch via
        'args.command' after parsing.
    Returns:
      An argparse.ArgumentParser instance configured with the common 'config'
      and 'downloads' arguments.
    """
    parser = version.ArgumentParser(description=description)

    parser.add_argument('config', action='store', metavar='CONFIG_FILENAME',
                        help=('Importer configuration file. '
                              'This is a Python file with a data structure that '
                              'is specific to your accounts'))

    parser.add_argument('downloads', nargs='+', metavar='DIR-OR-FILE',
                        default=[],
                        help='Filenames or directories to search for files to import')

    # Stash the command callable on the namespace for later dispatch.
    parser.set_defaults(command=run_func)

    return parser
示例#13
0
def main():
    """Load a Beancount file and export its directives to a new SQLite database."""
    parser = version.ArgumentParser(description=__doc__)
    parser.add_argument('filename', help='Beancount input filename')
    parser.add_argument('database', help='Filename of database file to create')
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')

    entries, errors, options_map = loader.load_file(args.filename,
                                                    log_timings=logging.info,
                                                    log_errors=sys.stderr)

    # Delete previous database if it already exists.
    if path.exists(args.database):
        os.remove(args.database)

    # The only supported DBAPI-2.0 backend for now is SQLite3.
    connection = dbapi.connect(args.database)

    setup_decimal_support()
    # Run each writer in turn, timing each step. Plain functions are labeled
    # by their __name__, writer instances by their class name.
    writers = [
        output_common,
        output_transactions,
        OpenWriter(),
        CloseWriter(),
        PadWriter(),
        BalanceWriter(),
        NoteWriter(),
        PriceWriter(),
        DocumentWriter(),
    ]
    for writer in writers:
        label = getattr(writer, '__name__', writer.__class__.__name__)
        with misc_utils.log_time(label, logging.info):
            writer(connection, entries)

    return 0
示例#14
0
def main():
    """Bake a Beancount web site into static files, optionally archiving them.

    Serves the input file through the web app, fetches every page to a local
    directory, validates the fetched HTML, then (if the output name carries a
    known archive extension) compresses the directory and deletes it.
    """
    parser = version.ArgumentParser(description=__doc__)

    # The web arguments supply the serving options (and, presumably, the
    # input 'filename' argument used below -- TODO confirm in add_web_arguments).
    web_group = web.add_web_arguments(parser)
    web_group.set_defaults(port=9475)

    group = parser.add_argument_group("Bake process arguments")

    group.add_argument('output',
                       help=('The output directory or archive name. If you '
                             'specify a filename with a well-known extension,'
                             'we automatically archive the fetched directory '
                             'contents to this archive name and delete them.'))

    # In order to be able to bake in a reasonable amount of time, we need to
    # remove some pages; you can use this switch to do that.
    group.add_argument('--render-all-pages',
                       '--full',
                       action='store_true',
                       help=("Don't ignore some of the more numerious pages, "
                             "like monthly reports."))

    opts = parser.parse_args()

    # Figure out the archival method. An unknown extension is a fatal error;
    # no extension means "leave the directory as-is".
    output_directory, extension = file_utils.path_greedy_split(opts.output)
    if extension:
        try:
            archival_command = ARCHIVERS[extension]
        except KeyError as exc:
            raise SystemExit(
                "ERROR: Unknown archiver type '{}'".format(extension)) from exc
    else:
        archival_command = None

    # Check pre-conditions on input/output filenames.
    if not path.exists(opts.filename):
        raise SystemExit("ERROR: Missing input file '{}'".format(
            opts.filename))
    if path.exists(opts.output):
        raise SystemExit("ERROR: Output path already exists '{}'".format(
            opts.output))
    if path.exists(output_directory):
        raise SystemExit("ERROR: Output directory already exists '{}'".format(
            output_directory))

    # Bake to a directory hierarchy of files with local links.
    bake_to_directory(opts, output_directory, opts.render_all_pages)

    # Verify the bake output files. This is just a sanity checking step.
    # You can also use "bean-doctor validate_html <file> to run this manually.
    logging.info('Validating HTML output files & links.')
    files, missing, empty = scrape.validate_local_links_in_dir(
        output_directory)
    logging.info('Validation: %d files processed', len(files))
    for target in missing:
        logging.error("Validation error: Missing '%s'", target)
    for target in empty:
        logging.error("Validation error: Empty '%s'", target)

    # Archive if requested.
    if archival_command is not None:
        # Normalize the paths and ensure sanity before we start compression.
        output_directory = path.abspath(output_directory)
        archive_filename = path.abspath(opts.output)
        if not path.exists(output_directory):
            raise IOError("Directory to archive '{}' does not exist".format(
                output_directory))
        if path.exists(archive_filename):
            raise IOError("Output archive name '{}' already exists".format(
                archive_filename))

        # Dispatch to a particular compressor: ARCHIVERS entries are either a
        # command string or a callable (see isinstance/callable checks below).
        if isinstance(archival_command, str):
            archive(archival_command, output_directory, archive_filename, True)
        elif callable(archival_command):
            archival_command(output_directory, archive_filename)

        # Delete the output directory.
        shutil.rmtree(output_directory)

    print("Output in '{}'".format(opts.output))
示例#15
0
def process_args():
    """Process the arguments. This also initializes the logging module.

    Returns:
      A tuple of:
        args: The argparse receiver of command-line arguments.
        jobs: A list of DatedPrice job objects.
        entries: A list of all the parsed entries, date-sorted.
        dcontext: The display context of the first loaded file, or None when
          running in expression mode or when no file was loaded.
    """
    parser = version.ArgumentParser(
        description=beancount.prices.__doc__.splitlines()[0])

    # Input sources or filenames.
    parser.add_argument(
        'sources',
        nargs='+',
        help=('A list of filenames (or source "module/symbol", if -e is '
              'specified) from which to create a list of jobs.'))

    parser.add_argument(
        '-e',
        '--expressions',
        '--expression',
        action='store_true',
        help=('Interpret the arguments as "module/symbol" source strings.'))

    # Regular options.
    parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        help=("Print out progress log. Specify twice for debugging info."))

    parser.add_argument(
        '-d',
        '--date',
        action='store',
        type=date_utils.parse_date_liberally,
        help=("Specify the date for which to fetch the prices."))

    parser.add_argument(
        '-i',
        '--inactive',
        action='store_true',
        help=
        ("Select all commodities from input files, not just the ones active on the date"
         ))

    parser.add_argument(
        '-u',
        '--undeclared',
        action='store',
        help=
        ("Include commodities viewed in the file even without a "
         "corresponding Commodity directive, from this default source. "
         "The currency name itself is used as the lookup symbol in this default source."
         ))

    parser.add_argument(
        '-c',
        '--clobber',
        action='store_true',
        help=
        ("Do not skip prices which are already present in input files; fetch them anyway."
         ))

    parser.add_argument(
        '-a',
        '--all',
        action='store_true',
        help=("A shorthand for --inactive, --undeclared, --clobber."))

    parser.add_argument(
        '-s',
        '--swap-inverted',
        action='store_true',
        help=
        ("For inverted sources, swap currencies instead of inverting the rate. "
         "For example, if fetching the rate for CAD from 'USD:google/^CURRENCY:USDCAD' "
         "results in 1.25, by default we would output \"price CAD  0.8000 USD\". "
         "Using this option we would instead output \" price USD   1.2500 CAD\"."
         ))

    parser.add_argument(
        '-n',
        '--dry-run',
        action='store_true',
        help=
        ("Don't actually fetch the prices, just print the list of the ones to be fetched."
         ))

    # Caching options. The default cache lives in the temp directory, named
    # after the invoked program.
    cache_group = parser.add_argument_group('cache')
    cache_filename = path.join(tempfile.gettempdir(),
                               "{}.cache".format(path.basename(sys.argv[0])))
    cache_group.add_argument(
        '--cache',
        dest='cache_filename',
        action='store',
        default=cache_filename,
        help="Enable the cache and with the given cache name.")
    # --no-cache shares dest with --cache: storing None disables caching.
    cache_group.add_argument('--no-cache',
                             dest='cache_filename',
                             action='store_const',
                             const=None,
                             help="Disable the price cache.")

    cache_group.add_argument('--clear-cache',
                             action='store_true',
                             help="Clear the cache prior to startup")

    args = parser.parse_args()

    # Map the -v count (None when absent) to a logging level.
    verbose_levels = {
        None: logging.WARN,
        0: logging.WARN,
        1: logging.INFO,
        2: logging.DEBUG
    }
    logging.basicConfig(level=verbose_levels[args.verbose],
                        format='%(levelname)-8s: %(message)s')

    # --all expands to the three individual flags.
    if args.all:
        args.inactive = args.clobber = True
        args.undeclared = DEFAULT_SOURCE

    # Setup for processing.
    setup_cache(args.cache_filename, args.clear_cache)

    # Get the list of DatedPrice jobs to get from the arguments.
    logging.info("Processing at date: %s", args.date or datetime.date.today())
    jobs = []
    all_entries = []
    dcontext = None
    if args.expressions:
        # Interpret the arguments as price sources.
        for source_str in args.sources:
            psources = []
            try:
                psource_map = find_prices.parse_source_map(source_str)
            except ValueError:
                # Give a more helpful hint when the bad "source" is actually
                # an existing file (likely a missing -e / misused mode).
                extra = "; did you provide a filename?" if path.exists(
                    source_str) else ''
                msg = ('Invalid source "{{}}"{}. '.format(extra) +
                       'Supported format is "CCY:module/SYMBOL"')
                parser.error(msg.format(source_str))
            else:
                for currency, psources in psource_map.items():
                    jobs.append(
                        find_prices.DatedPrice(psources[0].symbol, currency,
                                               args.date, psources))
    else:
        # Interpret the arguments as Beancount input filenames.
        for filename in args.sources:
            if not path.exists(filename) or not path.isfile(filename):
                # NOTE(review): parser.error() raises SystemExit, so this
                # 'continue' is normally unreachable.
                parser.error('File does not exist: "{}"; '
                             'did you mean to use -e?'.format(filename))
                continue
            logging.info('Loading "%s"', filename)
            entries, errors, options_map = loader.load_file(
                filename, log_errors=sys.stderr)
            # Keep the display context from the first file only.
            if dcontext is None:
                dcontext = options_map['dcontext']
            jobs.extend(
                find_prices.get_price_jobs_at_date(entries, args.date,
                                                   args.inactive,
                                                   args.undeclared))
            all_entries.extend(entries)

    return args, jobs, data.sorted(all_entries), dcontext
示例#16
0
def main():
    """Generate final reports for a shared expenses on a trip or project.

    For each of many participants, generate a detailed list of expenses,
    contributions, a categorized summary of expenses, and a final balance. Also
    produce a global list of final balances so that participants can reconcile
    between each other.
    """

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-8s: %(message)s')
    parser = version.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount input filename')

    parser.add_argument(
        '-c',
        '--currency',
        action='store',
        help="Convert all the amounts to a single common currency")

    oparser = parser.add_argument_group('Outputs')

    oparser.add_argument('-o',
                         '--output-text',
                         '--text',
                         action='store',
                         help="Render results to text boxes")
    oparser.add_argument('--output-csv',
                         '--csv',
                         action='store',
                         help="Render results to CSV files")
    oparser.add_argument('--output-stdout',
                         '--stdout',
                         action='store_true',
                         help="Render results to stdout")

    args = parser.parse_args()

    # Ensure the directories exist.
    for directory in [args.output_text, args.output_csv]:
        if directory and not path.exists(directory):
            os.makedirs(directory, exist_ok=True)

    # Load the input file and get the list of participants.
    entries, errors, options_map = loader.load_file(args.filename)
    participants = get_participants(args.filename, options_map)

    # Per participant: run three queries. The positional argument following
    # each query string is substituted into its '{}' placeholder (presumably
    # by save_query -- TODO confirm its signature).
    for participant in participants:
        print("Participant: {}".format(participant))

        # Account balances grouped by parent account, restricted to this
        # participant's sub-accounts.
        save_query("balances",
                   participant,
                   entries,
                   options_map,
                   r"""
          SELECT
            PARENT(account) AS account,
            CONV[SUM(position)] AS amount
          WHERE account ~ ':\b{}'
          GROUP BY 1
          ORDER BY 2 DESC
        """,
                   participant,
                   boxed=False,
                   args=args)

        # Detailed expense postings for this participant.
        save_query("expenses",
                   participant,
                   entries,
                   options_map,
                   r"""
          SELECT
            date, flag, description,
            PARENT(account) AS account,
            JOINSTR(links) AS links,
            CONV[position] AS amount,
            CONV[balance] AS balance
          WHERE account ~ 'Expenses.*\b{}'
        """,
                   participant,
                   args=args)

        # Detailed income/contribution postings for this participant.
        save_query("income",
                   participant,
                   entries,
                   options_map,
                   r"""
          SELECT
            date, flag, description,
            account,
            JOINSTR(links) AS links,
            CONV[position] AS amount,
            CONV[balance] AS balance
          WHERE account ~ 'Income.*\b{}'
        """,
                   participant,
                   args=args)

    # Global reconciliation: one balance per participant, matching any of the
    # participant names via the alternation pattern.
    save_query("final",
               None,
               entries,
               options_map,
               r"""
      SELECT
        GREP('\b({})\b', account) AS participant,
        CONV[SUM(position)] AS balance
      GROUP BY 1
      ORDER BY 2
    """,
               '|'.join(participants),
               args=args)
示例#17
0
def main(argv=None):
    """Parse arguments, load a Beancount ledger and render the chosen report.

    Args:
      argv: An optional list of command-line argument strings; defaults to
        sys.argv when None.
    Returns:
      1 if a report error occurred or the ledger had errors, otherwise 0
      (or None when only a help listing was printed).
    """
    parser = version.ArgumentParser(description=__doc__)

    # These two options print a listing and exit from within their custom
    # argparse actions (presumably -- see ListReportsAction/ListFormatsAction).
    parser.add_argument(
        '--help-reports',
        '--list-reports',
        nargs='?',
        default=None,
        action=ListReportsAction,
        help="Print the full list of supported reports and exit.")

    parser.add_argument(
        '--help-formats',
        '--list-formats',
        nargs='?',
        default=None,
        action=ListFormatsAction,
        help="Print the full list of supported formats and exit.")

    parser.add_argument(
        '-f',
        '--format',
        default=None,
        choices=['text', 'csv', 'html', 'htmldiv', 'xls', 'ofx', 'beancount'],
        help="Output format.")

    parser.add_argument(
        '-o',
        '--output',
        action='store',
        help=("Output filename. If not specified, the output goes "
              "to stdout. The filename is inspected to select a "
              "sensible default format, if one is not requested."))

    parser.add_argument('-t',
                        '--timings',
                        '--verbose',
                        action='store_true',
                        help='Print timings.')

    parser.add_argument('-q',
                        '--no-errors',
                        action='store_true',
                        help='Do not report errors.')

    parser.add_argument('filename',
                        metavar='FILENAME.beancount',
                        help='The Beancount input filename to load.')

    # Register one subparser per report, so each report can contribute its
    # own arguments.
    subparsers = parser.add_subparsers(
        title='report', help='Name/specification of the desired report.')

    for report_class in get_all_reports():
        name, aliases = report_class.names[0], report_class.names[1:]

        oss = io.StringIO()
        oss.write('  {} (aliases: {}; formats: {})'.format(
            report_class.__doc__, ','.join(report_class.names),
            ','.join(report_class.get_supported_formats())))

        report_parser = subparsers.add_parser(name,
                                              aliases=aliases,
                                              description=oss.getvalue())
        # Remember which report class handles this subcommand.
        report_parser.set_defaults(report_class=report_class)
        report_class.add_args(report_parser)

        # Each subparser must gather the filter arguments. This is unfortunate,
        # but it works.
        report_parser.add_argument(
            'filters',
            nargs='*',
            help='Filter expression(s) to select the subset of transactions.')

    args = parser.parse_args(args=argv)

    # Warn on filters--not supported at this time.
    if hasattr(args, 'filters') and args.filters:
        parser.error(
            ("Filters are not supported yet. Extra args: {}. "
             "See bean-query if you need filtering now.").format(args.filters))

    # Handle special commands.
    if args.help_reports:
        print(get_list_report_string())
        return

    is_check = False
    # 'report_class' is only set when a report subcommand was given.
    if hasattr(args, 'report_class'):
        # Open output file and guess file format.
        outfile = open(args.output, 'w') if args.output else sys.stdout
        args.format = args.format or file_utils.guess_file_format(args.output)

        # Create the requested report and parse its arguments.
        chosen_report = args.report_class(args, parser)
        if chosen_report is None:
            parser.error("Unknown report")
        is_check = isinstance(chosen_report, misc_reports.ErrorReport)

        # Verify early that the format is supported, in order to avoid parsing the
        # input file if we need to bail out.
        supported_formats = chosen_report.get_supported_formats()
        if args.format and args.format not in supported_formats:
            parser.error(
                "Unsupported format '{}' for {} (available: {})".format(
                    args.format, chosen_report.names[0],
                    ','.join(supported_formats)))

    # Force hardcore validations, just for check.
    extra_validations = (validation.HARDCORE_VALIDATIONS if is_check else None)

    logging.basicConfig(
        level=logging.INFO if args.timings else logging.WARNING,
        format='%(levelname)-8s: %(message)s')

    # Parse the input file.
    errors_file = None if args.no_errors else sys.stderr
    with misc_utils.log_time('beancount.loader (total)', logging.info):
        entries, errors, options_map = loader.load_file(
            args.filename,
            log_timings=logging.info,
            log_errors=errors_file,
            extra_validations=extra_validations)

    if hasattr(args, 'report_class'):
        # Create holdings list.
        with misc_utils.log_time('report.render', logging.info):
            try:
                chosen_report.render(entries, errors, options_map, args.format,
                                     outfile)
            except base.ReportError as exc:
                sys.stderr.write("Error: {}\n".format(exc))
                return 1
    else:
        # No subcommand: fall back to printing the report listing.
        print(get_list_report_string())

    return (1 if errors else 0)
def ingest(importers_list, detect_duplicates_func=None, hooks=None):
    """Driver function that calls all the ingestion tools.

    Put a call to this function at the end of your importer configuration to
    make your import script; this should be its main function, like this:

      from beancount.ingest.scripts_utils import ingest
      my_importers = [ ... ]
      ingest(my_importers)

    This more explicit way of invoking the ingestion is now the preferred way to
    invoke the various tools, and replaces calling the bean-identify,
    bean-extract, bean-file tools with a --config argument. When you call the
    import script itself (as as program) it will parse the arguments, expecting
    a subcommand ('identify', 'extract' or 'file') and corresponding
    subcommand-specific arguments.

    Here you can override some importer values, such as installing a custom
    duplicate finding hook, and eventually more. Note that this newer invocation
    method is optional and if it is not present, a call to ingest() is generated
    implicitly, and it functions as it used to. Future configurable
    customization of the ingestion process will be implemented by inserting new
    arguments to this function, this is the motivation behind doing this.

    Note that invocation by the three bean-* ingestion tools is still supported,
    and calling ingest() explicitly from your import configuration file will not
    break these tools either, if you invoke them on it; the values you provide
    to this function will be used by those tools.

    Args:
      importers_list: A list of importer instances. This is used as a
        chain-of-responsibility, called on each file.
      detect_duplicates_func: (DEPRECATED) An optional function which accepts a
        list of lists of imported entries and a list of entries already existing
        in the user's ledger. See function find_duplicate_entries(), which is
        the default implementation for this. Use 'filter_funcs' instead.
      hooks: An optional list of hook functions to apply to the list of extract
        (filename, entries) pairs, in order. This replaces
        'detect_duplicates_func'.
    Returns:
      0 on success (the chosen subcommand's own failures are reported by it).
    """
    if detect_duplicates_func is not None:
        warnings.warn("Argument 'detect_duplicates_func' is deprecated.")
        # Fold the deprecated callback into the hooks list, running it first.
        if hooks is None:
            hooks = []
        hooks.insert(0, detect_duplicates_func)
        del detect_duplicates_func

    if ingest.args is not None:
        # The script has been called from one of the bean-* ingestion tools.
        # 'ingest.args' is only set when we're being invoked from one of the
        # bean-xxx tools (see below).

        # Mark this function as called, so that if it is called from an import
        # triggered by one of the ingestion tools, it won't be called again
        # afterwards.
        ingest.was_called = True

        # Use those args rather than to try to parse the command-line arguments
        # from a naked ingest() call as a script. {39c7af4f6af5}
        args, parser = ingest.args
    else:
        # The script is called directly. This is the main program of the import
        # script itself. This is the new invocation method.
        parser = version.ArgumentParser(description=DESCRIPTION)

        # Use required on subparsers.
        # FIXME: Remove this when we require version 3.7 or above.
        kwargs = {}
        if sys.version_info >= (3, 7):
            kwargs['required'] = True
        subparsers = parser.add_subparsers(dest='command', **kwargs)

        parser.add_argument('--downloads', '-d', metavar='DIR-OR-FILE',
                            action='append', default=[],
                            help='Filenames or directories to search for files to import')

        # Register one subcommand per ingestion tool; each subcommand's
        # handler function is stored as the 'command' default.
        for cmdname, module in [('identify', identify),
                                ('extract', extract),
                                ('file', file)]:
            parser_cmd = subparsers.add_parser(cmdname, help=module.DESCRIPTION)
            parser_cmd.set_defaults(command=module.run)
            module.add_arguments(parser_cmd)

        args = parser.parse_args()

        if not args.downloads:
            args.downloads.append(os.getcwd())

        # Implement required ourselves.
        # FIXME: Remove this when we require version 3.7 or above.
        # Because add_subparsers() was given dest='command', argparse always
        # sets the attribute (to None when no subcommand is given), so a
        # hasattr() test never fires; check the value instead to avoid a
        # 'NoneType is not callable' crash below.
        if not (sys.version_info >= (3, 7)):
            if getattr(args, 'command', None) is None:
                parser.error("Subcommand is required.")

    abs_downloads = list(map(path.abspath, args.downloads))
    args.command(args, parser, importers_list, abs_downloads, hooks=hooks)
    return 0
def main():
    """Entry point for the BQL query shell.

    Parses command-line options, prepares a lazy ledger loader, and either
    enters an interactive prompt (when attached to a TTY with no query
    supplied) or runs a single query in batch mode.
    """
    argparser = version.ArgumentParser(description=__doc__)

    argparser.add_argument('-f', '--format', action='store',
                           default=_SUPPORTED_FORMATS[0],
                           choices=_SUPPORTED_FORMATS,
                           help="Output format.")

    argparser.add_argument('-m', '--numberify', action='store_true', default=False,
                           help="Numberify the output, removing the currencies.")

    argparser.add_argument('-o', '--output', action='store',
                           help=("Output filename. If not specified, the output goes "
                                 "to stdout. The filename is inspected to select a "
                                 "sensible default format, if one is not requested."))

    argparser.add_argument('-q', '--no-errors', action='store_true',
                           help='Do not report errors')

    argparser.add_argument('filename', metavar='FILENAME.beancount',
                           help='The Beancount input filename to load')

    argparser.add_argument('query', nargs='*',
                           help='A query to run directly')

    opts = argparser.parse_args()

    def load():
        # Deferred loader closure handed to the shell, so that the ledger can
        # be (re-)read on demand.
        errors_file = None if opts.no_errors else sys.stderr
        with misc_utils.log_time('beancount.loader (total)', logging.info):
            return loader.load_file(opts.filename,
                                    log_timings=logging.info,
                                    log_errors=errors_file)

    # Destination for query results.
    if opts.output is None:
        outfile = sys.stdout
    else:
        outfile = open(opts.output, 'w')

    # Interactive only when stdin is a TTY and no query was given on argv.
    interactive = sys.stdin.isatty() and not opts.query
    shell = BQLShell(interactive, load, outfile, opts.format, opts.numberify)
    shell.on_Reload()

    if not interactive:
        # Batch mode (non-interactive): take the query from the command line,
        # or, failing that, read the BQL command from standard input.
        if opts.query:
            query = ' '.join(opts.query)
        else:
            query = sys.stdin.read()
        shell.onecmd(query)
    else:
        try:
            shell.cmdloop()
        except KeyboardInterrupt:
            print('\nExit')

    return 0