def test_to_excel(example_ledger):
    """Smoke test: the 'balances' query result can be rendered as an ODS file."""
    result_types, result_rows = run_query(
        example_ledger.all_entries,
        example_ledger.options,
        'balances',
        numberify=True,
    )
    assert excel.to_excel(result_types, result_rows, 'ods', 'balances')
def test_AnyMeta(self, entries, _, options_map):
    """
    2016-11-20 *
      name: "TheName"
      address: "1 Wrong Way"
      empty: "NotEmpty"
      Assets:Banking          1 USD
        color: "Green"
        address: "1 Right Way"
        empty:
    """
    # ANY_META prefers the posting's metadata and falls back to the
    # transaction's metadata when the posting does not set the key.
    for key, expected in [('name', 'TheName'),
                          ('color', 'Green'),
                          ('address', '1 Right Way'),
                          ('empty', None)]:
        _rtypes, rrows = query.run_query(
            entries, options_map,
            'SELECT ANY_META("{}") as m'.format(key))
        self.assertEqual([(expected, )], rrows)
def make_table(self):
    """An account tree based on matching regex patterns."""
    roots = self.ledger.all_root_account
    cash = roots.get('Assets')
    credit = roots.get('Liabilities')

    def balances(extra_condition=''):
        # Per-account USD balances over asset/liability accounts,
        # excluding anything tagged #future, as an account -> number dict.
        sql = '''
            select account, number(only("USD", sum(position)))
            from not "future" in tags
            where account ~ "^(Assets|Liabilities)"{}
            group by 1'''.format(extra_condition)
        _, rows = query.run_query(self.ledger.entries,
                                  self.ledger.options, sql)
        return dict(rows)

    # "Worth": cleared postings plus any negative (outgoing) amounts.
    self.wrows = balances(" and (meta('_cleared') = True or number < 0)")
    # "Cleared": only postings explicitly marked cleared.
    self.crows = balances(" and (meta('_cleared') = True)")
    # "Total": every matching posting.
    self.trows = balances()
    return [cash, credit]
def _wct(self, account_name):
    """Return a (worth, cleared, total) triple of USD numbers for an account.

    worth   -- cleared postings plus negative (outgoing) amounts
    cleared -- only postings whose `_cleared` metadata is True
    total   -- all postings on the account

    Any query that returns no rows yields Decimal() (zero) for that slot.
    """
    if not account_name:
        # NOTE(review): this fallback looks like a regex pattern, but the
        # queries below use exact equality (`account = "{}"`), so the
        # default can never match a real account name -- confirm intent.
        account_name = '^(Assets:|Liabilities:)'
    # The "{}" placeholder is filled in by run_query from the trailing
    # positional argument (account_name).
    _, wrow = query.run_query(
        self.ledger.entries, self.ledger.options, '''
        select number(only("USD", sum(position)))
        where account = "{}" and (meta('_cleared') = True or number < 0)
        ''', account_name)
    if not wrow:
        wrow = [[Decimal()]]
    _, crow = query.run_query(
        self.ledger.entries, self.ledger.options, '''
        select number(only("USD", sum(position)))
        where account = "{}" and (meta('_cleared') = True)
        ''', account_name)
    if not crow:
        crow = [[Decimal()]]
    _, trow = query.run_query(
        self.ledger.entries, self.ledger.options, '''
        select number(only("USD", sum(position)))
        where account = "{}"
        ''', account_name)
    if not trow:
        trow = [[Decimal()]]
    # Each result is a single-row, single-column table.
    return wrow[0][0], crow[0][0], trow[0][0]
def test_GrepN(self, entries, _, options_map):
    """
    2016-11-20 * "prev match in context next"
      Assets:Banking          1 USD
    """
    # Each case is (query, expected group value): GREPN extracts the
    # n-th regex group (group 0 being the whole match).
    cases = [
        ('SELECT GREPN("in", narration, 0) as m', 'in'),
        ('SELECT GREPN("match (.*) context", narration, 1) as m', 'in'),
        ('SELECT GREPN("(.*) in (.*)", narration, 2) as m', 'context next'),
        ('SELECT GREPN("ab(at)hing", "abathing", 1) as m', 'at'),
    ]
    for sql, expected in cases:
        _rtypes, rrows = query.run_query(entries, options_map, sql)
        self.assertEqual([(expected, )], rrows)
def test_Date(self, entries, _, options_map):
    """
    2016-11-20 * "ok"
      Assets:Banking          1 USD
    """
    def run(sql):
        _rtypes, rrows = query.run_query(entries, options_map, sql)
        return rrows

    # date() from explicit numeric components.
    self.assertEqual([(datetime.date(2020, 1, 2), )],
                     run('SELECT date(2020, 1, 2) as m'))
    # Components may also come from columns of the current row.
    self.assertEqual([(datetime.date(2016, 11, 1), )],
                     run('SELECT date(year, month, 1) as m'))
    # Invalid components propagate datetime's ValueError.
    with self.assertRaisesRegex(ValueError, "day is out of range for month"):
        run('SELECT date(2020, 2, 32) as m')
    # date() also parses string representations in several formats.
    self.assertEqual([(datetime.date(2020, 1, 2), )],
                     run('SELECT date("2020-01-02") as m'))
    self.assertEqual([(datetime.date(2016, 11, 1), )],
                     run('SELECT date("2016/11/1") as m'))
def test_to_csv(example_ledger):
    """Both a stored query and an ad-hoc select can be rendered as CSV."""
    for query_string in ('balances', 'select account, tags, date, day'):
        types, rows = run_query(example_ledger.all_entries,
                                example_ledger.options,
                                query_string, numberify=True)
        assert excel.to_csv(types, rows)
def test_to_csv(example_ledger):
    """CSV export works for stored and ad-hoc queries."""
    queries = ['balances', 'select account, tags, date, day']
    results = [
        run_query(example_ledger.all_entries, example_ledger.options,
                  query_string, numberify=True)
        for query_string in queries
    ]
    for types, rows in results:
        assert excel.to_csv(types, rows)
def test_to_csv(example_ledger: FavaLedger) -> None:
    """CSV conversion succeeds for stored and ad-hoc query results."""
    for query_string in ("balances", "select account, tags, date, day"):
        types, rows = run_query(
            example_ledger.all_entries,
            example_ledger.options,
            query_string,
            numberify=True,
        )
        assert excel.to_csv(types, rows)
def find_funds(self, price):
    """Expand fund-purchase transactions matching *price* with NAV details.

    Finds postings on ``FundAccount`` whose CNY amount equals *price*,
    looks up the fund's net asset value (NAV) for the posting's date in
    ``fund_data``, and rewrites the transaction line in place with the
    computed unit count, cost price, fee and deviation legs.
    """
    bql = "SELECT flag, filename, lineno, location, account, other_accounts, year, month, day, number, currency where account = \"{}\" and currency = \"CNY\" and number = {}".format(
        FundAccount, price)
    items = query.run_query(self.entries, self.option_map, bql)
    # length = len(items[1])
    # Purchase fee, deducted from the invested cash amount.
    feePrice = round(price * fee, 2)
    for item in items[1]:
        current_date = date(item.year, item.month, item.day)
        date_string = current_date.strftime("%Y%m%d")
        print('Updating ' + date_string)
        # fund_data rows appear to be (YYYYMMDD, NAV) pairs -- find the
        # NAV quote for this posting's date.
        for fund_item in fund_data:
            if fund_item[0] == date_string:
                fund_price = float(fund_item[1])
                # Units bought = net invested amount / NAV.
                count = (price - feePrice) / fund_price
                self.update_line_to_new_line(
                    item.location,
                    transactionTemplate.substitute(
                        fundAccount=FundAccount,
                        fundCount=round(count, 2),
                        fundCurrency=currency,
                        costPrice=round(fund_price, 5),
                        feeAccount=FeeAccount,
                        fee=feePrice,
                        deviationAccount=DeviationAccount,
                        otherAccount=item.other_accounts[0],
                        originalPrice=price), 1)
def get_allocations(entries, options_map, portfolio):
    """Collect the holdings of *portfolio* into an Allocations object.

    Values are converted to the first operating currency declared in the
    ledger options.
    """
    allocation_query = r"""
    SELECT currency,
        GETITEM(CURRENCY_META(currency), "asset-class") as c,
        GETITEM(CURRENCY_META(currency), "asset-subclass") as s,
        account,
        getprice(currency, "{1}", today()) as price,
        convert(value(sum(position)), "{1}") as market_value,
        convert(cost(sum(position)), "{1}") as book_value,
        GETITEM(OPEN_META(account), "asset-class") as act_class,
        GETITEM(OPEN_META(account), "asset-subclass") as act_subclass
    WHERE GETITEM(OPEN_META(account), "portfolio") = "{0}"
    GROUP BY currency, c, s, account, price, act_class, act_subclass
    """
    target_currency = options_map['operating_currency'][0]
    _rtypes, rows = query.run_query(entries, options_map, allocation_query,
                                    portfolio, target_currency,
                                    numberify=True)
    allocations = Allocations()
    for row in rows:
        allocations.append(_position_from_row(row))
    return allocations
def find_duplicate(self, entry, money, replace_account = ''):
    """Find ledger transactions that duplicate *entry* and sync metadata.

    Searches for transactions on *entry*'s date whose CNY amount equals
    ``-money``.  Candidates whose timestamp is absent or matches the
    entry's get the entry's metadata appended; if *replace_account* is
    given, a matching public account is replaced by it.  When more than
    one candidate remains, all are flagged '!' for manual review.

    Returns True when at least one candidate duplicate was found.
    """
    bql = "SELECT flag, filename, lineno, location, account, year, month, day, str(entry_meta('timestamp')) as timestamp, metas() as metas WHERE year = {} AND month = {} AND day = {} AND number = {} AND currency = 'CNY' ORDER BY timestamp ASC".format(
        entry.date.year, entry.date.month, entry.date.day, -money)
    items = query.run_query(self.entries, self.option_map, bql)
    length = len(items[1])
    if (length == 0):
        return False
    updated_items = []
    for item in items[1]:
        # BQL's str() wraps values in quotes; strip them before comparing.
        item_timestamp = item.timestamp.replace("'", '')
        # A candidate matches when the incoming entry carries no
        # timestamp, the timestamps agree, or the ledger item has none.
        if (
            (not 'timestamp' in entry.meta)
            or item_timestamp == entry.meta['timestamp']
            or item.timestamp == 'None'
            or item.timestamp == ''
        ):
            updated_items.append(item)
            if replace_account != '' and item.account in public_accounts:
                self.update_transaction_account(item.location, item.account,
                                                replace_account)
            # Copy over any metadata keys the ledger item is missing.
            for key, value in entry.meta.items():
                if key == 'filename' or key == 'lineno':
                    continue
                if not key in item.metas:
                    self.append_text_to_transaction(
                        item.filename, item.lineno,
                        '{}: "{}"'.format(key, value))
            # An exact timestamp match confirms the duplicate; stop
            # annotating the remaining same-amount candidates.
            if 'timestamp' in entry.meta and item_timestamp == entry.meta['timestamp']:
                break
    if len(updated_items) > 1:
        # Ambiguous: several plausible duplicates -- flag all for review.
        for item in updated_items:
            self.update_transaction_flag(item.location, item.flag, '!')
    return len(updated_items) > 0
def test_to_excel(example_ledger):
    """The 'balances' query result can be rendered as an ODS workbook."""
    query_name = "balances"
    types, rows = run_query(
        example_ledger.all_entries,
        example_ledger.options,
        query_name,
        numberify=True,
    )
    assert excel.to_excel(types, rows, "ods", query_name)
def find_duplicate(self, entry, money, unique_no=None, replace_account='', currency='CNY'):
    """Find ledger transactions duplicating *entry* and reconcile metadata.

    Candidates share *entry*'s date and have a converted amount equal to
    *money* in *currency*.  A matching *unique_no* metadata key settles
    identity immediately; otherwise timestamps decide.  Matching items
    get *entry*'s metadata appended and, when *replace_account* is set,
    a public placeholder account swapped for it.  Multiple surviving
    candidates are flagged '!' for manual review.

    Returns True when a duplicate (confirmed or probable) was found.
    """
    # Query the account that actually paid, not the expense legs.
    bql = "SELECT flag, filename, lineno, location, account, year, month, day, str(entry_meta('timestamp')) as timestamp, metas() as metas WHERE year = {} AND month = {} AND day = {} AND number(convert(units(position), '{}')) = {} ORDER BY timestamp ASC".format(
        entry.date.year, entry.date.month, entry.date.day, currency, money)
    items = query.run_query(self.entries, self.option_map, bql)
    length = len(items[1])
    if (length == 0):
        return False
    updated_items = []
    for item in items[1]:
        same_trade = False
        # BQL's str() wraps values in quotes; strip for comparison.
        item_timestamp = item.timestamp.replace("'", '')
        # If the item is already recorded with the same unique_no, it is
        # the same transaction imported by the same importer: do nothing.
        if unique_no != None:
            if unique_no in entry.meta and unique_no in item.metas:
                if item.metas[unique_no] == entry.meta[unique_no]:
                    same_trade = True
                # unique_no present on both but different: definitely a
                # different transaction -- skip this candidate entirely.
                else:
                    continue
        if same_trade:
            return True
        # Otherwise this may be the same transaction seen via a different
        # statement; compare timestamps.  Equal timestamps, or a missing
        # timestamp on either side, marks it as the same transaction that
        # still needs further processing -- e.g. a manually entered
        # transaction to be tagged with the Alipay order number.  Because
        # Alipay's statements are unhelpful, this branch also has to
        # update the payment account.
        if ((not 'timestamp' in entry.meta)
                or item_timestamp == entry.meta['timestamp']
                or item.timestamp == 'None' or item.timestamp == ''):
            updated_items.append(item)
            if replace_account != '' and item.account in public_accounts:
                self.update_transaction_account(item.location, item.account,
                                                replace_account)
            # Copy over any metadata keys the ledger item is missing.
            for key, value in entry.meta.items():
                if key == 'filename' or key == 'lineno':
                    continue
                if not key in item.metas:
                    self.append_text_to_transaction(
                        item.filename, item.lineno,
                        '{}: "{}"'.format(key, value))
            # Identical timestamps confirm the duplicate 100%; no need to
            # annotate the other same-amount candidates after this.
            if 'timestamp' in entry.meta and item_timestamp == entry.meta[
                    'timestamp']:
                break
    if len(updated_items) > 1:
        # Ambiguous: several plausible duplicates -- flag all for review.
        for item in updated_items:
            self.update_transaction_flag(item.location, item.flag, '!')
    return len(updated_items) > 0
def test_Lower(self, entries, _, options_map):
    """
    2016-11-20 * "I love candy"
      Assets:Banking       -1 USD
    """
    sql = '''
      SELECT Lower(narration) as m where date = 2016-11-20
    '''
    _rtypes, rrows = query.run_query(entries, options_map, sql)
    # Lower() folds the narration to lowercase.
    self.assertEqual([('i love candy', )], rrows)
def test_Coalesce(self, entries, _, options_map):
    """
    2016-11-20 *
      Assets:Banking          1 USD
    """
    # COALESCE returns the first non-None argument; note that the empty
    # narration string is not None and therefore wins over the account.
    cases = [
        ('SELECT COALESCE(account, price) as m', 'Assets:Banking'),
        ('SELECT COALESCE(price, account) as m', 'Assets:Banking'),
        ('SELECT COALESCE(price, cost_number) as m', None),
        ('SELECT COALESCE(narration, account) as m', ''),
    ]
    for sql, expected in cases:
        _rtypes, rrows = query.run_query(entries, options_map, sql)
        self.assertEqual([(expected, )], rrows)
def query_to_file(self, query_string, result_format):
    """Get query result as file.

    Arguments:
        query_string: A string, the query to run.
        result_format: The file format to save to.

    Returns:
        A tuple (name, data), where name is either 'query_result' or the
        name of a custom query if the query string is 'run name_of_query'.
        ``data`` contains the file contents.

    Raises:
        FavaAPIException: If the result format is not supported or the
        query failed.
    """
    name = "query_result"

    try:
        statement = self.parser.parse(query_string)
    except query_parser.ParseError as exception:
        # Chain the original error so tracebacks keep the root cause.
        raise FavaAPIException(str(exception)) from exception

    if statement.__class__.__name__ == "RunCustom":
        name = statement.query_name

        try:
            # Use a distinct loop variable to avoid shadowing other
            # `query` names in scope.
            custom = next(q for q in self.queries if q.name == name)
        except StopIteration:
            # The StopIteration is an implementation detail; suppress it.
            raise FavaAPIException(
                'Query "{}" not found.'.format(name)
            ) from None
        query_string = custom.query_string

    try:
        types, rows = run_query(
            self.ledger.all_entries,
            self.ledger.options,
            query_string,
            numberify=True,
        )
    except (
        query_compile.CompilationError,
        query_parser.ParseError,
    ) as exception:
        raise FavaAPIException(str(exception)) from exception

    if result_format == "csv":
        data = to_csv(types, rows)
    else:
        if not HAVE_EXCEL:
            raise FavaAPIException("Result format not supported.")
        data = to_excel(types, rows, result_format, query_string)
    return name, data
def test_DateDiffAdjust(self, entries, _, options_map):
    """
    2016-11-20 * "ok"
      Assets:Banking       -1 STOCK { 5 USD, 2016-10-30 }
    """
    def run(sql):
        _rtypes, rrows = query.run_query(entries, options_map, sql)
        return rrows

    # date_diff() is signed: first argument minus second, in days.
    self.assertEqual([(21, )], run('SELECT date_diff(date, cost_date) as m'))
    self.assertEqual([(-21, )], run('SELECT date_diff(cost_date, date) as m'))
    # date_add() shifts a date by a (possibly negative) number of days.
    self.assertEqual([(datetime.date(2016, 11, 21), )],
                     run('SELECT date_add(date, 1) as m'))
    self.assertEqual([(datetime.date(2016, 11, 19), )],
                     run('SELECT date_add(date, -1) as m'))
def test_execute_query(example_ledger):
    """The shell handles help commands and runs stored queries."""
    shell = QueryShell(example_ledger)
    help_exit = shell.execute_query("help exit")
    assert help_exit == (QueryShell.noop.__doc__ + "\n", None, None)
    assert shell.execute_query("help")[1:] == (None, None)
    expected = query.run_query(shell.entries, shell.options_map, "balances")
    assert shell.execute_query("balances")[1:] == expected
def find_plac_actuals(self):
    """Return the summed cost of each tracked account over the date window."""
    actuals = []
    template = ("select COST(SUM(position)) where account ~ '{}' "
                "and date >= {} and date <= {};")
    for account in self.accounts:
        query_str = template.format(account, self.start_date, self.end_date)
        _types, rows = query.run_query(self.entries, self.options_map,
                                       query_str, numberify=True)
        # Single-row, single-column result: the account's total cost.
        actuals.append(rows[0][0])
    return actuals
def test_execute_query(example_ledger):
    """Help output, stored queries and query history all work."""
    shell = QueryShell(example_ledger)
    assert shell.execute_query('help exit') == (
        QueryShell.noop.__doc__ + '\n', None, None)
    assert shell.execute_query('help')[1:] == (None, None)
    expected = query.run_query(shell.entries, shell.options_map, 'balances')
    assert shell.execute_query('balances', add_to_history=True)[1:] == expected
    assert shell.get_history(1) == ['balances']
def query_to_file(self, query_string, result_format):
    """Get query result as file.

    Arguments:
        query_string: A string, the query to run.
        result_format: The file format to save to.

    Returns:
        A tuple (name, data), where name is either 'query_result' or the
        name of a custom query if the query string is 'run name_of_query'.
        ``data`` contains the file contents.

    Raises:
        FavaAPIException: If the result format is not supported or the
        query failed.
    """
    name = 'query_result'

    try:
        statement = self.parser.parse(query_string)
    except query_parser.ParseError as exception:
        # Chain the original error so tracebacks keep the root cause.
        raise FavaAPIException(str(exception)) from exception

    if statement.__class__.__name__ == 'RunCustom':
        name = statement.query_name

        try:
            # Use a distinct loop variable to avoid shadowing other
            # `query` names in scope.
            custom = next(q for q in self.queries if q.name == name)
        except StopIteration:
            # The StopIteration is an implementation detail; suppress it.
            raise FavaAPIException(
                'Query "{}" not found.'.format(name)) from None
        query_string = custom.query_string

    try:
        types, rows = run_query(
            self.ledger.all_entries, self.ledger.options,
            query_string, numberify=True)
    except (query_compile.CompilationError,
            query_parser.ParseError) as exception:
        raise FavaAPIException(str(exception)) from exception

    if result_format == 'csv':
        data = to_csv(types, rows)
    else:
        if not HAVE_EXCEL:
            raise FavaAPIException('Result format not supported.')
        data = to_excel(types, rows, result_format, query_string)
    return name, data
def test_Subst(self, entries, _, options_map):
    """
    2016-11-20 * "I love candy"
      Assets:Banking       -1 USD

    2016-11-21 * "Buy thing thing"
      Assets:Cash          -1 USD
    """
    # (query, expected result rows): SUBST performs regex substitution,
    # replacing every match and supporting backreferences.
    cases = [
        ('SELECT SUBST("[Cc]andy", "carrots", narration) as m'
         ' where date = 2016-11-20',
         [('I love carrots', )]),
        ('SELECT SUBST("thing", "t", narration) as m'
         ' where date = 2016-11-21',
         [('Buy t t', )]),
        ('SELECT SUBST("random", "t", narration) as m'
         ' where date = 2016-11-21',
         [('Buy thing thing', )]),
        ('SELECT SUBST("(love)", "\\1 \\1", narration) as m'
         ' where date = 2016-11-20',
         [('I love love candy', )]),
        ('SELECT SUBST("Assets:.*", "Savings", account) as a,'
         ' str(sum(position)) as p',
         [('Savings', '(-2 USD)')]),
    ]
    for sql, expected in cases:
        _rtypes, rrows = query.run_query(entries, options_map, sql)
        self.assertEqual(expected, rrows)
def main():
    """Aggregate Expenses/Loans/Income by month and write a CSV to stdout."""
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('filename', help='Beancount input filename')
    parser.add_argument('--from', dest='dfrom', action='store',
                        default='1970-01-01')
    parser.add_argument('--to', dest='dto', action='store',
                        default='2199-01-01')
    args = parser.parse_args()

    # Create a SQL query.
    date_from = parse(args.dfrom).date()
    date_to = parse(args.dto).date()
    # date_from/date_to are consumed via the locals() expansion below.
    sql = """
      SELECT year, month, root(account, 1) as ar, sum(position) as pos
      FROM date > {date_from} AND date < {date_to}
      WHERE account ~ "Expenses" OR
            account ~ "Liabilities:Loans" OR
            account ~ "Income"
      GROUP BY year, month, ar
      ORDER BY year, month, ar
      FLATTEN
    """.format(**locals())

    # Load the file and run a query on it.
    entries, _, options_map = loader.load_file(args.filename)
    rtypes, rrows = query.run_query(entries, options_map, sql)

    # Pivot on the year/month + currency
    out = collections.defaultdict(lambda: collections.defaultdict(dict))
    for row in rrows:
        # NOTE(review): row.pos.lot.currency is the pre-2.0 beancount
        # Position API; newer versions expose row.pos.units.currency --
        # confirm the beancount version this script targets.
        d = out['{}/{:02d}'.format(row.year, row.month)][row.pos.lot.currency]
        d[row.ar] = row.pos.number

    # Write this out to a CSV file.
    wr = csv.writer(sys.stdout)
    for month, currencies in sorted(out.items()):
        for currency, accounts in sorted(currencies.items()):
            exp = accounts.get('Expenses', ZERO)
            loans = accounts.get('Liabilities:Loans', ZERO)
            inc = accounts.get('Income', ZERO)
            # Net = expenses + loan payments - income (income is negative
            # in beancount's sign convention, hence -inc).
            wr.writerow((month, exp, loans, -inc, exp + loans - inc,
                         currency))
def test_run_query(self):
    """The example ledger loads cleanly and expense sums span 3 currencies."""
    rootdir = test_utils.find_repository_root(__file__)
    filename = path.join(rootdir, 'examples', 'example.beancount')
    entries, errors, options_map = loader.load_file(filename)
    assert not errors
    sql_query = r"""
      SELECT account, SUM(position) AS amount
      WHERE account ~ 'Expenses:'
      GROUP BY 1
      ORDER BY 2 DESC
    """
    rtypes, rrows = query.run_query(entries, options_map,
                                    sql_query, 'Martin', numberify=True)
    # numberify expands the amount column into one column per currency.
    expected_columns = ['account', 'amount (USD)',
                        'amount (IRAUSD)', 'amount (VACHR)']
    self.assertEqual(expected_columns, [rtype[0] for rtype in rtypes])
    self.assertEqual(4, len(rrows[0]))
def test_query() -> None:
    """Exercise the query shell: help, lex, stored queries and errors."""
    assert run_text("help")
    assert run_text("help exit") == (
        "Doesn't do anything in Fava's query shell.")
    expected_tokens = "\n".join([
        "LexToken(SELECT,'SELECT',1,0)",
        "LexToken(ID,'date',1,7)",
        "LexToken(COMMA,',',1,11)",
        "LexToken(ID,'balance',1,13)",
    ])
    assert run("lex select date, balance")[0] == expected_tokens
    assert run_text("run") == "custom_query\ncustom query with space"
    balances = run("balances")
    assert run("run custom_query") == balances
    assert run("run 'custom query with space'") == balances
    assert run("balances")[1:] == run_query(
        LEDGER.entries, LEDGER.options, "balances")
    assert run_text("asdf") == (
        "ERROR: Syntax error near 'asdf' (at 0)\n asdf\n ^")
def test_run_query(self):
    """The shared-expenses example yields 13 rows across 2 currencies."""
    rootdir = test_utils.find_repository_root(__file__)
    filename = path.join(rootdir, 'examples', 'sharing',
                         'cozumel2015.beancount')
    entries, _, options_map = loader.load_file(filename)
    sql_query = r"""
      SELECT PARENT(account) AS account, SUM(position) AS amount
      WHERE account ~ 'Expenses.*\b{}'
      GROUP BY 1
      ORDER BY 2 DESC
    """
    rtypes, rrows = query.run_query(entries, options_map,
                                    sql_query, 'Martin', numberify=True)
    self.assertEqual(['account', 'amount (USD)', 'amount (MXN)'],
                     [rtype[0] for rtype in rtypes])
    self.assertEqual(13, len(rrows))
def load_beancount_file(file_name):
    """Load a beancount file into a monthly Expenses/Income DataFrame.

    Runs a BQL query summing postings per account/year/month in the first
    operating currency, and returns a DataFrame with columns
    ``Account``, ``YearMonth`` ("YYYY-MM") and ``Amount (CUR)``.

    Note: the input file is deleted afterwards (it is expected to be a
    temporary upload).
    """
    entries, _, opts = load_file(file_name)
    currency = opts["operating_currency"][0]
    bql = ("SELECT account, YEAR(date) AS year, "
           "MONTH(date) as month, "
           "SUM(convert(position, '{}', date)) AS amount "
           "WHERE account ~ 'Expenses' "
           "OR account ~ 'Income' "
           "GROUP BY account, year, month "
           "ORDER BY account, year, month".format(currency))
    cols, rows = run_query(entries, opts, bql)
    cols, rows = numberify_results(cols, rows)

    amount_col = "Amount ({})".format(currency)
    df = pd.DataFrame(rows, columns=[col[0] for col in cols])
    df.rename(columns={"account": "Account",
                       "year": "Year",
                       "month": "Month",
                       "amount ({})".format(currency): amount_col},
              inplace=True)
    # np.float was a deprecated alias removed in NumPy 1.24; the builtin
    # float is the correct dtype here.
    df = df.astype({"Account": str, "Year": int, "Month": int,
                    amount_col: float})
    df = df[["Account", "Year", "Month", amount_col]].fillna(0)
    df["YearMonth"] = df.apply(
        lambda x: "{}-{:0>2d}".format(x["Year"], x["Month"]), axis=1)
    df = df[["Account", "YearMonth", amount_col]]
    # Remove the (temporary) input file once parsed.
    os.remove(file_name)
    return df
def query_func(self, sql):
    """Load the ledger from ``self.f`` and run *sql* against it.

    Returns the (rtypes, rrows) pair produced by the query.
    """
    entries, _errors, options_map = loader.load_file(self.f)
    return query.run_query(entries, options_map, sql)
def save_query(title, participant, entries, options_map, sql_query,
               *format_args, boxed=True, spaced=False, args=None):
    """Save the multiple files for this query.

    Args:
      title: A string, the title of this particular report to render.
      participant: A string, the name of the participant under consideration.
      entries: A list of directives (as per the loader).
      options_map: A dict of options (as per the loader).
      sql_query: A string with the SQL query, possibly with some placeholders
        left for *format_args to replace.
      *format_args: A tuple of arguments to be formatted into the SQL query
        string. This is provided as a convenience.
      boxed: A boolean, true if we should render the results in a fancy-looking
        ASCII box.
      spaced: If true, leave an empty line between each of the rows. This is
        useful if the results have a lot of rows that render over multiple
        lines.
      args: A dummy object with the following attributes:
        output_text: An optional directory name, to produce a text rendering
          of the report.
        output_csv: An optional directory name, to produce a CSV rendering of
          the report.
        output_stdout: A boolean, if true, also render the output to stdout.
        currency: An optional currency (a string). If you use this, you should
          wrap query targets to be converted with the pseudo-function
          "CONV[...]" and it will get replaced to CONVERT(..., CURRENCY)
          automatically.
    """
    # NOTE(review): args is dereferenced unconditionally below, so the
    # args=None default cannot actually be used -- confirm callers always
    # pass a populated object.
    # Replace CONV() to convert the currencies or not; if so, replace to
    # CONVERT(..., currency).
    replacement = (r'\1' if args.currency is None
                   else r'CONVERT(\1, "{}")'.format(args.currency))
    sql_query = re.sub(r'CONV\[(.*?)\]', replacement, sql_query)

    # Run the query.
    rtypes, rrows = query.run_query(entries, options_map, sql_query,
                                    *format_args, numberify=True)

    # The base of all filenames.
    filebase = title.replace(' ', '_')

    fmtopts = dict(boxed=boxed, spaced=spaced)

    # Output the text files.
    if args.output_text:
        # Per-participant reports go into a subdirectory of the output dir.
        basedir = (path.join(args.output_text, participant)
                   if participant else args.output_text)
        os.makedirs(basedir, exist_ok=True)
        filename = path.join(basedir, filebase + '.txt')
        with open(filename, 'w') as file:
            query_render.render_text(rtypes, rrows, options_map['dcontext'],
                                     file, **fmtopts)

    # Output the CSV files.
    if args.output_csv:
        basedir = (path.join(args.output_csv, participant)
                   if participant else args.output_csv)
        os.makedirs(basedir, exist_ok=True)
        filename = path.join(basedir, filebase + '.csv')
        with open(filename, 'w') as file:
            query_render.render_csv(rtypes, rrows, options_map['dcontext'],
                                    file, expand=False)

    if args.output_stdout:
        # Write out the query to stdout.
        query_render.render_text(rtypes, rrows, options_map['dcontext'],
                                 sys.stdout, **fmtopts)
def get_spending(entries, options_map, min_date):
    """Return monthly spending as a pandas Series.

    The Series is indexed by first-of-month dates and restricted to
    months on or after *min_date*.
    """
    _types, rows = query.run_query(entries, options_map, QUERY,
                                   numberify=True)
    # Rows are (year, month, amount, ...); key each amount by the first
    # day of its month and drop months before the cutoff.
    by_month = {
        datetime.date(row[0], row[1], 1): float(row[2])
        for row in rows
        if datetime.date(row[0], row[1], 1) >= min_date
    }
    return pandas.Series(by_month)
def query_func(self, sql):
    """Run *sql* against the already-loaded entries.

    Returns the (rtypes, rrows) pair produced by the query.
    """
    return query.run_query(self.entries, self.options_map, sql)
def query(self, bql_query_string):
    """Run a BQL query against this ledger's entries and options."""
    result = query.run_query(self.entries, self.options, bql_query_string)
    return result
def query(self, query_string, numberify=False):
    """Run a BQL query; optionally numberify the result columns."""
    result = query.run_query(
        self.all_entries,
        self.options,
        query_string,
        numberify=numberify,
    )
    return result