def test_scale_holding(self):
    """Scaling a holding by 0.7 scales the units, book value and market value."""
    original = holdings.Holding(
        'Assets:US:Checking', D('100'), 'MSFT', D('54.34'), 'USD',
        D('5434.00'), D('6000.00'), D('60'), datetime.date(2012, 5, 2))
    scaled = holdings.scale_holding(original, D('0.7'))
    expected = holdings.Holding(
        'Assets:US:Checking', D('70.0'), 'MSFT', D('54.34'), 'USD',
        D('3803.80'), D('4200.00'), D('60'), datetime.date(2012, 5, 2))
    self.assertEqual(expected, scaled)
def parse_shipments(soup) -> List[Shipment]:
    """Extract one Shipment per 'Shipped on <date>' section of an Amazon
    order-details page.

    Args:
      soup: BeautifulSoup tree of the order-details HTML page.

    Returns:
      A list of Shipment tuples.  Cross-check failures are recorded as
      strings in the ``errors`` field rather than raised.
    """
    # Matches e.g. "Shipped on February 8, 2016".  Note this is a non-raw
    # string, so '\\n' is a literal backslash-n in the pattern source,
    # which the regex engine interprets as a newline class member.
    shipped_pattern = '^Shipped on ([^\\n]+)$'

    def is_shipment_header_table(node):
        # A shipment header is a <table> whose entire text is the
        # "Shipped on ..." line.
        if node.name != 'table':
            return False
        text = node.text.strip()
        m = re.match(shipped_pattern, text)
        return m is not None

    header_tables = soup.find_all(is_shipment_header_table)
    shipments = []  # type: List[Shipment]
    # NOTE(review): this single list is passed to every Shipment built
    # below, so errors accumulate across shipments — confirm that sharing
    # is intended rather than a per-shipment list.
    errors = []  # type: Errors
    for header_table in header_tables:
        # Re-match to capture the date text in group 1.
        text = header_table.text.strip()
        m = re.match(shipped_pattern, text)
        assert m is not None
        shipped_date = dateutil.parser.parse(m.group(1)).date()
        items = []
        # The enclosing table holds this shipment's item rows and totals.
        shipment_table = header_table.find_parent('table')

        def is_items_ordered_header(node):
            # Header row of the items table: "Items Ordered" / "Price".
            if node.name != 'tr':
                return False
            tds = node('td')
            if len(tds) < 2:
                return False
            return (tds[0].text.strip() == 'Items Ordered'
                    and tds[1].text.strip() == 'Price')

        items_ordered_header = shipment_table.find(is_items_ordered_header)
        item_rows = items_ordered_header.find_next_siblings('tr')
        for item_row in item_rows:
            tds = item_row('td')
            description_node = tds[0]
            price_node = tds[1]
            price = price_node.text.strip()
            # Item cells come in two layouts, with or without a
            # "Condition:" line; try the more specific pattern first.
            pattern_without_condition = r'^\s*(?P<quantity>[0-9]+)\s+of:(?P<description>.*)\n\s*Sold by: (?P<sold_by>[^\n]+)'
            pattern_with_condition = pattern_without_condition + r'\n.*\n\s*Condition: (?P<condition>[^\n]+)'
            m = re.match(pattern_with_condition, description_node.text,
                         re.UNICODE | re.DOTALL)
            if m is None:
                m = re.match(pattern_without_condition,
                             description_node.text,
                             re.UNICODE | re.DOTALL)
            assert m is not None
            # Collapse all runs of whitespace to single spaces.
            description = re.sub(r'\s+', ' ', m.group('description').strip())
            sold_by = re.sub(r'\s+', ' ', m.group('sold_by').strip())
            try:
                condition = re.sub(r'\s+', ' ', m.group('condition').strip())
            except IndexError:
                # The group only exists when the with-condition pattern
                # matched.
                condition = None
            suffix = ' (seller profile)'
            if sold_by.endswith(suffix):
                sold_by = sold_by[:-len(suffix)]
            items.append(
                Item(
                    quantity=D(m.group('quantity')),
                    description=description,
                    sold_by=sold_by,
                    condition=condition,
                    price=parse_amount(price),
                ))
        items_subtotal = parse_amount(
            get_field_in_table(shipment_table, r'Item\(s\) Subtotal:'))
        # Cross-check the parsed subtotal against the sum of item prices.
        expected_items_subtotal = reduce_amounts(
            beancount.core.amount.mul(x.price, D(x.quantity))
            for x in items)
        if expected_items_subtotal != items_subtotal:
            errors.append(
                'expected items subtotal is %r, but parsed value is %r' %
                (expected_items_subtotal, items_subtotal))
        output_fields = dict()
        output_fields['pretax_adjustments'] = get_adjustments_in_table(
            shipment_table, pretax_adjustment_fields_pattern)
        output_fields['posttax_adjustments'] = get_adjustments_in_table(
            shipment_table, posttax_adjustment_fields_pattern)
        # Subtotal plus pre-tax adjustments should equal "Total before tax".
        pretax_parts = [items_subtotal] + [
            a.amount for a in output_fields['pretax_adjustments']
        ]
        total_before_tax = parse_amount(
            get_field_in_table(shipment_table, 'Total before tax:'))
        expected_total_before_tax = reduce_amounts(pretax_parts)
        if expected_total_before_tax != total_before_tax:
            errors.append(
                'expected total before tax is %s, but parsed value is %s' %
                (expected_total_before_tax, total_before_tax))
        sales_tax = get_adjustments_in_table(shipment_table, 'Sales Tax:')
        # Pre-tax total plus tax and post-tax adjustments should equal the
        # shipment total.
        posttax_parts = (
            [total_before_tax] + [a.amount for a in sales_tax] +
            [a.amount for a in output_fields['posttax_adjustments']])
        total = parse_amount(
            get_field_in_table(shipment_table, 'Total for This Shipment:'))
        expected_total = reduce_amounts(posttax_parts)
        if expected_total != total:
            errors.append('expected total is %s, but parsed value is %s' %
                          (expected_total, total))
        shipments.append(
            Shipment(shipped_date=shipped_date,
                     items=items,
                     items_subtotal=items_subtotal,
                     total_before_tax=total_before_tax,
                     tax=sales_tax,
                     total=total,
                     errors=errors,
                     **output_fields))
    return shipments
def get_google_finance_latest_price(ticker):
    """Return the latest price found for a ticker.

    Args:
      ticker: An 'ExchangeCode:Symbol' string (or a bare symbol) that is the
        unambiguous identifier for the particular financial instrument to
        query.

    Returns:
      A (price, time) pair, where price is a Decimal object and time is the
      datetime.datetime of that price, or (None, None) if no data was found.
    """
    if ':' in ticker:
        exchange_code, symbol = ticker.split(':')
    else:
        exchange_code, symbol = None, ticker

    # Build the query.
    params_dict = {
        'q': symbol,
        'f': 'd,c',  # Date,Close
    }
    if exchange_code:
        params_dict['x'] = exchange_code
    # Always reach back 5 days in time because of long weekends.
    # (The previous code special-cased 'MUTF'/'MUTF_CA' but used the same
    # value in both branches, so the branch has been removed.)
    params_dict['p'] = '5d'
    params_dict['i'] = 300  # secs, to get the most recent.
    url = 'http://www.google.com/finance/getprices?{}'.format(
        parse.urlencode(sorted(params_dict.items())))

    # Fetch the data.
    data = request.urlopen(url).read().decode('utf-8')
    data = parse.unquote(data).strip()

    # Process the meta-data: leading KEY=VALUE lines.
    metadata = {}
    lines = data.splitlines()
    index = 0
    for index, line in enumerate(lines):
        mo = re.match(r'([A-Z_+]+)=(.*)$', line)
        if not mo:
            break
        metadata[mo.group(1)] = mo.group(2)
    else:
        # No data was found (every line was metadata).
        return None, None

    interval = int(metadata['INTERVAL'])
    data_lines = lines[index:]
    price = latest_time = time_marker = None
    for line in data_lines:
        if re.match('TIMEZONE_OFFSET', line):
            continue
        time_str, price_str = line.split(',')
        # A row starting with 'a<epoch>' sets an absolute time marker;
        # subsequent rows are offsets of `interval` seconds from it.
        mo = re.match(r'a(\d+)', time_str)
        if mo:
            time_marker = datetime.datetime.fromtimestamp(int(mo.group(1)))
            latest_time = time_marker
        else:
            # Offset rows are only valid after an absolute marker row.
            assert time_marker is not None, 'Offset row before time marker'
            seconds = int(time_str) * interval
            latest_time = time_marker + datetime.timedelta(seconds=seconds)
        price = D(price_str)
    # The last data row holds the most recent price.
    return (price, latest_time)
def test_precision(self):
    """numberify_results quantizes per-currency columns using the display
    context's inferred precisions (JPY: 0 digits, RGAGX: 3, USD: 2)."""
    # Some display context.
    dcontext = display_context.DisplayContext()
    dcontext.update(D('111'), 'JPY')
    dcontext.update(D('1.111'), 'RGAGX')
    dcontext.update(D('1.11'), 'USD')
    dformat = dcontext.build()

    # Input data: one row per currency, the same value expressed as a raw
    # number, an Amount, a Position and an Inventory.
    itypes = [('number', Decimal), ('amount', amount.Amount),
              ('position', position.Position),
              ('inventory', inventory.Inventory)]
    irows = [[
        D(amt.split()[0]),
        A(amt),
        position.from_string(amt),
        inventory.from_string(amt)
    ] for amt in [
        '123.45678909876 JPY', '1.67321232123 RGAGX', '5.67345434543 USD'
    ]]

    # First check with no explicit quantization: full precision preserved.
    atypes, arows = numberify.numberify_results(itypes, irows)
    erows = [[
        D('123.45678909876'), None, None,
        D('123.45678909876'), None, None,
        D('123.45678909876'), None, None,
        D('123.45678909876')
    ],
             [
                 D('1.67321232123'), None,
                 D('1.67321232123'), None, None,
                 D('1.67321232123'), None, None,
                 D('1.67321232123'), None
             ],
             [
                 D('5.67345434543'),
                 D('5.67345434543'), None, None,
                 D('5.67345434543'), None, None,
                 D('5.67345434543'), None, None
             ]]
    self.assertEqual(erows, arows)

    # Then compare with quantization: the plain-number column keeps full
    # precision; currency columns are quantized per the display format.
    atypes, arows = numberify.numberify_results(itypes, irows, dformat)
    erows = [[
        D('123.45678909876'), None, None,
        D('123'), None, None,
        D('123'), None, None,
        D('123')
    ],
             [
                 D('1.67321232123'), None,
                 D('1.673'), None, None,
                 D('1.673'), None, None,
                 D('1.673'), None
             ],
             [
                 D('5.67345434543'),
                 D('5.67'), None, None,
                 D('5.67'), None, None,
                 D('5.67'), None, None
             ]]
    self.assertEqual(erows, arows)
"amount": "" }, ], } with app.test_request_context(): serialised = loads(dumps(serialise(txn))) assert serialised == json_txn @pytest.mark.parametrize( "pos,amount", [ ((A("100 USD"), None, None, None, None), "100 USD"), ( (A("100 USD"), Cost(D("10"), "EUR", None, None), None, None, None), "100 USD {10 EUR}", ), ( ( A("100 USD"), Cost(D("10"), "EUR", None, None), A("11 EUR"), None, None, ), "100 USD {10 EUR} @ 11 EUR", ), ((A("100 USD"), None, A("11 EUR"), None, None), "100 USD @ 11 EUR"), ( (
def _convert_decimal(raw: str) -> Optional[Decimal]:
    """Parse a dollar-amount string into a Decimal.

    The empty string and '--' denote a missing value and map to None; any
    '$' characters are stripped before parsing.
    """
    if raw == "" or raw == "--":
        return None
    cleaned = raw.replace("$", "")
    return D(cleaned)
def parse_arguments(argv, **kwargs):
    """Build and run the command-line parser for the reconcile web tool.

    Args:
      argv: List of argument strings (excluding the program name), or None
        to use sys.argv.
      **kwargs: Default option values; supplying a non-None value for
        'journal_input' or 'ignored_journal' makes that option optional.

    Returns:
      The parsed argparse.Namespace.
    """
    argparser = argparse.ArgumentParser(
        parents=[reconcile.get_entry_file_selector_argparser(kwargs)])
    argparser.add_argument('--journal_input',
                           help='Top-level Beancount input file',
                           required=kwargs.get('journal_input') is None)
    argparser.add_argument(
        '--ignored_journal',
        help='Beancount input file containing ignored entries',
        required=kwargs.get('ignored_journal') is None)
    argparser.add_argument('--data_sources',
                           help='Data sources JSON specification',
                           type=json.loads,
                           default=[])
    argparser.add_argument(
        '--ignore_account_for_classification_pattern',
        help=
        'Regular expression matching account names that should be ignored for the purpose of automatic classification. Only transactions with exactly two non-ignored postings are used.',
        default=training.DEFAULT_IGNORE_ACCOUNT_FOR_CLASSIFICATION_PATTERN)
    argparser.add_argument(
        '--log-output',
        type=str,
        help='Filename to which log output will be written.')
    argparser.add_argument(
        '--account_pattern',
        type=str,
        help='Regular expression for limiting accounts to reconcile.')
    argparser.add_argument('-p', '--port',
                           type=int,
                           default=8101,
                           help='Port on which webserver listens.')
    argparser.add_argument('-a', '--address',
                           type=str,
                           default='127.0.0.1',
                           help='Address on which webserver listens.')
    argparser.add_argument('--browser',
                           action='store_true',
                           help='Open a web browser automatically.')
    argparser.add_argument('-d', '--debug',
                           help='Set log verbosity to DEBUG.',
                           action='store_const',
                           dest='loglevel',
                           const=logging.DEBUG,
                           default=logging.WARNING)
    # BUG FIX: previously const=logging.DEBUG here, contradicting the help
    # text; --verbose now sets INFO as documented.
    argparser.add_argument('-v', '--verbose',
                           help='Set log verbosity to INFO.',
                           action='store_const',
                           dest='loglevel',
                           const=logging.INFO)
    argparser.add_argument(
        '--fuzzy_match_days',
        type=int,
        default=5,
        help=
        'Maximum number of days by which the dates of two matching entries may differ.'
    )
    argparser.add_argument(
        '--fuzzy_match_amount',
        type=Decimal,
        default=D('0.01'),
        help=
        'Maximum amount by which the weights of two matching entries may differ.'
    )
    argparser.add_argument(
        '--classifier_cache',
        type=str,
        help=
        'Cache file for automatic account prediction classifier. This speeds up loading.'
    )
    argparser.set_defaults(**kwargs)
    return argparser.parse_args(argv)
def __init__(self, path):
    """Parse a single stock-release confirmation document.

    Args:
      path: Path to the release document; its key/value fields are
        extracted via get_release_fields.  Several field names vary across
        document versions, hence the fields.get(...) or fields.get(...)
        fallback chains below.
    """
    fields = self.fields = get_release_fields(path)
    self.award_id = fields['Award ID']
    self.release_date = dateutil.parser.parse(
        fields['Release Date']).date()
    # Settlement date is absent in some document versions.
    if 'Settlement Date' in fields:
        self.settlement_date = dateutil.parser.parse(
            fields['Settlement Date']).date()
    else:
        self.settlement_date = None
    self.symbol = fields['Trading Symbol']
    self.path = path
    # The released share count is denominated in the stock symbol itself.
    self.amount_released = Amount(
        currency=self.symbol,
        number=D(
            fields.get('Quantity Released')
            or fields.get('Quantity Released / Sold')))
    # Fair market value at vest; some versions combine price and date in
    # one field, otherwise the vest date defaults to the release date.
    if 'FMV @ Vest / FMV Date' in fields:
        self.vest_price, self.vest_date = parse_price_and_date(
            fields['FMV @ Vest / FMV Date'])
    else:
        self.vest_price = parse_amount(fields['FMV @ Vest'])
        self.vest_date = self.release_date
    self.fee_amount = parse_amount(
        fields.get('Sup Trn Fee') or fields.get('SuppTranFee'))
    self.total_tax_amount = parse_amount(
        fields.get('Total Tax Amount')
        or fields.get('Total Tax Amount Due'))  # positive
    self.total_release_cost = amount_abs(
        parse_amount(fields['Total Release Cost']))
    # Market value of the release, rounded to cents.
    self.released_market_value = Amount(
        round(self.vest_price.number * self.amount_released.number, 2),
        self.vest_price.currency)
    self.released_market_value_minus_taxes = Amount(
        currency=self.total_tax_amount.currency,
        number=self.released_market_value.number -
        self.total_tax_amount.number)
    self.transfer_amount = None
    self.transfer_description = None
    # Any "... due to participant" field (or 'Excess Amount') names the
    # cash transferred back to the employee; first match wins.
    for excess_field in [
            *(key for key in fields
              if key.lower().endswith('due to participant')),
            'Excess Amount'
    ]:
        if excess_field in fields:
            self.transfer_amount = parse_amount(fields[excess_field])
            self.transfer_description = excess_field
            break
    net_quantity = fields.get('Net Quantity')
    if net_quantity is None:
        self.net_release_shares = None
    else:
        #self.share_price = parse_amount(fields['Share'])
        # Release value after costs
        self.net_release_shares = Amount(currency=self.symbol,
                                         number=D(net_quantity))
    # Sale-related fields are only present for sell-to-cover releases.
    net_proceeds = fields.get('Net Proceeds')
    if net_proceeds is None:
        self.net_proceeds = None
    else:
        self.net_proceeds = parse_amount(net_proceeds)
        self.sale_price, self.sale_date = parse_price_and_date(
            fields['WA Sale Price for Quantity Sold/Sale Date'])
        self.total_proceeds = amount_abs(
            parse_amount(fields['Sale PricexQuantity Sold']))
        # The sale price listed does not have sufficient precision; calculate it from the total instead.
        self.sale_price = amount_div(self.total_proceeds,
                                     self.amount_released.number)
        capital_gains_number = round(
            (self.sale_price.number - self.vest_price.number) *
            self.amount_released.number, 2)
        self.capital_gains = Amount(capital_gains_number,
                                    self.sale_price.currency)
def Panic(self, sale, lots):
    """Convert IB 'sale' rows plus their closed-lot rows into Beancount
    SELL transactions.

    Args:
      sale: DataFrame of sell trade rows.
      lots: DataFrame of closed-lot rows; the lots closed by a given sell
        row follow it (higher index) and share its symbol.

    Returns:
      A list of data.Transaction objects, one per sell row.
    """
    # OMG, IT is happening!!
    Doom = []
    for idx, row in sale.iterrows():
        # continue # debugging
        currency = row['currency']
        currency_IBcommision = row['ibCommissionCurrency']
        symbol = row['symbol']
        proceeds = amount.Amount(row['proceeds'].__round__(2), currency)
        commission = amount.Amount((row['ibCommission'].__round__(2)),
                                   currency_IBcommision)
        quantity = amount.Amount(row['quantity'], symbol)
        price = amount.Amount(row['tradePrice'], currency)
        # NOTE(review): text, number_per and currency_cost are assigned
        # but never used below.
        text = row['description']
        date = row['dateTime'].date()
        number_per = D(row['tradePrice'])
        currency_cost = currency
        # Closed lot rows (potentially multiple) follow sell row
        lotpostings = []
        sum_lots_quantity = 0
        # mylots: lots closed by sale 'row'
        # symbol must match; begin at the row after the sell row
        # we do not know the number of lot rows; stop iteration if quantity is enough
        mylots = lots[(lots['symbol'] == row['symbol'])
                      & (lots.index > idx)]
        for li, clo in mylots.iterrows():
            sum_lots_quantity += clo['quantity']
            if sum_lots_quantity > -row['quantity']:
                # oops, too many lots (warning issued below)
                break
            # Each matched lot becomes a posting that reduces the position
            # at its original cost basis.
            cost = position.CostSpec(
                number_per=0 if self.suppressClosedLotPrice else round(
                    clo['tradePrice'], 2),
                number_total=None,
                currency=clo['currency'],
                date=clo['openDateTime'].date(),
                label=None,
                merge=False)
            lotpostings.append(
                data.Posting(self.getAssetAccount(symbol),
                             amount.Amount(-clo['quantity'], clo['symbol']),
                             cost, price, None, None))
            if sum_lots_quantity == -row['quantity']:
                # Exact match is expected:
                # all lots found for this sell transaction
                break
        if sum_lots_quantity != -row['quantity']:
            warnings.warn(f"Lots matching failure: sell index={idx}")
        postings = [
            # data.Posting(self.getAssetAccount(symbol), # this first posting is probably wrong
            # quantity, None, price, None, None),
            data.Posting(self.getLiquidityAccount(currency), proceeds,
                         None, None, None, None)
        ] + \
            lotpostings + \
            [data.Posting(self.getPNLAccount(symbol), None, None, None, None, None),
             data.Posting(self.getLiquidityAccount(currency_IBcommision),
                          commission, None, None, None, None),
             data.Posting(self.getFeesAccount(currency_IBcommision),
                          minus(commission), None, None, None, None)
             ]
        Doom.append(
            data.Transaction(
                data.new_metadata('Buy', 0),
                date,
                self.flag,
                symbol,  # payee
                ' '.join(
                    ['SELL', quantity.to_string(), '@', price.to_string()]),
                data.EMPTY_SET,
                data.EMPTY_SET,
                postings))
    return Doom
def generate_table(self, entries, errors, options_map):
    """Build a fixed two-row demo table of (account, balance) records."""
    Row = collections.namedtuple('ABC', 'account balance')
    rows = [Row('account1', D(2000)), Row('account2', D(5000))]
    return table.create_table(rows)
def get_txns_from_text(self, corpus, f):
    """Parse one page of credit-card statement text into transactions.

    Sample input::

        BASIC CARD - CHEONG YIU FUNG 4518-3545-XXXX-XXXX
        22 SEP 23 SEP 7-ELEVEN, HK (1535) SHATIN HK 13.50
        24 SEP 25 SEP THE H.K. MI-HOME HONG KONG HK 219.00
        24 SEP 26 SEP MCDONALD'S-102-FULL WI HONG KONG HK 53.50
        26 SEP 27 SEP TSUI WAH RESTAURANT MONG KOK HK 119.00

    Observations:
    1) New transaction starts at lines with a new transaction date
    2) Amount is at the same line of new transaction

    Args:
      corpus: Full text of the statement page.
      f: The statement file object (queried for its date and account).

    Returns:
      A list of entries created via self.create_txn.
    """
    statement_date = self.file_date(f)

    def is_useful_lines(line):
        # Skip useless lines. It's either the OPENING BALANCE, or the line
        # that indicates beginning of transactions, which starts with
        # account number
        line = line.strip()
        return (not line.startswith("SUBTOTAL")) and (
            not line.startswith("ODD CENTS")) and (
                not line.startswith("PREVIOUS BALANCE")) and (
                    not line.startswith("BASIC CARD"))

    lines = corpus.split('\n')
    lines = list(filter(is_useful_lines, lines))

    # Hmm. Next is a terrible trick here where we reconstruct the lines so
    # that dates could be aligned. See magic-number-master-power.png. (DBS
    # has similar issues)
    lines = [' '.join(l[:36].split()) + l[36:] for l in lines]

    # Remove empty strings '' from list
    lines = list(filter(None, [l.rstrip() for l in lines]))
    if self.debug:
        print("Actual Parsed lines:")
        print('\n'.join(lines))
        print("padwidth: {}".format(self.pad_width))
        print("Account: {}".format(self.file_account(f)))

    # Prepare variables
    entries = []
    narration = ''  # Initialize narration
    for line_no in range(len(lines)):
        line = lines[line_no]
        if self.debug:
            print("Line: {}".format(line))
        # If starts with a digit, it indicates a date line so we parse it
        if line[0].isdigit():
            # Fixed-width unpack of txn date, post date, description and
            # amount columns, after padding the line to the full width.
            str_txn_date, str_post_date, description, str_amount = [
                x.decode().strip() for x in struct.unpack(
                    self.unpack_format,
                    str.encode(line.ljust(self.pad_width)))
            ]
            if self.debug:
                print(
                    "{0: >10} {1: >10} description {2: >20} amount {3: >15}"
                    .format(str_txn_date, str_post_date, description,
                            str_amount))
            post_date = datetime.strptime(str_post_date, "%d %b")
            txn_date = datetime.strptime(str_txn_date, "%d %b")
            # Cross-year handling
            if statement_date.month == 1 and post_date.month == 12:
                post_date = post_date.replace(
                    year=statement_date.year - 1).date()
                txn_date = txn_date.replace(
                    year=statement_date.year - 1).date()
            else:
                post_date = post_date.replace(
                    year=statement_date.year).date()
                txn_date = txn_date.replace(
                    year=statement_date.year).date()
            amount = str_amount.replace(",", "")
            # 'CR' suffix marks a credit; plain amounts are debits.
            txn_amount = D(
                amount[:-2]) if amount[-2:] == 'CR' else -D(amount)
        if line[0].isdigit():
            # If it's a transaction line, description is extracted
            narration = ' '.join(
                [narration, ' '.join(description.split())])
        else:
            # Otherwise, the whole line is description
            narration = ' '.join(
                [narration, ' '.join(line.strip().split())])
        # Only create the new transaction when we see the next line starts
        # with a digit (indicating that's next transaction), or when we're
        # at the last line already.
        # NOTE(review): if a description-only line preceded the first date
        # line, post_date/txn_date/txn_amount would be unbound here —
        # presumably the filtered input always starts with a date line;
        # confirm.
        if line_no == (len(lines) - 1) or lines[line_no + 1][0].isdigit():
            entries.append(
                self.create_txn(f, line_no, narration.strip(), post_date,
                                txn_date, txn_amount))
            narration = ''  # Reset title for next transaction
    return entries
def test_net_worth(example_ledger):
    """Spot-check two points of the monthly net-worth series."""
    series = example_ledger.charts.net_worth(Interval.MONTH)
    jan_2015 = series[-18]
    assert jan_2015['date'] == datetime.date(2015, 1, 1)
    assert jan_2015['balance']['USD'] == D('39125.34004')
    latest = series[-1]
    assert latest['date'] == datetime.date(2016, 5, 10)
    assert latest['balance']['USD'] == D('102327.53144')
def fetch_balance(self, name, item, access_token):
    """Fetch current Plaid balances for one item and emit a Pad + Balance
    directive pair for each configured account.

    Args:
      name: Human-readable item name, used only for logging.
      item: Config dict with an "accounts" list; only accounts whose
        "sync" field is "balance" are processed.
      access_token: Plaid access token for the item.
    """
    try:
        response = self.client.Accounts.get(access_token)
    except plaid.errors.PlaidError as e:
        logging.warning("Plaid error: %s", e.message)
        return
    if self.args.debug:
        pretty_print_stderr(response)
    if "accounts" not in response:
        logging.warning("No accounts, aborting")
        return
    assert "accounts" in item
    for account_def in item["accounts"]:
        if account_def["sync"] != "balance":
            continue
        # checking for every configured account in the response
        account_res = next(
            filter(
                lambda tacc: account_def["id"] == tacc["account_id"],
                response["accounts"],
            ),
            None,
        )
        if account_res is None:
            logging.warning("Not present in response: %s",
                            account_def["name"])
            continue
        assert "balances" in account_res
        assert (account_def["currency"] == account_res["balances"]
                ["iso_currency_code"])
        if ("current" not in account_res["balances"]
                or account_res["balances"]["current"] is None):
            logging.warning("No 'current' account balance, aborting")
            continue
        bal = D(account_res["balances"]["current"])
        # sadly, plaid-python parses as `float` https://github.com/plaid/plaid-python/issues/136
        bal = round(bal, 2)
        if account_res["type"] in {"credit", "loan"}:
            # the balance is a liability in the case of credit cards, and loans
            # https://plaid.com/docs/#account-types
            bal = -bal
        meta = data.new_metadata("foo", 0)
        balance_entry = Balance(  # pylint: disable=not-callable
            meta=meta,
            date=date.today(),
            account=account_def["name"],
            amount=Amount(bal, account_def["currency"]),
            tolerance=None,
            diff_amount=None,
        )
        # Emit a Pad before the Balance so the assertion always holds.
        ledger = []
        ledger.append(self.pad(meta, account_def["name"]))
        ledger.append(balance_entry)
        if self.output_mode == "text":
            print(
                f"; = {account_def['name']}, {account_def['currency']} =")
            for entry in ledger:
                out = printer.format_entry(entry)
                print(out)
        else:
            assert self.output_mode == "db"
            self.output[account_def["name"]] = ledger
    logging.info("Done %s", name)
    if self.output_mode == "text":
        print()  # newline
def fetch_transactions(self, name, item, access_token):
    """Fetch recent Plaid transactions for one item and emit ledger
    entries (plus a trailing Balance directive) for each configured
    account.

    Args:
      name: Human-readable item name, used only for logging.
      item: Config dict with an "accounts" list; only accounts whose
        "sync" field is "transactions" are processed.
      access_token: Plaid access token for the item.
    """
    # Pull transactions for the last 30 days
    start_date = "{:%Y-%m-%d}".format(datetime.now() +
                                      timedelta(days=-self.args.days))
    end_date = "{:%Y-%m-%d}".format(datetime.now())
    # the transactions in the response are paginated, so make multiple calls while increasing the offset to
    # retrieve all transactions
    transactions = []
    total_transactions = 1
    first_response = None
    while len(transactions) < total_transactions:
        try:
            response = self.client.Transactions.get(
                access_token, start_date, end_date,
                offset=len(transactions))
        except plaid.errors.PlaidError as e:
            logging.warning("Plaid error: %s", e.message)
            return
        transactions.extend(response["transactions"])
        if first_response is None:
            first_response = response
        total_transactions = response["total_transactions"]
        if self.args.debug:
            pretty_print_stderr(response)
    if "accounts" not in first_response:
        logging.warning("No accounts, aborting")
        return
    assert "accounts" in item
    for account in item["accounts"]:
        if account["sync"] != "transactions":
            continue
        currency = account["currency"]
        # checking for every configured account in the response
        t_account = next(
            filter(
                lambda tacc: account["id"] == tacc["account_id"],
                first_response["accounts"],
            ),
            None,
        )
        if t_account is None:
            logging.warning("Not present in response: %s",
                            account["name"])
            continue
        ledger = []
        for transaction in transactions:
            if account["id"] != transaction["account_id"]:
                continue
            assert currency == transaction["iso_currency_code"]
            if transaction["pending"]:
                # we want to wait for the transaction to be posted
                continue
            amount = D(transaction["amount"])
            # sadly, plaid-python parses as `float` https://github.com/plaid/plaid-python/issues/136
            amount = round(amount, 2)
            # Plaid reports outflows as positive; negate for the ledger.
            posting = Posting(account["name"], Amount(-amount, currency),
                              None, None, None, None)
            ref = data.new_metadata("foo", 0)
            entry = Transaction(  # pylint: disable=not-callable
                meta=ref,
                date=date.fromisoformat(transaction["date"]),
                flag=flags.FLAG_OKAY,
                payee=transaction["name"],
                narration="",
                tags=data.EMPTY_SET,
                links=data.EMPTY_SET,
                postings=[posting],
            )
            ledger.append(entry)
        ledger.reverse(
        )  # API returns transactions in reverse chronological order
        if self.output_mode == "text":
            # print entries to stdout
            print("; = {}, {} =".format(account["name"], currency))
            print("; {} transactions\n".format(len(ledger)))
        # flag the duplicates
        self.annotate_duplicate_entries(ledger)
        # add the balance directive
        if "current" in t_account["balances"]:
            # NOTE(review): D() is applied before the None check below; if
            # "current" is present but None this relies on D(None) being
            # tolerated — consider testing for None before converting.
            bal = D(t_account["balances"]["current"])
            # sadly, plaid-python parses as `float` https://github.com/plaid/plaid-python/issues/136
            bal = round(bal, 2)
            if t_account["type"] in {"credit", "loan"}:
                # the balance is a liability in the case of credit cards, and loans
                # https://plaid.com/docs/#account-types
                bal = -bal
            if t_account["balances"]["current"] != None:
                meta = data.new_metadata("foo", 0)
                entry = Balance(  # pylint: disable=not-callable
                    meta=meta,
                    date=date.today(),
                    account=account["name"],
                    amount=Amount(bal, currency),
                    tolerance=None,
                    diff_amount=None,
                )
                ledger.append(entry)
        if self.output_mode == "db":
            # write the account's ledger to intermediate output, pickled file
            self.output[account["name"]] = ledger
        else:
            assert self.output_mode == "text"
            # print out all the entries
            for entry in ledger:
                out = printer.format_entry(entry)
                if DUPLICATE_META in entry.meta:
                    out = textwrap.indent(out, "; ")
                print(out)
    logging.info("Done %s", name)
    if self.output_mode == "text":
        print()  # newline
def test_nones(self):
    """None values render as blank fields of the renderer's width."""
    renderer = self.get(None, D('0.1234'), None)
    self.assertEqual('1 ', renderer.format(D('1')))
    self.assertEqual(' ', renderer.format(None))
def test_round_to(self):
    """round_to() snaps a value to a multiple of the given increment."""
    cases = [
        ('135.12', '135.12345', '0.01'),
        ('135.12', '135.12987', '0.01'),
        ('-135.12', '-135.12345', '0.01'),
        ('-135.12', '-135.12987', '0.01'),
        ('130', '135.12345', '10'),
        ('130', '135.12987', '10'),
        ('-130', '-135.12345', '10'),
        ('-130', '-135.12987', '10'),
    ]
    for expected, value, increment in cases:
        self.assertEqual(D(expected),
                         number.round_to(D(value), D(increment)))
def setUp(self):
    """Build a display context seeded with 2-decimal USD and CAD."""
    self.dcontext = display_context.DisplayContext()
    for currency in ('USD', 'CAD'):
        self.dcontext.update(D('1.00'), currency)
def test_same_sign(self):
    """same_sign(): zero pairs with positives (and zero), not negatives."""
    same = [
        (D('135.12345'), D('234.20')),
        (D('-135.12345'), D('-234.20')),
        (D('135.12345'), ZERO),
        (ZERO, D('135.12345')),
        (ZERO, ZERO),
    ]
    different = [
        (D('135.12345'), D('-234.20')),
        (D('-135.12345'), D('234.20')),
        (D('-135.12345'), ZERO),
        (ZERO, D('-135.12345')),
    ]
    for left, right in same:
        self.assertTrue(number.same_sign(left, right))
    for left, right in different:
        self.assertFalse(number.same_sign(left, right))
def test_expr_constant(self):
    """Compiling a Constant yields an equivalent EvalConstant node."""
    compiled = qc.compile_expression(qp.Constant(D(17)),
                                     qe.TargetsEnvironment())
    self.assertEqual(qc.EvalConstant(D(17)), compiled)
def test_integer(self):
    """A one-digit integer distribution renders a one-digit value as-is."""
    renderer = self.get(D('1'))
    self.assertEqual('2', renderer.format(D('2')))
def _make_import_result(self, txn_id: str, data: Dict[str, Any],
                        json_path: str):
    """Convert one PayPal JSON transaction record into an ImportResult.

    Args:
      txn_id: PayPal transaction identifier.
      data: Decoded JSON transaction record.
      json_path: Path of the source JSON file, recorded in the result's
        info dict.

    Returns:
      An ImportResult wrapping a single Transaction, or None for PENDING
      transactions (they are skipped until posted).
    """
    if data.get('status') == 'PENDING':
        return None
    date = dateutil.parser.parse(data['date']).date()
    payee = data['counterparty']['name']
    narration = data['transactionType']
    txn_meta = collections.OrderedDict()  # type: Meta
    counterparty_metadata = [(self.prefix + '_counterparty', payee)]
    funding_source_metadata = []  # type: List[Tuple[str, Any]]
    # Optional counterparty contact details become posting metadata.
    for key in ('email', 'url', 'phone'):
        if key in data['counterparty']:
            value = data['counterparty'][key]
            if not value:
                continue
            counterparty_metadata.append(
                (self.prefix + '_counterparty_' + key, value))
    if 'merchantCategory' in data:
        counterparty_metadata.append(
            (self.prefix + '_merchant_category',
             data['merchantCategory']))
    if 'invoiceId' in data:
        counterparty_metadata.append(
            (self.prefix + '_invoice_id', data['invoiceId']))
    if 'notesInfo' in data:
        # The free-form note is appended to the narration as well.
        note = data['notesInfo']['note']
        note = re.sub(r'\s+', ' ', note)
        narration += ' - ' + note
        counterparty_metadata.append((self.prefix + '_note', note))
    transaction = Transaction(
        meta=txn_meta,
        date=date,
        flag='*',
        payee=payee,
        narration=narration,
        links=frozenset([self.link_prefix + txn_id]),
        tags=EMPTY_SET,
        postings=[])
    is_credit = data['isCredit']
    # grossAmount is the counterparty-side total; netAmount is what the
    # funding source saw; feeAmount is PayPal's cut.
    counterparty_amount = parse_amount(data['amount']['grossAmount'])
    funding_source_amount = parse_amount(data['amount']['netAmount'])
    fee_amount = parse_amount(data['amount']['feeAmount'])
    transaction_type_enum = data['transactionTypeEnum']
    # Metadata added to postings to the `self.assets_account` account.
    assets_account_metadata = [
        (self.transaction_meta_key, txn_id),
        (POSTING_DATE_KEY, date),
    ]
    # If True, the posting legs to the funding source are negative, and all
    # other legs are positive.
    negate_funding_source_amounts = True
    counterparty_remainder_account = FIXME_ACCOUNT + ':A'
    # Choose the sign convention and remainder account per transaction
    # type.
    if transaction_type_enum == 'TRANSFER_TO_BANK':
        counterparty_remainder_account = self.assets_account
        counterparty_metadata = []
        negate_funding_source_amounts = False
    elif transaction_type_enum == 'TRANSFER_FROM_BANK':
        counterparty_remainder_account = self.assets_account
        counterparty_metadata = []
        negate_funding_source_amounts = True
    elif transaction_type_enum.endswith(
            '_SENT') or transaction_type_enum.endswith('_PURCHASE'):
        negate_funding_source_amounts = True
    elif transaction_type_enum.endswith('_RECEIVED'):
        negate_funding_source_amounts = False
    elif transaction_type_enum == 'MONEY_TRANSFER':
        counterparty_remainder_account = self.assets_account
        funding_source_metadata = counterparty_metadata
        counterparty_metadata = []
        negate_funding_source_amounts = is_credit
    elif transaction_type_enum == 'REFUND':
        negate_funding_source_amounts = False
    elif transaction_type_enum == 'EBAY_SALE':
        negate_funding_source_amounts = False
    elif transaction_type_enum == 'INVOICE_PAID':
        pass
    else:
        raise RuntimeError('Unknown transaction type: %s' %
                           transaction_type_enum)
    negate_counterparty_amounts = not negate_funding_source_amounts
    if negate_funding_source_amounts:
        funding_source_amount = -funding_source_amount
    else:
        funding_source_amount = funding_source_amount
    # Tracks how much of the gross amount the itemized postings cover;
    # whatever remains goes to the remainder account below.
    counterparty_inventory = SimpleInventory()
    counterparty_inventory += counterparty_amount

    def add_counterparty_posting(amount,
                                 extra_meta=[],
                                 account=FIXME_ACCOUNT + ':A'):
        # Append one counterparty leg and deduct it from the inventory.
        nonlocal counterparty_inventory
        if amount.number == ZERO:
            return
        counterparty_inventory -= amount
        if negate_counterparty_amounts:
            amount = -amount
        meta = counterparty_metadata + extra_meta
        if account == self.assets_account:
            meta.extend(assets_account_metadata)
        transaction.postings.append(
            Posting(
                meta=collections.OrderedDict(meta),
                account=account,
                units=amount,
                cost=None,
                price=None,
                flag=None,
            ))

    if fee_amount.number != ZERO:
        if negate_counterparty_amounts and transaction_type_enum != 'EBAY_SALE':
            amount = -fee_amount
        else:
            amount = fee_amount
        transaction.postings.append(
            Posting(
                meta=collections.OrderedDict(),
                account=self.fee_account,
                units=amount,
                cost=None,
                price=None,
                flag=None,
            ))
    if 'itemDetails' in data:
        # One posting per purchased item, carrying per-item metadata.
        for item in data['itemDetails']['itemList']:
            units = parse_amount(item['itemTotalPrice'])
            if 'quantity' in item:
                quantity = D(item['quantity'])
            else:
                quantity = None
            # Fall back to the unit price when the total is zero.
            if units.number == ZERO and 'price' in item:
                units = parse_amount(item['price'])
            extra_meta = [
                (self.prefix + '_item_name', item['name']),
            ]
            for key in ('url', 'number', 'description'):
                value = item.get(key, None)
                if value:
                    extra_meta.append(
                        (self.prefix + '_item_%s' % key, value))
            if quantity is not None:
                extra_meta.append(
                    (self.prefix + '_item_quantity', quantity))
            add_counterparty_posting(amount=units, extra_meta=extra_meta)
            if 'discounts' in item:
                for discount in item['discounts']:
                    units = -parse_amount(discount['price'])
                    add_counterparty_posting(
                        amount=units,
                        extra_meta=[
                            (self.prefix + '_item_discount',
                             discount['name']),
                        ])
        for key in ('salesTax', 'shippingAmount'):
            if key in data['itemDetails']:
                units = parse_amount(data['itemDetails'][key])
                add_counterparty_posting(
                    amount=units,
                    extra_meta=[
                        (self.prefix + '_item_type', key),
                    ])
        if 'discount' in data['itemDetails']:
            for discount in data['itemDetails']['discount']:
                units = -parse_amount(discount['value'])
                add_counterparty_posting(
                    amount=units,
                    extra_meta=[
                        (self.prefix + '_item_discount',
                         discount['name']),
                    ])
    # Post the uncovered remainder of the gross amount.  Iterate over a
    # copy, since add_counterparty_posting mutates the inventory.
    counterparty_inventory_copy = counterparty_inventory.copy()
    for currency in counterparty_inventory_copy:
        add_counterparty_posting(
            Amount(
                currency=currency,
                number=counterparty_inventory_copy[currency]),
            account=counterparty_remainder_account)
    funding_source_inventory = SimpleInventory()
    funding_source_inventory += funding_source_amount
    funding_source_account = FIXME_ACCOUNT
    if transaction_type_enum in ('SEND_MONEY_RECEIVED', 'EBAY_SALE'):
        funding_source_account = self.assets_account
        assert 'fundingSource' not in data
        funding_source_metadata = assets_account_metadata
    if 'fundingSource' in data:
        for source in data['fundingSource']['fundingSourceList']:
            meta = collections.OrderedDict()  # type: Meta
            account = FIXME_ACCOUNT
            source_type = source['type']
            if (source_type == 'BALANCE'
                    or (transaction_type_enum == 'SEND_MONEY_SENT'
                        and source_type != 'CREDIT_CARD')):
                # For SEND_MONEY_SENT, sources other than CREDIT_CARD
                # are actually handled by a separate transfer transaction.
                account = self.assets_account
                meta.update(assets_account_metadata)
            else:
                for key, meta_suffix in [
                    ('issuer_product_description',
                     'funding_source_description'),
                    ('institution', 'funding_source_institution'),
                    ('last4', 'funding_source_last4'),
                ]:
                    if key in source:
                        meta[self.prefix + '_' + meta_suffix] = source[key]
            # FIXME handle currencyCode
            units = parse_amount(source['amount'])
            if negate_funding_source_amounts:
                units = -units
            funding_source_inventory -= units
            transaction.postings.append(
                Posting(
                    meta=meta,
                    account=account,
                    units=units,
                    cost=None,
                    price=None,
                    flag=None,
                ))
    # Balance any residual against the funding source account.
    for currency in funding_source_inventory:
        transaction.postings.append(
            Posting(
                account=funding_source_account,
                units=Amount(
                    currency=currency,
                    number=funding_source_inventory[currency]),
                cost=None,
                price=None,
                flag=None,
                meta=collections.OrderedDict(funding_source_metadata),
            ))
    return ImportResult(
        date=transaction.date,
        info=dict(
            type='application/json',
            filename=json_path,
        ),
        entries=[transaction])
def test_integers(self):
    """Format an integer value with a renderer built from integer-only samples."""
    renderer = self.get(D('1'), D('222'), D('33'))
    formatted = renderer.format(D('444'))
    self.assertEqual('444', formatted)
serialised = loads(dumps(serialise(txn))) assert serialised == json_txn txn = txn._replace(payee=None) serialised = loads(dumps(serialise(txn))) assert serialised == json_txn @pytest.mark.parametrize( "amount_cost_price,amount_string", [ ((A("100 USD"), None, None), "100 USD"), ( ( A("100 USD"), CostSpec(D("10"), None, "EUR", None, None, False), None, ), "100 USD {10 EUR}", ), ( ( A("100 USD"), CostSpec(D("10"), None, "EUR", None, None, False), A("11 EUR"), ), "100 USD {10 EUR} @ 11 EUR", ), ((A("100 USD"), None, A("11 EUR")), "100 USD @ 11 EUR"), ( (
def test_fractional(self):
    """Format decimal values with a renderer built from fractional samples."""
    renderer = self.get(D('1.23'), D('1.2345'), D('2.345'))
    for expected, value in (('1 ', '1'), ('2.3456', '2.34567890')):
        self.assertEqual(expected, renderer.format(D(value)))
def test_parse_number():
    """parse_number handles fractions as well as integer and decimal strings."""
    cases = [
        ("5/2", D("2.5")),
        ("5", D("5")),
        ("12.345", D("12.345")),
    ]
    for raw, expected in cases:
        assert parse_number(raw) == expected
def test_mixed(self):
    """Format a value with a renderer built from mixed integer/fractional samples."""
    renderer = self.get(D('1000'), D('0.12334'))
    formatted = renderer.format(D('1'))
    self.assertEqual(' 1 ', formatted)
def test_insert_entry_transaction(tmpdir):
    """Check that insert_entry writes a transaction into a Beancount file.

    Exercises three scenarios against the same sample file:
      1. no insert options: the new entry is appended at the end,
      2. options whose regex matches the 'Expenses:Food' posting: the new
         entry is inserted at the matching option's position,
      3. options whose regex matches the 'Liabilities:...:Slate' posting.

    NOTE(review): the exact whitespace inside the expected-file string
    literals (posting indentation, amount alignment, blank lines between
    entries) was reconstructed — confirm against the original file.
    """
    # Seed file with a single existing transaction.
    file_content = dedent(
        """
        2016-02-26 * "Uncle Boons" "Eating out alone"
          Liabilities:US:Chase:Slate -24.84 USD
          Expenses:Food:Restaurant 24.84 USD
        """
    )
    samplefile = tmpdir.mkdir('fava_util_file3').join('example.beancount')
    samplefile.write(file_content)

    # The transaction that each call below will insert.
    postings = [
        data.Posting(
            'Liabilities:US:Chase:Slate',
            amount.Amount(D('-10.00'), 'USD'),
            None,
            None,
            None,
            None,
        ),
        data.Posting(
            'Expenses:Food',
            amount.Amount(D('10.00'), 'USD'),
            None,
            None,
            None,
            None,
        ),
    ]
    transaction = data.Transaction(
        None,
        datetime.date(2016, 1, 1),
        '*',
        'new payee',
        'narr',
        None,
        None,
        postings,
    )

    # Case 1: no insert options -> entry is appended at the end of the file.
    insert_entry(transaction, [str(samplefile)], {})
    assert samplefile.read() == dedent(
        """
        2016-02-26 * "Uncle Boons" "Eating out alone"
          Liabilities:US:Chase:Slate -24.84 USD
          Expenses:Food:Restaurant 24.84 USD

        2016-01-01 * "new payee" "narr"
          Liabilities:US:Chase:Slate -10.00 USD
          Expenses:Food 10.00 USD
        """
    )

    # Case 2: three options; the applicable one (account regex matches the
    # 'Expenses:Food' posting) places the new entry at the top of the file.
    options = [
        InsertEntryOption(
            datetime.date(2015, 1, 1),
            re.compile('.*:Food'),
            str(samplefile),
            2,
        ),
        InsertEntryOption(
            datetime.date(2015, 1, 2),
            re.compile('.*:FOOO'),
            str(samplefile),
            2,
        ),
        InsertEntryOption(
            datetime.date(2017, 1, 1),
            re.compile('.*:Food'),
            str(samplefile),
            6,
        ),
    ]
    insert_entry(transaction, [str(samplefile)], {'insert-entry': options})
    assert samplefile.read() == dedent(
        """
        2016-01-01 * "new payee" "narr"
          Liabilities:US:Chase:Slate -10.00 USD
          Expenses:Food 10.00 USD

        2016-02-26 * "Uncle Boons" "Eating out alone"
          Liabilities:US:Chase:Slate -24.84 USD
          Expenses:Food:Restaurant 24.84 USD

        2016-01-01 * "new payee" "narr"
          Liabilities:US:Chase:Slate -10.00 USD
          Expenses:Food 10.00 USD
        """
    )

    # Case 3: option matching the liabilities posting inserts the entry at
    # the option's recorded position.
    options = [
        InsertEntryOption(
            datetime.date(2015, 1, 1),
            re.compile('.*:Slate'),
            str(samplefile),
            5,
        ),
        InsertEntryOption(
            datetime.date(2015, 1, 2),
            re.compile('.*:FOOO'),
            str(samplefile),
            2,
        ),
    ]
    insert_entry(transaction, [str(samplefile)], {'insert-entry': options})
    assert samplefile.read() == dedent(
        """
        2016-01-01 * "new payee" "narr"
          Liabilities:US:Chase:Slate -10.00 USD
          Expenses:Food 10.00 USD

        2016-01-01 * "new payee" "narr"
          Liabilities:US:Chase:Slate -10.00 USD
          Expenses:Food 10.00 USD

        2016-02-26 * "Uncle Boons" "Eating out alone"
          Liabilities:US:Chase:Slate -24.84 USD
          Expenses:Food:Restaurant 24.84 USD

        2016-01-01 * "new payee" "narr"
          Liabilities:US:Chase:Slate -10.00 USD
          Expenses:Food 10.00 USD
        """
    )
def test_zero_integers(self):
    """Format a value with a renderer built from a sample with no integer digits."""
    renderer = self.get(D('0.1234'))
    formatted = renderer.format(D('1'))
    self.assertEqual('1 ', formatted)
def t_DECIMAL(self, token):
    # PLY lexer token rule. NOTE: the docstring below is NOT documentation --
    # PLY reads the function's __doc__ and uses it as the regular expression
    # for this token, so it must not be edited or reformatted. It matches a
    # number with a decimal point and digits on at least one side of it.
    r"([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)"
    # Replace the matched lexeme string with a Decimal value so downstream
    # parser actions receive a numeric token value.
    token.value = D(token.value)
    return token
def test_reduce_relative(self):
    """Test holdings.reduce_relative, which converts absolute cost/market
    values into fractions relative to the totals per cost currency."""
    # Test with a few different cost currencies: USD holdings are scaled
    # relative to the USD totals, the CAD holding relative to itself.
    test_holdings = list(itertools.starmap(holdings.Holding, [
        (None, D('1'), 'BLA', D('200'), 'USD', D('10'), D('1000'), D('1100'), None),
        (None, D('1'), 'BLA', D('200'), 'USD', D('10'), D('3000'), D('300'), None),
        (None, D('1'), 'BLA', D('200'), 'CAD', D('10'), D('500'), D('600'), None),
    ]))
    converted_holdings = holdings.reduce_relative(test_holdings)
    # NOTE(review): the expected output also appears sorted by decreasing
    # relative market value within each cost currency -- confirm against
    # reduce_relative's contract.
    expected_holdings = list(itertools.starmap(holdings.Holding, [
        (None, D('1'), 'BLA', D('200'), 'USD', D('0.5'), D('0.75'), D('300'), None),
        (None, D('1'), 'BLA', D('200'), 'USD', D('0.5'), D('0.25'), D('1100'), None),
        (None, D('1'), 'BLA', D('200'), 'CAD', D('1'), D('1'), D('600'), None),
    ]))
    self.assertEqual(expected_holdings, converted_holdings)

    # Test with a single cost currency (and some Nones), ensure the total is 100%.
    test_holdings = list(itertools.starmap(holdings.Holding, [
        (None, D('1'), 'BLA', D('200'), 'USD', D('10'), D('1000'), D('1100'), None),
        (None, D('1'), 'BLA', D('200'), 'USD', D('10'), D('3000'), D('300'), None),
        (None, D('1'), 'BLA', D('200'), None, None, None, D('600'), None),
    ]))
    converted_holdings = holdings.reduce_relative(test_holdings)
    # Holdings without a cost currency pass through with their None fields
    # preserved and do not contribute to the relative totals.
    expected_holdings = list(itertools.starmap(holdings.Holding, [
        (None, D('1'), 'BLA', D('200'), 'USD', D('0.5'), D('0.75'), D('300'), None),
        (None, D('1'), 'BLA', D('200'), 'USD', D('0.5'), D('0.25'), D('1100'), None),
        (None, D('1'), 'BLA', D('200'), None, None, None, D('600'), None),
    ]))
    self.assertEqual(expected_holdings, converted_holdings)
    # The relative market values of the priced holdings must sum to 1 (100%).
    self.assertEqual(D('1'), sum(holding.market_value or ZERO
                                 for holding in converted_holdings))