def compose(db, source, order_match_id):
    # The ID is two transaction hashes concatenated as hex, so each hash occupies 64 characters.
    tx0_hash, tx1_hash = order_match_id[:64], order_match_id[64:]

    destination, btc_quantity, escrowed_asset, escrowed_quantity, order_match, problems = validate(db, source, order_match_id, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    # Warn if down to the wire.
    time_left = order_match['match_expire_index'] - util.last_block(db)['block_index']
    if time_left < 4:
        logging.warning('WARNING: Only {} blocks until that order match expires. The payment might not make it into the blockchain in time.'.format(time_left))
    if 10 - time_left < 4:
        logging.warning('WARNING: Order match has only {} confirmation(s).'.format(10 - time_left))

    tx0_hash_bytes, tx1_hash_bytes = binascii.unhexlify(bytes(tx0_hash, 'utf-8')), binascii.unhexlify(bytes(tx1_hash, 'utf-8'))
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, tx0_hash_bytes, tx1_hash_bytes)
    return (source, [(destination, btc_quantity)], data)
def compose(db, source, quantity_per_unit, asset, dividend_asset):
    dividend_total, outputs, problems, fee = validate(db, source, quantity_per_unit, asset, dividend_asset, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)
    logging.info('Total quantity to be distributed in dividends: {} {}'.format(util.devise(db, dividend_total, dividend_asset, 'output'), dividend_asset))

    if dividend_asset == config.BTC:
        return (source, [(output['address'], output['dividend_quantity']) for output in outputs], None)

    asset_id = util.get_asset_id(db, asset, util.last_block(db)['block_index'])
    dividend_asset_id = util.get_asset_id(db, dividend_asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT_2, quantity_per_unit, asset_id, dividend_asset_id)
    return (source, [], data)
def compose(db, source, quantity_per_unit, asset, dividend_asset):
    dividend_total, outputs, problems, fee = validate(db, source, quantity_per_unit, asset, dividend_asset, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)
    logging.info('Total quantity to be distributed in dividends: {} {}'.format(util.devise(db, dividend_total, dividend_asset, 'output'), dividend_asset))

    if dividend_asset == config.BTC:
        return (source, [(output['address'], output['dividend_quantity']) for output in outputs], None)

    asset_id = util.get_asset_id(asset, util.last_block(db)['block_index'])
    dividend_asset_id = util.get_asset_id(dividend_asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT_2, quantity_per_unit, asset_id, dividend_asset_id)
    return (source, [], data)
def get_running_info():
    latestBlockIndex = backend.getblockcount(self.proxy)

    try:
        check.database_state(db, latestBlockIndex)
    except exceptions.DatabaseError:
        caught_up = False
    else:
        caught_up = True

    try:
        last_block = util.last_block(db)
    except:
        last_block = {'block_index': None, 'block_hash': None, 'block_time': None}

    try:
        last_message = util.last_message(db)
    except:
        last_message = None

    return {
        'db_caught_up': caught_up,
        'bitcoin_block_count': latestBlockIndex,
        'last_block': last_block,
        'last_message_index': last_message['message_index'] if last_message else -1,
        'running_testnet': config.TESTNET,
        'running_testcoin': config.TESTCOIN,
        'version_major': config.VERSION_MAJOR,
        'version_minor': config.VERSION_MINOR,
        'version_revision': config.VERSION_REVISION
    }
def market(give_asset, get_asset):
    # Your pending order matches.
    addresses = []
    for bunch in bitcoin.get_wallet():
        addresses.append(bunch[:2][0])
    filters = [
        ('tx0_address', 'IN', addresses),
        ('tx1_address', 'IN', addresses)
    ]
    awaiting_btcs = util.api('get_order_matches', {'filters': filters, 'filterop': 'OR', 'status': 'pending'})
    table = PrettyTable(['Matched Order ID', 'Time Left'])
    for order_match in awaiting_btcs:
        order_match = format_order_match(db, order_match)
        table.add_row(order_match)
    print('Your Pending Order Matches')
    print(table)
    print('\n')

    # Open orders.
    orders = util.api('get_orders', {'status': 'open'})
    table = PrettyTable(['Give Quantity', 'Give Asset', 'Price', 'Price Assets', 'Required BTC Fee', 'Provided BTC Fee', 'Time Left', 'Tx Hash'])
    for order in orders:
        if give_asset and order['give_asset'] != give_asset:
            continue
        if get_asset and order['get_asset'] != get_asset:
            continue
        order = format_order(order)
        table.add_row(order)
    print('Open Orders')
    table = table.get_string(sortby='Price')
    print(table)
    print('\n')

    # Open bets.
    bets = util.api('get_bets', {'status': 'open'})
    table = PrettyTable(['Bet Type', 'Feed Address', 'Deadline', 'Target Value', 'Leverage', 'Wager', 'Odds', 'Time Left', 'Tx Hash'])
    for bet in bets:
        bet = format_bet(bet)
        table.add_row(bet)
    print('Open Bets')
    print(table)
    print('\n')

    # Feeds.
    broadcasts = util.api('get_broadcasts', {'status': 'valid'})
    table = PrettyTable(['Feed Address', 'Timestamp', 'Text', 'Value', 'Fee Fraction'])
    seen_addresses = []
    for broadcast in broadcasts:
        # Only show feeds with broadcasts in the last two weeks.
        last_block_time = util.last_block(db)['block_time']
        if broadcast['timestamp'] + config.TWO_WEEKS < last_block_time:
            continue
        # Only show the latest broadcast from each feed address.
        if broadcast['source'] not in seen_addresses:
            feed = format_feed(broadcast)
            table.add_row(feed)
            seen_addresses.append(broadcast['source'])
        else:
            continue
    print('Feeds')
    print(table)
def compose(db, source, asset, quantity, tag):
    validate(db, source, None, asset, quantity, util.last_block(db)['block_index'])
    data = pack(asset, quantity)
    return (source, [], data)
def run(self):
    logger.debug('Starting API Status Poller.')
    global current_api_status_code, current_api_status_response_json
    db = database.get_connection(integrity_check=False)

    while not self.stop_event.is_set():
        try:
            # Check version.
            if time.time() - self.last_version_check >= 60 * 60:  # One hour since last check.
                logger.debug('Checking version.')
                code = 10
                check.version(util.last_block(db)['block_index'])
                self.last_version_check = time.time()

            # Check that bitcoind is running, communicable, and caught up with the blockchain.
            # Check that the database has caught up with bitcoind.
            if time.time() - self.last_database_check > 10 * 60:  # Ten minutes since last check.
                code = 11
                logger.debug('Checking backend state.')
                check.backend_state(self.proxy)
                code = 12
                logger.debug('Checking database state.')
                check.database_state(db, backend.getblockcount(self.proxy))
                self.last_database_check = time.time()
        except (check.VersionError, check.BackendError, exceptions.DatabaseError) as e:
            exception_name = e.__class__.__name__
            exception_text = str(e)
            logger.debug("API Status Poller: %s", exception_text)
            jsonrpc_response = jsonrpc.exceptions.JSONRPCServerError(message=exception_name, data=exception_text)
            current_api_status_code = code
            current_api_status_response_json = jsonrpc_response.json.encode()
        else:
            current_api_status_code = None
            current_api_status_response_json = None
        time.sleep(config.BACKEND_POLL_INTERVAL)
def database_state(db, blockcount):
    """Check that the database is caught up with the backend."""
    if util.last_block(db)['block_index'] + 1 < blockcount:
        raise exceptions.DatabaseError('{} database is behind backend.'.format(config.XCP_NAME))
    logger.debug('Database state check passed.')
    return
def compose(db, source, destination, asset, quantity):
    if asset == config.BTC:
        return (source, [(destination, quantity)], None)

    validate(db, source, destination, asset, quantity, util.last_block(db)['block_index'])
    data = pack(asset, quantity)
    return (source, [(destination, None)], data)
def compose(db, source, fraction, asset):
    call_price, callback_total, outputs, problems = validate(db, source, fraction, asset, util.last_block(db)['block_time'], util.last_block(db)['block_index'], parse=False)
    if problems:
        raise exceptions.ComposeError(problems)
    logging.info('Total quantity to be called back: {} {}'.format(util.devise(db, callback_total, asset, 'output'), asset))

    asset_id = util.get_asset_id(asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, fraction, asset_id)
    return (source, [], data)
def compose(db, source, fraction, asset):
    call_price, callback_total, outputs, problems = validate(db, source, fraction, asset, util.last_block(db)['block_time'], util.last_block(db)['block_index'], parse=False)
    if problems:
        raise exceptions.ComposeError(problems)
    logging.info('Total quantity to be called back: {} {}'.format(util.devise(db, callback_total, asset, 'output'), asset))

    asset_id = util.get_asset_id(db, asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, fraction, asset_id)
    return (source, [], data)
def compose(db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required):
    cursor = db.cursor()

    # Check balance.
    if give_asset != config.BTC:
        balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, give_asset)))
        if (not balances or balances[0]['quantity'] < give_quantity):
            raise exceptions.ComposeError('insufficient funds')

    problems = validate(db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    give_id = util.get_asset_id(db, give_asset, util.last_block(db)['block_index'])
    get_id = util.get_asset_id(db, get_asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, give_id, give_quantity, get_id, get_quantity, expiration, fee_required)
    cursor.close()
    return (source, [], data)
def unpack(data_hex):
    data = binascii.unhexlify(data_hex)
    message_type_id = struct.unpack(config.TXTYPE_FORMAT, data[:4])[0]
    message = data[4:]

    for message_type in API_TRANSACTIONS:
        if message_type_id == sys.modules['lib.messages.{}'.format(message_type)].ID:
            unpack_method = sys.modules['lib.messages.{}'.format(message_type)].unpack
            unpacked = unpack_method(db, message, util.last_block(db)['block_index'])
            return message_type_id, unpacked
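# A minimal sketch of the prefix round-trip that unpack() above depends on. It
# assumes config.TXTYPE_FORMAT is the big-endian unsigned-integer struct format
# '>I' (an assumption for illustration; the real value lives in config): the
# first four bytes of a message carry the type ID, the rest the payload.
import binascii
import struct

TXTYPE_FORMAT = '>I'  # Assumed value of config.TXTYPE_FORMAT.
payload = b'example-payload'
data_hex = binascii.hexlify(struct.pack(TXTYPE_FORMAT, 0) + payload).decode('ascii')
raw = binascii.unhexlify(data_hex)
message_type_id = struct.unpack(TXTYPE_FORMAT, raw[:4])[0]
assert message_type_id == 0 and raw[4:] == payload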
def market(give_asset, get_asset):
    # Your pending order matches.
    awaiting_btcs = util.get_order_matches(db, status='pending', is_mine=True)
    table = PrettyTable(['Matched Order ID', 'Time Left'])
    for order_match in awaiting_btcs:
        order_match = format_order_match(db, order_match)
        table.add_row(order_match)
    print('Your Pending Order Matches')
    print(table)
    print('\n')

    # Open orders.
    orders = util.get_orders(db, status='valid', show_expired=False, show_empty=False)
    table = PrettyTable(['Give Quantity', 'Give Asset', 'Price', 'Price Assets', 'Required BTC Fee', 'Provided BTC Fee', 'Time Left', 'Tx Hash'])
    for order in orders:
        if give_asset and order['give_asset'] != give_asset:
            continue
        if get_asset and order['get_asset'] != get_asset:
            continue
        order = format_order(order)
        table.add_row(order)
    print('Open Orders')
    table = table.get_string(sortby='Price')
    print(table)
    print('\n')

    # Open bets.
    bets = util.get_bets(db, status='valid', show_empty=False)
    table = PrettyTable(['Bet Type', 'Feed Address', 'Deadline', 'Target Value', 'Leverage', 'Wager', 'Odds', 'Time Left', 'Tx Hash'])
    for bet in bets:
        bet = format_bet(bet)
        table.add_row(bet)
    print('Open Bets')
    print(table)
    print('\n')

    # Feeds.
    broadcasts = util.get_broadcasts(db, status='valid', order_by='timestamp', order_dir='desc')
    table = PrettyTable(['Feed Address', 'Timestamp', 'Text', 'Value', 'Fee Fraction'])
    seen_addresses = []
    for broadcast in broadcasts:
        # Only show feeds with broadcasts in the last two weeks.
        last_block_time = util.last_block(db)['block_time']
        if broadcast['timestamp'] + config.TWO_WEEKS < last_block_time:
            continue
        # Only show the latest broadcast from each feed address.
        if broadcast['source'] not in seen_addresses:
            feed = format_feed(broadcast)
            table.add_row(feed)
            seen_addresses.append(broadcast['source'])
        else:
            continue
    print('Feeds')
    print(table)
def market(give_asset, get_asset):
    # Your pending order matches.
    awaiting_btcs = util.get_order_matches(db, status='pending', is_mine=True)
    table = PrettyTable(['Matched Order ID', 'Time Left'])
    for order_match in awaiting_btcs:
        order_match = format_order_match(db, order_match)
        table.add_row(order_match)
    print('Your Pending Order Matches')
    print(table)
    print('\n')

    # Open orders.
    orders = util.get_orders(db, status='open', show_expired=False)
    table = PrettyTable(['Give Quantity', 'Give Asset', 'Price', 'Price Assets', 'Required BTC Fee', 'Provided BTC Fee', 'Time Left', 'Tx Hash'])
    for order in orders:
        if give_asset and order['give_asset'] != give_asset:
            continue
        if get_asset and order['get_asset'] != get_asset:
            continue
        order = format_order(order)
        table.add_row(order)
    print('Open Orders')
    table = table.get_string(sortby='Price')
    print(table)
    print('\n')

    # Open bets.
    bets = util.get_bets(db, status='open')
    table = PrettyTable(['Bet Type', 'Feed Address', 'Deadline', 'Target Value', 'Leverage', 'Wager', 'Odds', 'Time Left', 'Tx Hash'])
    for bet in bets:
        bet = format_bet(bet)
        table.add_row(bet)
    print('Open Bets')
    print(table)
    print('\n')

    # Feeds.
    broadcasts = util.get_broadcasts(db, status='valid', order_by='timestamp', order_dir='desc')
    table = PrettyTable(['Feed Address', 'Timestamp', 'Text', 'Value', 'Fee Fraction'])
    seen_addresses = []
    for broadcast in broadcasts:
        # Only show feeds with broadcasts in the last two weeks.
        last_block_time = util.last_block(db)['block_time']
        if broadcast['timestamp'] + config.TWO_WEEKS < last_block_time:
            continue
        # Only show the latest broadcast from each feed address.
        if broadcast['source'] not in seen_addresses:
            feed = format_feed(broadcast)
            table.add_row(feed)
            seen_addresses.append(broadcast['source'])
        else:
            continue
    print('Feeds')
    print(table)
def compose(db, source, possible_moves, wager, move_random_hash, expiration):
    problems = validate(db, source, possible_moves, wager, move_random_hash, expiration, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, possible_moves, wager, binascii.unhexlify(move_random_hash), expiration)
    return (source, [], data)
def market(give_asset, get_asset, depthonly=False):
    # Open orders.
    if depthonly:
        depth(give_asset, get_asset, "Open Orders to Give " + give_asset + ", Get " + get_asset + " (Bids)")
        depth(get_asset, give_asset, "Open Orders to Give " + get_asset + ", Get " + give_asset + " (Offers)")
    else:
        depth(give_asset, get_asset)

    # Open bets.
    bets = util.get_bets(db, validity='Valid', show_empty=False)
    table = PrettyTable(['Bet Type', 'Feed Address', 'Deadline', 'Target Value', 'Leverage', 'Wager', 'Odds', 'Time Left', 'Tx Hash'])
    for bet in bets:
        bet = format_bet(bet)
        table.add_row(bet)
    print('Open Bets')
    if os.name == 'nt':
        table = windows(table.get_string())
    print(table)
    print('\n')

    # Feeds.
    broadcasts = util.get_broadcasts(db, validity='Valid', order_by='timestamp', order_dir='desc')
    table = PrettyTable(['Feed Address', 'Timestamp', 'Text', 'Value', 'Fee Multiplier'])
    seen_addresses = []
    for broadcast in broadcasts:
        # Only show feeds with broadcasts in the last two weeks.
        last_block_time = util.last_block(db)['block_time']
        if broadcast['timestamp'] + config.TWO_WEEKS < last_block_time:
            continue
        # Only show the latest broadcast from each feed address.
        if broadcast['source'] not in seen_addresses:
            feed = format_feed(broadcast)
            table.add_row(feed)
            seen_addresses.append(broadcast['source'])
        else:
            continue
    print('Feeds')
    if os.name == 'nt':
        table = windows(table.get_string())
    print(table)

    # Matched orders awaiting BTC payments from you.
    awaiting_btcs = util.get_order_matches(db, validity='Valid: awaiting BTC payment', is_mine=True)
    table = PrettyTable(['Matched Order ID', 'Time Left'])
    for order_match in awaiting_btcs:
        order_match = format_order_match(db, order_match)
        table.add_row(order_match)
    print('Order Matches Awaiting BTC Payment from You')
    if os.name == 'nt':
        table = windows(table.get_string())
    print(table)
    print('\n')
def compose(db, source, quantity, overburn=False):
    cursor = db.cursor()
    destination = config.UNSPENDABLE
    problems = validate(db, source, destination, quantity, util.last_block(db)['block_index'], overburn=overburn)
    if problems:
        raise exceptions.ComposeError(problems)

    # Check that a maximum of 1 BTC total is burned per address.
    burns = list(cursor.execute('''SELECT * FROM burns WHERE (status = ? AND source = ?)''', ('valid', source)))
    already_burned = sum([burn['burned'] for burn in burns])
    if quantity > (1 * config.UNIT - already_burned) and not overburn:
        raise exceptions.ComposeError('1 {} may be burned per address'.format(config.BTC))

    cursor.close()
    return (source, [(destination, quantity)], None)
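# A sketch of the one-BTC-per-address burn ceiling enforced above, assuming
# config.UNIT is 100000000 (satoshis per coin); the quantities are hypothetical.
UNIT = 100000000            # Assumed value of config.UNIT.
already_burned = 60000000   # 0.6 BTC already burned by this address.
quantity = 50000000         # A further 0.5 BTC would breach the 1 BTC ceiling.
assert quantity > (1 * UNIT - already_burned)  # compose() rejects this without overburn.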
def compose(db, source, feed_address, bet_type, deadline, wager_quantity, counterwager_quantity, target_value, leverage, expiration):
    problems, leverage = validate(db, source, feed_address, bet_type, deadline, wager_quantity, counterwager_quantity, target_value, leverage, expiration, util.last_block(db)['block_index'])
    if util.date_passed(deadline):
        problems.append('deadline passed')
    if problems:
        raise exceptions.ComposeError(problems)

    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, bet_type, deadline, wager_quantity, counterwager_quantity, target_value, leverage, expiration)
    return (source, [(feed_address, None)], data)
def compose(db, source, transfer_destination, asset, quantity, divisible, description):
    callable_, call_date, call_price = False, 0, 0.0

    call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, source, transfer_destination, asset, quantity, divisible, callable_, call_date, call_price, description, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    asset_id = util.generate_asset_id(asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    if len(description) <= 42:
        curr_format = FORMAT_2 + '{}p'.format(len(description) + 1)
    else:
        curr_format = FORMAT_2 + '{}s'.format(len(description))
    data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if callable_ else 0, call_date or 0, call_price or 0.0, description.encode('utf-8'))

    if transfer_destination:
        destination_outputs = [(transfer_destination, None)]
    else:
        destination_outputs = []
    return (source, destination_outputs, data)
def compose(db, source, timestamp, value, fee_fraction, text):
    # Store the fee fraction as an integer.
    fee_fraction_int = int(fee_fraction * 1e8)

    problems = validate(db, source, timestamp, value, fee_fraction_int, text, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    data = struct.pack(config.TXTYPE_FORMAT, ID)
    if len(text) <= 52:
        curr_format = FORMAT + '{}p'.format(len(text) + 1)
    else:
        curr_format = FORMAT + '{}s'.format(len(text))
    data += struct.pack(curr_format, timestamp, value, fee_fraction_int, text.encode('utf-8'))
    return (source, [], data)
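# The fee fraction travels on the wire as an integer count of hundred-millionths,
# mirroring the int(fee_fraction * 1e8) conversion above; e.g. a 5% fee:
assert int(0.05 * 1e8) == 5000000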
def compose(db, source, order_match_id):
    tx0_hash, tx1_hash = util.parse_id(order_match_id)

    destination, btc_quantity, escrowed_asset, escrowed_quantity, order_match, problems = validate(db, source, order_match_id, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    # Warn if down to the wire.
    time_left = order_match['match_expire_index'] - util.last_block(db)['block_index']
    if time_left < 4:
        logger.warning('Only {} blocks until that order match expires. The payment might not make it into the blockchain in time.'.format(time_left))
    if 10 - time_left < 4:
        logger.warning('Order match has only {} confirmation(s).'.format(10 - time_left))

    tx0_hash_bytes, tx1_hash_bytes = binascii.unhexlify(bytes(tx0_hash, 'utf-8')), binascii.unhexlify(bytes(tx1_hash, 'utf-8'))
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, tx0_hash_bytes, tx1_hash_bytes)
    return (source, [(destination, btc_quantity)], data)
def compose(db, source, move, random, rps_match_id):
    tx0_hash, tx1_hash = util.parse_id(rps_match_id)
    txn, rps_match, problems = validate(db, source, move, random, rps_match_id)
    if problems:
        raise exceptions.ComposeError(problems)

    # Warn if down to the wire.
    time_left = rps_match['match_expire_index'] - util.last_block(db)['block_index']
    if time_left < 4:
        logging.warning('WARNING: Only {} blocks until that rps match expires. The conclusion might not make it into the blockchain in time.'.format(time_left))

    tx0_hash_bytes = binascii.unhexlify(bytes(tx0_hash, 'utf-8'))
    tx1_hash_bytes = binascii.unhexlify(bytes(tx1_hash, 'utf-8'))
    random_bytes = binascii.unhexlify(bytes(random, 'utf-8'))
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, move, random_bytes, tx0_hash_bytes, tx1_hash_bytes)
    return (source, [], data)
def compose(db, source, transfer_destination, asset, quantity, divisible, callable_, call_date, call_price, description):
    call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, source, transfer_destination, asset, quantity, divisible, callable_, call_date, call_price, description, util.last_block(db)['block_index'])
    if problems:
        raise exceptions.ComposeError(problems)

    asset_id = util.generate_asset_id(asset, util.last_block(db)['block_index'])
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    if len(description) <= 42:
        curr_format = FORMAT_2 + '{}p'.format(len(description) + 1)
    else:
        curr_format = FORMAT_2 + '{}s'.format(len(description))
    data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if callable_ else 0, call_date or 0, call_price or 0.0, description.encode('utf-8'))

    if transfer_destination:
        destination_outputs = [(transfer_destination, None)]
    else:
        destination_outputs = []
    return (source, destination_outputs, data)
def compose(db, source, move, random, rps_match_id):
    tx0_hash, tx1_hash = util.parse_id(rps_match_id)
    txn, rps_match, problems = validate(db, source, move, random, rps_match_id)
    if problems:
        raise exceptions.ComposeError(problems)

    # Warn if down to the wire.
    time_left = rps_match['match_expire_index'] - util.last_block(db)['block_index']
    if time_left < 4:
        logger.warning('Only {} blocks until that rps match expires. The conclusion might not make it into the blockchain in time.'.format(time_left))

    tx0_hash_bytes = binascii.unhexlify(bytes(tx0_hash, 'utf-8'))
    tx1_hash_bytes = binascii.unhexlify(bytes(tx1_hash, 'utf-8'))
    random_bytes = binascii.unhexlify(bytes(random, 'utf-8'))
    data = struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, move, random_bytes, tx0_hash_bytes, tx1_hash_bytes)
    return (source, [], data)
def format_order(order):
    give_amount = util.devise(db, D(order['give_amount']), order['give_asset'], 'output')
    get_amount = util.devise(db, D(order['get_amount']), order['get_asset'], 'output')
    give_remaining = util.devise(db, D(order['give_remaining']), order['give_asset'], 'output')
    get_remaining = util.devise(db, D(order['get_remaining']), order['get_asset'], 'output')
    give_asset = order['give_asset']
    get_asset = order['get_asset']

    if get_asset < give_asset:
        price = util.devise(db, D(order['get_amount']) / D(order['give_amount']), 'price', 'output')
        price_assets = get_asset + '/' + give_asset + ' ask'
    else:
        price = util.devise(db, D(order['give_amount']) / D(order['get_amount']), 'price', 'output')
        price_assets = give_asset + '/' + get_asset + ' bid'

    if order['fee_required']:
        fee = str(order['fee_required'] / config.UNIT)
    else:
        fee = str(order['fee_provided'] / config.UNIT)

    # Time left is the number of blocks remaining until expiry.
    return [D(give_remaining), give_asset, price, price_assets, fee, order['expire_index'] - util.last_block(db)['block_index'], order['tx_hash']]
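# A self-contained sketch of the price orientation used by format_order(): the
# pair is always named with the lexicographically smaller asset first, and the
# price is the first-named asset's amount over the second's. Values here are
# hypothetical.
from decimal import Decimal as D

give_asset, get_asset = 'XCP', 'BTC'
give_amount, get_amount = D(200), D(1)
if get_asset < give_asset:
    price, price_assets = get_amount / give_amount, get_asset + '/' + give_asset + ' ask'
else:
    price, price_assets = give_amount / get_amount, give_asset + '/' + get_asset + ' bid'
assert price_assets == 'BTC/XCP ask' and price == D('0.005')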
def follow(db, proxy):
    cursor = db.cursor()

    # Initialise.
    initialise(db)

    # Get index of last block.
    try:
        block_index = util.last_block(db)['block_index'] + 1

        # Reparse all transactions if minor version has changed.
        minor_version = cursor.execute('PRAGMA user_version').fetchall()[0]['user_version']
        if minor_version != config.VERSION_MINOR:
            logger.info('Client minor version number mismatch ({} ≠ {}).'.format(minor_version, config.VERSION_MINOR))
            reparse(db, quiet=False)
        logger.info('Resuming parsing.')
    except exceptions.DatabaseError:
        logger.warning('New database.')
        block_index = config.BLOCK_FIRST

    # Get index of last transaction.
    tx_index = get_next_tx_index(db)

    not_supported = {}  # No false positives. Use a dict to allow for O(1) lookups.
    not_supported_sorted = collections.deque()
    # ^ Entries in form of (block_index, tx_hash), oldest first. Allows for easy removal of past, unnecessary entries.
    mempool_initialised = False

    # A reorg can happen without the block count increasing, or even, for that
    # matter, with the block count decreasing. This should only delay
    # processing of the new blocks a bit.
    while True:
        starttime = time.time()

        # Get block count.
        # If the backend is unreachable and `config.FORCE` is set, just sleep
        # and try again repeatedly.
        try:
            block_count = backend.getblockcount(proxy)
        except (ConnectionRefusedError, http.client.CannotSendRequest) as e:
            if config.FORCE:
                time.sleep(config.BACKEND_POLL_INTERVAL)
                continue
            else:
                raise e

        # Get new blocks.
        if block_index <= block_count:

            # Backwards check for incorrect blocks due to chain reorganisation, and stop when a common parent is found.
            current_index = block_index
            requires_rollback = False
            while True:
                if current_index == config.BLOCK_FIRST:
                    break

                logger.debug('Checking that block {} is not an orphan.'.format(current_index))
                # Backend parent hash.
                current_hash_bin = backend.getblockhash(proxy, current_index)
                current_cblock = backend.getblock(proxy, current_hash_bin)
                backend_parent = bitcoinlib.core.b2lx(current_cblock.hashPrevBlock)

                # DB parent hash.
                blocks = list(cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (current_index - 1,)))
                if len(blocks) != 1:  # For empty DB.
                    break
                db_parent = blocks[0]['block_hash']

                # Compare.
                assert type(db_parent) == str
                assert type(backend_parent) == str
                if db_parent == backend_parent:
                    break
                else:
                    current_index -= 1
                    requires_rollback = True

            # Rollback for reorganisation.
            if requires_rollback:
                # Record reorganisation.
                logger.warning('Blockchain reorganisation at block {}.'.format(current_index))
                log.message(db, block_index, 'reorg', None, {'block_index': current_index})

                # Rollback the DB.
                reparse(db, block_index=current_index - 1, quiet=True)
                block_index = current_index
                tx_index = get_next_tx_index(db)
                continue

            # Get and parse transactions in this block (atomically).
            block_hash_bin = backend.getblockhash(proxy, current_index)
            block = backend.getblock(proxy, block_hash_bin)
            block_hash = bitcoinlib.core.b2lx(block_hash_bin)
            previous_block_hash = bitcoinlib.core.b2lx(block.hashPrevBlock)
            block_time = block.nTime
            txhash_list = backend.get_txhash_list(block)
            with db:
                # List the block.
                cursor.execute('''INSERT INTO blocks(block_index, block_hash, block_time, previous_block_hash, difficulty) VALUES(?,?,?,?,?)''', (block_index, block_hash, block_time, previous_block_hash, block.difficulty))

                # List the transactions in the block.
                for tx_hash in txhash_list:
                    # TODO: use rpc._batch to get all transactions with one RPC call
                    tx_index = list_tx(db, proxy, block_hash, block_index, block_time, tx_hash, tx_index)

                # Parse the transactions in the block.
                parse_block(db, block_index, block_time)

            # When newly caught up, check for conservation of assets.
            if block_index == block_count:
                check.asset_conservation(db)

            # Remove any non‐supported transactions older than ten blocks.
            while len(not_supported_sorted) and not_supported_sorted[0][0] <= block_index - 10:
                tx_h = not_supported_sorted.popleft()[1]
                del not_supported[tx_h]

            logger.info('Block: %s (%ss)' % (str(block_index), '{:.2f}'.format(time.time() - starttime)))

            # Increment block index.
            block_count = backend.getblockcount(proxy)
            block_index += 1

        else:
            # First mempool fill for session?
            if mempool_initialised:
                logger.debug('Updating mempool.')
            else:
                logger.debug('Initialising mempool.')

            # Get old counterpartyd mempool.
            old_mempool = list(cursor.execute('''SELECT * FROM mempool'''))
            old_mempool_hashes = [message['tx_hash'] for message in old_mempool]

            # Fake values for fake block.
            curr_time = int(time.time())
            mempool_tx_index = tx_index

            # For each transaction in the Bitcoin Core mempool, if it’s new, create
            # a fake block, a fake transaction, capture the generated messages,
            # and then save those messages.
            # Every transaction in the mempool is parsed independently. (The DB is rolled back after each one.)
            mempool = []
            util.MEMPOOL = backend.getrawmempool(proxy)
            for tx_hash in util.MEMPOOL:
                tx_hash = bitcoinlib.core.b2lx(tx_hash)

                # If already in counterpartyd mempool, copy to new one.
                if tx_hash in old_mempool_hashes:
                    for message in old_mempool:
                        if message['tx_hash'] == tx_hash:
                            mempool.append((tx_hash, message))

                # If already skipped, skip it again.
                elif tx_hash not in not_supported:

                    # Else: list, parse and save it.
                    try:
                        with db:
                            # List the fake block.
                            cursor.execute('''INSERT INTO blocks(block_index, block_hash, block_time) VALUES(?,?,?)''', (config.MEMPOOL_BLOCK_INDEX, config.MEMPOOL_BLOCK_HASH, curr_time))

                            # List transaction.
                            try:
                                # Sometimes the transactions can’t be found: `{'code': -5, 'message': 'No information available about transaction'}` Is txindex enabled in Bitcoind?
                                mempool_tx_index = list_tx(db, proxy, None, block_index, curr_time, tx_hash, mempool_tx_index)
                            except backend.BitcoindError:
                                raise MempoolError

                            # Parse transaction.
                            cursor.execute('''SELECT * FROM transactions WHERE tx_hash = ?''', (tx_hash,))
                            transactions = list(cursor)
                            if transactions:
                                assert len(transactions) == 1
                                transaction = transactions[0]
                                supported = parse_tx(db, transaction)
                                if not supported:
                                    not_supported[tx_hash] = ''
                                    not_supported_sorted.append((block_index, tx_hash))
                            else:
                                # If a transaction hasn’t been added to the
                                # table `transactions`, then it’s not a
                                # Counterparty transaction.
                                not_supported[tx_hash] = ''
                                not_supported_sorted.append((block_index, tx_hash))
                                raise MempoolError

                            # Save transaction and side‐effects in memory.
                            cursor.execute('''SELECT * FROM messages WHERE block_index = ?''', (config.MEMPOOL_BLOCK_INDEX,))
                            for message in list(cursor):
                                mempool.append((tx_hash, message))

                            # Rollback.
                            raise MempoolError
                    except MempoolError:
                        pass

            # Re‐write mempool messages to database.
            with db:
                cursor.execute('''DELETE FROM mempool''')
                for message in mempool:
                    tx_hash, new_message = message
                    new_message['tx_hash'] = tx_hash
                    cursor.execute('''INSERT INTO mempool VALUES(:tx_hash, :command, :category, :bindings, :timestamp)''', new_message)

            # Wait.
            mempool_initialised = True
            db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE)
            time.sleep(config.BACKEND_POLL_INTERVAL)

    cursor.close()
def construct(db, proxy, tx_info, encoding='auto',
              fee_per_kb=config.DEFAULT_FEE_PER_KB,
              regular_dust_size=config.DEFAULT_REGULAR_DUST_SIZE,
              multisig_dust_size=config.DEFAULT_MULTISIG_DUST_SIZE,
              op_return_value=config.DEFAULT_OP_RETURN_VALUE,
              exact_fee=None, fee_provided=0, provided_pubkeys=None,
              allow_unconfirmed_inputs=False):
    block_index = util.last_block(db)['block_index']
    (source, destination_outputs, data) = tx_info

    # Sanity checks.
    if exact_fee and not isinstance(exact_fee, int):
        raise exceptions.TransactionError('Exact fees must be in satoshis.')
    if not isinstance(fee_provided, int):
        raise exceptions.TransactionError('Fee provided must be in satoshis.')

    '''Destinations'''

    # Destination outputs.
    # Replace multi‐sig addresses with multi‐sig pubkeys. Check that the
    # destination output isn’t a dust output. Set null values to dust size.
    destination_outputs_new = []
    for (address, value) in destination_outputs:
        # Value.
        if script.is_multisig(address):
            dust_size = multisig_dust_size
        else:
            dust_size = regular_dust_size
        if value == None:
            value = dust_size
        elif value < dust_size:
            raise exceptions.TransactionError('Destination output is dust.')

        # Address.
        script.validate(address)
        if script.is_multisig(address):
            destination_outputs_new.append((backend.multisig_pubkeyhashes_to_pubkeys(proxy, address, provided_pubkeys), value))
        else:
            destination_outputs_new.append((address, value))

    destination_outputs = destination_outputs_new
    destination_btc_out = sum([value for address, value in destination_outputs])

    '''Data'''

    # Data encoding methods (choose and validate).
    if data:
        if encoding == 'auto':
            if len(data) <= config.OP_RETURN_MAX_SIZE:
                encoding = 'multisig'  # BTCGuild isn’t mining `OP_RETURN`?!
            else:
                encoding = 'multisig'
        elif encoding not in ('pubkeyhash', 'multisig', 'opreturn'):
            raise exceptions.TransactionError('Unknown encoding‐scheme.')

    # Divide data into chunks.
    if data:
        if encoding == 'pubkeyhash':
            # Prefix is also a suffix here.
            chunk_size = 20 - 1 - 8
        elif encoding == 'multisig':
            # Two pubkeys, minus length byte, minus prefix, minus two nonces,
            # minus two sign bytes.
            chunk_size = (33 * 2) - 1 - 8 - 2 - 2
        elif encoding == 'opreturn':
            chunk_size = config.OP_RETURN_MAX_SIZE
            if len(data) > chunk_size:
                raise exceptions.TransactionError('One `OP_RETURN` output per transaction.')
        data_array = list(chunks(data, chunk_size))
    else:
        data_array = []

    # Data outputs.
    if data:
        if encoding == 'multisig':
            data_value = multisig_dust_size
        elif encoding == 'opreturn':
            data_value = op_return_value
        else:
            # Pay‐to‐PubKeyHash, e.g.
            data_value = regular_dust_size
        data_output = (data_array, data_value)
    else:
        data_output = None
    data_btc_out = sum([data_value for data_chunk in data_array])

    '''Inputs'''

    # Source.
    # If a public key is necessary for construction of the (unsigned)
    # transaction, either use the public key provided, or derive it from a
    # private key retrieved from the wallet.
    if source:
        script.validate(source)
    dust_return_pubkey = get_dust_return_pubkey(proxy, source, provided_pubkeys, encoding)

    # Calculate collective size of outputs, for fee calculation.
    if encoding == 'multisig':
        data_output_size = 81  # 71 for the data
    elif encoding == 'opreturn':
        data_output_size = 90  # 80 for the data
    else:
        data_output_size = 25 + 9  # Pay‐to‐PubKeyHash (25 for the data?)
    outputs_size = ((25 + 9) * len(destination_outputs)) + (len(data_array) * data_output_size)

    # Get inputs.
    unspent = backend.get_unspent_txouts(proxy, source)
    unspent = backend.sort_unspent_txouts(unspent, allow_unconfirmed_inputs)
    logger.debug('Sorted UTXOs: {}'.format([print_coin(coin) for coin in unspent]))

    inputs = []
    btc_in = 0
    change_quantity = 0
    sufficient_funds = False
    final_fee = fee_per_kb
    for coin in unspent:
        logger.debug('New input: {}'.format(print_coin(coin)))
        inputs.append(coin)
        btc_in += round(coin['amount'] * config.UNIT)

        # If an exact fee is specified, use that. Otherwise, calculate the size
        # of the transaction and base the fee on that (plus provide a minimum
        # fee for selling BTC).
        if exact_fee:
            final_fee = exact_fee
        else:
            size = 181 * len(inputs) + outputs_size + 10
            necessary_fee = (int(size / 1000) + 1) * fee_per_kb
            final_fee = max(fee_provided, necessary_fee)
            assert final_fee >= 1 * fee_per_kb

        # Check if good.
        btc_out = destination_btc_out + data_btc_out
        change_quantity = btc_in - (btc_out + final_fee)
        logger.debug('Change quantity: {} BTC'.format(change_quantity / config.UNIT))
        # If change is necessary, it must not be a dust output.
        if change_quantity == 0 or change_quantity >= regular_dust_size:
            sufficient_funds = True
            break

    if not sufficient_funds:
        # Approximate the needed change and fee with the most recently
        # calculated quantities.
        total_btc_out = btc_out + max(change_quantity, 0) + final_fee
        raise exceptions.BalanceError('Insufficient {} at address {}. (Need approximately {} {}.) To spend unconfirmed coins, use the flag `--unconfirmed`. (Unconfirmed coins cannot be spent from multi‐sig addresses.)'.format(config.BTC, source, total_btc_out / config.UNIT, config.BTC))

    '''Finish'''

    # Change output.
    if change_quantity:
        if script.is_multisig(source):
            change_address = backend.multisig_pubkeyhashes_to_pubkeys(proxy, source, provided_pubkeys)
        else:
            change_address = source
        change_output = (change_address, change_quantity)
    else:
        change_output = None

    # Serialise inputs and outputs.
    unsigned_tx = serialise(block_index, encoding, inputs, destination_outputs, data_output, change_output, dust_return_pubkey=dust_return_pubkey)
    unsigned_tx_hex = binascii.hexlify(unsigned_tx).decode('utf-8')

    '''Sanity Check'''

    from lib import blocks

    # Desired transaction info.
    (desired_source, desired_destination_outputs, desired_data) = tx_info
    desired_source = script.make_canonical(desired_source)
    desired_destination = script.make_canonical(desired_destination_outputs[0][0]) if desired_destination_outputs else ''
    # NOTE: Include change in destinations for BTC transactions.
    # if change_output and not desired_data and desired_destination != config.UNSPENDABLE:
    #     if desired_destination == '':
    #         desired_destination = desired_source
    #     else:
    #         desired_destination += '-{}'.format(desired_source)
    # NOTE
    if desired_data == None:
        desired_data = b''

    # Parsed transaction info.
    try:
        parsed_source, parsed_destination, x, y, parsed_data = blocks.get_tx_info2(proxy, unsigned_tx_hex)
    except exceptions.BTCOnlyError:
        # Skip BTC‐only transactions.
        return unsigned_tx_hex
    desired_source = script.make_canonical(desired_source)

    # Check desired info against parsed info.
    if (desired_source, desired_destination, desired_data) != (parsed_source, parsed_destination, parsed_data):
        raise exceptions.TransactionError('Constructed transaction does not parse correctly.')

    return unsigned_tx_hex
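# The fee heuristic from construct(), extracted into a hypothetical helper for
# illustration only: each input is costed at 181 bytes, plus the serialised
# outputs, plus 10 bytes of overhead, rounded up to the next whole kilobyte.
def estimate_fee(n_inputs, outputs_size, fee_per_kb=10000):
    # The fee_per_kb default here is illustrative, not config.DEFAULT_FEE_PER_KB.
    size = 181 * n_inputs + outputs_size + 10
    return (int(size / 1000) + 1) * fee_per_kb

assert estimate_fee(2, 34 * 2) == 10000  # 440 bytes still fits the first kilobyte bracket.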
def get_tx_info(tx_hex):
    source, destination, btc_amount, fee, data = blocks.get_tx_info(tx_hex, util.last_block(db)['block_index'])
    return source, destination, btc_amount, fee, util.hexlify(data)
def get_rows(db, table, filters=None, filterop='AND', order_by=None, order_dir=None, start_block=None, end_block=None, status=None, limit=1000, offset=0, show_expired=True):
    """Filter results based on a filter data structure (as used by the API)."""

    if filters == None:
        filters = []

    def value_to_marker(value):
        # If value is a list, the placeholder is (?,?,?,...).
        if isinstance(value, list):
            return '''({})'''.format(','.join(['?' for e in range(0, len(value))]))
        else:
            return '''?'''

    # TODO: Document that op can be anything that SQLite3 accepts.
    if not table or table.lower() not in API_TABLES:
        raise APIError('Unknown table')
    if filterop and filterop.upper() not in ['OR', 'AND']:
        raise APIError('Invalid filter operator (OR, AND)')
    if order_dir and order_dir.upper() not in ['ASC', 'DESC']:
        raise APIError('Invalid order direction (ASC, DESC)')
    if not isinstance(limit, int):
        raise APIError('Invalid limit')
    elif limit > 1000:
        raise APIError('Limit must be less than or equal to 1000')
    if not isinstance(offset, int):
        raise APIError('Invalid offset')
    # TODO: accept an object: {'field1':'ASC', 'field2': 'DESC'}
    if order_by and not re.compile('^[a-z0-9_]+$').match(order_by):
        raise APIError('Invalid order_by, must be a field name')

    if isinstance(filters, dict):  # Single filter entry; convert to a one-entry list.
        filters = [filters,]
    elif not isinstance(filters, list):
        filters = []

    # TODO: Document this! (Each filter can be an ordered list.)
    new_filters = []
    for filter_ in filters:
        if type(filter_) in (list, tuple) and len(filter_) in [3, 4]:
            new_filter = {'field': filter_[0], 'op': filter_[1], 'value': filter_[2]}
            if len(filter_) == 4:
                new_filter['case_sensitive'] = filter_[3]
            new_filters.append(new_filter)
        elif type(filter_) == dict:
            new_filters.append(filter_)
        else:
            raise APIError('Unknown filter type')
    filters = new_filters

    # Validate filter(s).
    for filter_ in filters:
        for field in ['field', 'op', 'value']:  # Should have all fields.
            if field not in filter_:
                raise APIError("A specified filter is missing the '%s' field" % field)
        if not isinstance(filter_['value'], (str, int, float, list)):
            raise APIError("Invalid value for the field '%s'" % filter_['field'])
        if isinstance(filter_['value'], list) and filter_['op'].upper() not in ['IN', 'NOT IN']:
            raise APIError("Invalid value for the field '%s'" % filter_['field'])
        if filter_['op'].upper() not in ['=', '==', '!=', '>', '<', '>=', '<=', 'IN', 'LIKE', 'NOT IN', 'NOT LIKE']:
            raise APIError("Invalid operator for the field '%s'" % filter_['field'])
        if 'case_sensitive' in filter_ and not isinstance(filter_['case_sensitive'], bool):
            raise APIError("case_sensitive must be a boolean")

    # SELECT
    statement = '''SELECT * FROM {}'''.format(table)

    # WHERE
    bindings = []
    conditions = []
    for filter_ in filters:
        case_sensitive = False if 'case_sensitive' not in filter_ else filter_['case_sensitive']
        if filter_['op'] == 'LIKE' and case_sensitive == False:
            filter_['field'] = '''UPPER({})'''.format(filter_['field'])
            filter_['value'] = filter_['value'].upper()
        marker = value_to_marker(filter_['value'])
        conditions.append('''{} {} {}'''.format(filter_['field'], filter_['op'], marker))
        if isinstance(filter_['value'], list):
            bindings += filter_['value']
        else:
            bindings.append(filter_['value'])

    # AND filters
    more_conditions = []
    if table not in ['balances', 'order_matches', 'bet_matches']:
        if start_block != None:
            more_conditions.append('''block_index >= ?''')
            bindings.append(start_block)
        if end_block != None:
            more_conditions.append('''block_index <= ?''')
            bindings.append(end_block)
    elif table in ['order_matches', 'bet_matches']:
        if start_block != None:
            more_conditions.append('''tx0_block_index >= ?''')
            bindings.append(start_block)
        if end_block != None:
            more_conditions.append('''tx1_block_index <= ?''')
            bindings.append(end_block)

    # status
    if isinstance(status, list) and len(status) > 0:
        more_conditions.append('''status IN {}'''.format(value_to_marker(status)))
        bindings += status
    elif isinstance(status, str) and status != '':
        more_conditions.append('''status == ?''')
        bindings.append(status)

    # legacy filters
    if not show_expired and table == 'orders':
        # Ignore BTC orders one block early.
        expire_index = util.last_block(db)['block_index'] + 1
        more_conditions.append('''((give_asset == ? AND expire_index > ?) OR give_asset != ?)''')
        bindings += [config.BTC, expire_index, config.BTC]

    if (len(conditions) + len(more_conditions)) > 0:
        statement += ''' WHERE'''
        all_conditions = []
        if len(conditions) > 0:
            all_conditions.append('''({})'''.format(''' {} '''.format(filterop.upper()).join(conditions)))
        if len(more_conditions) > 0:
            all_conditions.append('''({})'''.format(''' AND '''.join(more_conditions)))
        statement += ''' {}'''.format(''' AND '''.join(all_conditions))

    # ORDER BY
    if order_by != None:
        statement += ''' ORDER BY {}'''.format(order_by)
        if order_dir != None:
            statement += ''' {}'''.format(order_dir.upper())

    # LIMIT
    if limit:
        statement += ''' LIMIT {}'''.format(limit)
        if offset:
            statement += ''' OFFSET {}'''.format(offset)

    return db_query(db, statement, tuple(bindings))
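# A sketch of the filter normalisation that get_rows() performs: a bare
# three-tuple (or four-tuple ending in a case_sensitive flag) becomes the dict
# form used for validation and SQL generation. normalise_filter is a
# hypothetical name, for illustration only.
def normalise_filter(filter_):
    if type(filter_) in (list, tuple) and len(filter_) in [3, 4]:
        new_filter = {'field': filter_[0], 'op': filter_[1], 'value': filter_[2]}
        if len(filter_) == 4:
            new_filter['case_sensitive'] = filter_[3]
        return new_filter
    return filter_

assert normalise_filter(('status', '=', 'open')) == {'field': 'status', 'op': '=', 'value': 'open'}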
def format_order_match(db, order_match):
    order_match_id = order_match['tx0_hash'] + order_match['tx1_hash']
    order_match_time_left = order_match['match_expire_index'] - util.last_block(db)['block_index']
    return [order_match_id, order_match_time_left]
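# An order match ID is simply the two transaction hashes concatenated, so it
# splits back apart at character 64 (a SHA-256 hash is 64 hex characters). The
# hashes below are dummies.
tx0_hash, tx1_hash = 'a' * 64, 'b' * 64
order_match_id = tx0_hash + tx1_hash
assert (order_match_id[:64], order_match_id[64:]) == (tx0_hash, tx1_hash)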
def get_tx_info(tx_hex):
    source, destination, btc_amount, fee, data = blocks.get_tx_info(self.proxy, tx_hex, util.last_block(db)['block_index'])
    return source, destination, btc_amount, fee, util.hexlify(data)
def get_rows(db, table, filters=None, filterop='AND', order_by=None, order_dir=None, start_block=None, end_block=None, status=None, limit=1000, offset=0, show_expired=True): """Filters results based on a filter data structure (as used by the API)""" if filters == None: filters = [] def value_to_marker(value): # if value is an array place holder is (?,?,?,..) if isinstance(value, list): return '''({})'''.format(','.join( ['?' for e in range(0, len(value))])) else: return '''?''' # TODO: Document that op can be anything that SQLite3 accepts. if not table or table.lower() not in API_TABLES: raise APIError('Unknown table') if filterop and filterop.upper() not in ['OR', 'AND']: raise APIError('Invalid filter operator (OR, AND)') if order_dir and order_dir.upper() not in ['ASC', 'DESC']: raise APIError('Invalid order direction (ASC, DESC)') if not isinstance(limit, int): raise APIError('Invalid limit') elif limit > 1000: raise APIError('Limit should be lower or equal to 1000') if not isinstance(offset, int): raise APIError('Invalid offset') # TODO: accept an object: {'field1':'ASC', 'field2': 'DESC'} if order_by and not re.compile('^[a-z0-9_]+$').match(order_by): raise APIError('Invalid order_by, must be a field name') if isinstance(filters, dict): #single filter entry, convert to a one entry list filters = [ filters, ] elif not isinstance(filters, list): filters = [] # TODO: Document this! (Each filter can be an ordered list.) new_filters = [] for filter_ in filters: if type(filter_) in (list, tuple) and len(filter_) in [3, 4]: new_filter = { 'field': filter_[0], 'op': filter_[1], 'value': filter_[2] } if len(filter_) == 4: new_filter['case_sensitive'] = filter_[3] new_filters.append(new_filter) elif type(filter_) == dict: new_filters.append(filter_) else: raise APIError('Unknown filter type') filters = new_filters # validate filter(s) for filter_ in filters: for field in ['field', 'op', 'value']: #should have all fields if field not in filter_: raise APIError("A specified filter is missing the '%s' field" % field) if not isinstance(filter_['value'], (str, int, float, list)): raise APIError("Invalid value for the field '%s'" % filter_['field']) if isinstance(filter_['value'], list) and filter_['op'].upper() not in ['IN', 'NOT IN']: raise APIError("Invalid value for the field '%s'" % filter_['field']) if filter_['op'].upper() not in [ '=', '==', '!=', '>', '<', '>=', '<=', 'IN', 'LIKE', 'NOT IN', 'NOT LIKE' ]: raise APIError("Invalid operator for the field '%s'" % filter_['field']) if 'case_sensitive' in filter_ and not isinstance( filter_['case_sensitive'], bool): raise APIError("case_sensitive must be a boolean") # SELECT statement = '''SELECT * FROM {}'''.format(table) # WHERE bindings = [] conditions = [] for filter_ in filters: case_sensitive = False if 'case_sensitive' not in filter_ else filter_[ 'case_sensitive'] if filter_['op'] == 'LIKE' and case_sensitive == False: filter_['field'] = '''UPPER({})'''.format(filter_['field']) filter_['value'] = filter_['value'].upper() marker = value_to_marker(filter_['value']) conditions.append('''{} {} {}'''.format(filter_['field'], filter_['op'], marker)) if isinstance(filter_['value'], list): bindings += filter_['value'] else: bindings.append(filter_['value']) # AND filters more_conditions = [] if table not in ['balances', 'order_matches', 'bet_matches']: if start_block != None: more_conditions.append('''block_index >= ?''') bindings.append(start_block) if end_block != None: more_conditions.append('''block_index <= ?''') bindings.append(end_block) 
    elif table in ['order_matches', 'bet_matches']:
        if start_block is not None:
            more_conditions.append('''tx0_block_index >= ?''')
            bindings.append(start_block)
        if end_block is not None:
            more_conditions.append('''tx1_block_index <= ?''')
            bindings.append(end_block)

    # status
    if isinstance(status, list) and len(status) > 0:
        more_conditions.append('''status IN {}'''.format(value_to_marker(status)))
        bindings += status
    elif isinstance(status, str) and status != '':
        more_conditions.append('''status == ?''')
        bindings.append(status)

    # legacy filters
    if not show_expired and table == 'orders':
        # Ignore BTC orders one block early.
        expire_index = util.last_block(db)['block_index'] + 1
        more_conditions.append('''((give_asset == ? AND expire_index > ?) OR give_asset != ?)''')
        bindings += [config.BTC, expire_index, config.BTC]

    if (len(conditions) + len(more_conditions)) > 0:
        statement += ''' WHERE'''
        all_conditions = []
        if len(conditions) > 0:
            all_conditions.append('''({})'''.format(''' {} '''.format(filterop.upper()).join(conditions)))
        if len(more_conditions) > 0:
            all_conditions.append('''({})'''.format(''' AND '''.join(more_conditions)))
        statement += ''' {}'''.format(''' AND '''.join(all_conditions))

    # ORDER BY
    if order_by is not None:
        statement += ''' ORDER BY {}'''.format(order_by)
        if order_dir is not None:
            statement += ''' {}'''.format(order_dir.upper())

    # LIMIT
    if limit:
        statement += ''' LIMIT {}'''.format(limit)
        if offset:
            statement += ''' OFFSET {}'''.format(offset)

    return db_query(db, statement, tuple(bindings))
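To make the filter-to-SQL translation concrete, here is a hypothetical call and the rough shape of the statement it produces. The addresses are placeholders; filters may be given as dicts or as 3/4-tuples, and the two below are joined with OR while the status condition is always ANDed on.

# Hypothetical usage of get_rows():
filters = [
    ('tx0_address', 'IN', ['1AddressA...', '1AddressB...']),
    {'field': 'tx1_address', 'op': '==', 'value': '1AddressA...'},
]
# rows = get_rows(db, 'order_matches', filters=filters, filterop='OR',
#                 status='pending', limit=50)
#
# Roughly the generated statement (bindings replace the ? markers):
#   SELECT * FROM order_matches
#   WHERE (tx0_address IN (?,?) OR tx1_address == ?) AND (status == ?)
#   LIMIT 50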
def format_bet (bet):
    odds = D(bet['counterwager_quantity']) / D(bet['wager_quantity'])
    if not bet['target_value']:
        target_value = None
    else:
        target_value = bet['target_value']
    if not bet['leverage']:
        leverage = None
    else:
        leverage = util.devise(db, D(bet['leverage']) / 5040, 'leverage', 'output')
    return [util.BET_TYPE_NAME[bet['bet_type']], bet['feed_address'],
            util.isodt(bet['deadline']), target_value, leverage,
            str(bet['wager_remaining'] / config.UNIT) + ' XCP',
            util.devise(db, odds, 'odds', 'output'),
            bet['expire_index'] - util.last_block(db)['block_index'],
            bet['tx_hash']]
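Two conventions above are worth spelling out: odds are simply counterwager over wager, and leverage is stored scaled by 5040 (so 5040 means 1x). A quick check with illustrative quantities, assuming the usual satoshi-style integer units:

from decimal import Decimal as D

# Wager 10 XCP against a counterwager of 15 XCP (both in integer units).
odds = D(1500000000) / D(1000000000)
assert odds == D('1.5')

# Leverage is stored scaled by 5040, so 10080 denotes 2x.
assert D(10080) / 5040 == 2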
def follow(db, proxy):
    cursor = db.cursor()

    # Initialise.
    initialise(db)

    # Get the index of the last block.
    try:
        block_index = util.last_block(db)['block_index'] + 1

        # Reparse all transactions if the minor version has changed.
        minor_version = cursor.execute('PRAGMA user_version').fetchall()[0]['user_version']
        if minor_version != config.VERSION_MINOR:
            logger.info('Client minor version number mismatch ({} ≠ {}).'.format(minor_version, config.VERSION_MINOR))
            reparse(db, quiet=False)
        logger.info('Resuming parsing.')
    except exceptions.DatabaseError:
        logger.warning('New database.')
        block_index = config.BLOCK_FIRST

    # Get the index of the last transaction.
    tx_index = get_next_tx_index(db)

    not_supported = {}  # No false positives. Use a dict to allow for O(1) lookups.
    not_supported_sorted = collections.deque()
    # ^ Entries in the form of (block_index, tx_hash), oldest first. Allows for easy removal of past, unnecessary entries.
    mempool_initialised = False

    # A reorg can happen without the block count increasing, or even, for that
    # matter, with the block count decreasing. This should only delay
    # processing of the new blocks a bit.
    while True:
        starttime = time.time()

        # Get the block count.
        # If the backend is unreachable and `config.FORCE` is set, just sleep
        # and try again repeatedly.
        try:
            block_count = backend.getblockcount(proxy)
        except (ConnectionRefusedError, http.client.CannotSendRequest) as e:
            if config.FORCE:
                time.sleep(config.BACKEND_POLL_INTERVAL)
                continue
            else:
                raise e

        # Get new blocks.
        if block_index <= block_count:

            # Check backwards for incorrect blocks due to chain reorganisation,
            # and stop when a common parent is found.
            current_index = block_index
            requires_rollback = False
            while True:
                if current_index == config.BLOCK_FIRST:
                    break

                logger.debug('Checking that block {} is not an orphan.'.format(current_index))
                # Backend parent hash.
                current_hash_bin = backend.getblockhash(proxy, current_index)
                current_cblock = backend.getblock(proxy, current_hash_bin)
                backend_parent = bitcoinlib.core.b2lx(current_cblock.hashPrevBlock)

                # DB parent hash.
                blocks = list(cursor.execute('''SELECT * FROM blocks
                                                WHERE block_index = ?''', (current_index - 1,)))
                if len(blocks) != 1:  # For an empty DB.
                    break
                db_parent = blocks[0]['block_hash']

                # Compare.
                assert type(db_parent) == str
                assert type(backend_parent) == str
                if db_parent == backend_parent:
                    break
                else:
                    current_index -= 1
                    requires_rollback = True

            # Rollback for reorganisation.
            if requires_rollback:
                # Record the reorganisation.
                logger.warning('Blockchain reorganisation at block {}.'.format(current_index))
                log.message(db, block_index, 'reorg', None, {'block_index': current_index})

                # Rollback the DB.
                reparse(db, block_index=current_index - 1, quiet=True)
                block_index = current_index
                tx_index = get_next_tx_index(db)
                continue

            # Get and parse the transactions in this block (atomically).
            block_hash_bin = backend.getblockhash(proxy, current_index)
            block = backend.getblock(proxy, block_hash_bin)
            block_hash = bitcoinlib.core.b2lx(block_hash_bin)
            previous_block_hash = bitcoinlib.core.b2lx(block.hashPrevBlock)
            block_time = block.nTime
            txhash_list = backend.get_txhash_list(block)
            with db:
                # List the block.
                cursor.execute('''INSERT INTO blocks(
                                    block_index,
                                    block_hash,
                                    block_time,
                                    previous_block_hash,
                                    difficulty) VALUES(?,?,?,?,?)''',
                               (block_index, block_hash, block_time, previous_block_hash, block.difficulty))

                # List the transactions in the block.
                for tx_hash in txhash_list:
                    # TODO: Use rpc._batch to get all transactions with one RPC call.
                    tx_index = list_tx(db, proxy, block_hash, block_index, block_time, tx_hash, tx_index)

                # Parse the transactions in the block.
                parse_block(db, block_index, block_time)

            # When newly caught up, check for conservation of assets.
            if block_index == block_count:
                check.asset_conservation(db)

            # Remove any non‐supported transactions older than ten blocks.
            while len(not_supported_sorted) and not_supported_sorted[0][0] <= block_index - 10:
                tx_h = not_supported_sorted.popleft()[1]
                del not_supported[tx_h]

            logger.info('Block: {} ({:.2f}s)'.format(block_index, time.time() - starttime))

            # Increment the block index.
            block_count = backend.getblockcount(proxy)
            block_index += 1

        else:
            # First mempool fill for this session?
            if mempool_initialised:
                logger.debug('Updating mempool.')
            else:
                logger.debug('Initialising mempool.')

            # Get the old counterpartyd mempool.
            old_mempool = list(cursor.execute('''SELECT * FROM mempool'''))
            old_mempool_hashes = [message['tx_hash'] for message in old_mempool]

            # Fake values for the fake block.
            curr_time = int(time.time())
            mempool_tx_index = tx_index

            # For each transaction in the Bitcoin Core mempool, if it’s new,
            # create a fake block and a fake transaction, capture the generated
            # messages, and then save those messages. Every transaction in the
            # mempool is parsed independently. (The DB is rolled back after each one.)
            mempool = []
            util.MEMPOOL = backend.getrawmempool(proxy)
            for tx_hash in util.MEMPOOL:
                tx_hash = bitcoinlib.core.b2lx(tx_hash)

                # If already in the counterpartyd mempool, copy to the new one.
                if tx_hash in old_mempool_hashes:
                    for message in old_mempool:
                        if message['tx_hash'] == tx_hash:
                            mempool.append((tx_hash, message))

                # If already skipped, skip it again.
                elif tx_hash not in not_supported:

                    # Else: list, parse and save it.
                    try:
                        with db:
                            # List the fake block.
                            cursor.execute('''INSERT INTO blocks(
                                                block_index,
                                                block_hash,
                                                block_time) VALUES(?,?,?)''',
                                           (config.MEMPOOL_BLOCK_INDEX, config.MEMPOOL_BLOCK_HASH, curr_time))

                            # List the transaction.
                            try:
                                # Sometimes the transactions can’t be found: `{'code': -5, 'message': 'No information available about transaction'}` Is txindex enabled in Bitcoind?
                                mempool_tx_index = list_tx(db, proxy, None, block_index, curr_time, tx_hash, mempool_tx_index)
                            except backend.BitcoindError:
                                raise MempoolError

                            # Parse the transaction.
                            cursor.execute('''SELECT * FROM transactions WHERE tx_hash = ?''', (tx_hash,))
                            transactions = list(cursor)
                            if transactions:
                                assert len(transactions) == 1
                                transaction = transactions[0]
                                supported = parse_tx(db, transaction)
                                if not supported:
                                    not_supported[tx_hash] = ''
                                    not_supported_sorted.append((block_index, tx_hash))
                            else:
                                # If a transaction hasn’t been added to the
                                # table `transactions`, then it’s not a
                                # Counterparty transaction.
                                not_supported[tx_hash] = ''
                                not_supported_sorted.append((block_index, tx_hash))
                                raise MempoolError

                            # Save the transaction and its side‐effects in memory.
                            cursor.execute('''SELECT * FROM messages WHERE block_index = ?''', (config.MEMPOOL_BLOCK_INDEX,))
                            for message in list(cursor):
                                mempool.append((tx_hash, message))

                            # Rollback.
                            raise MempoolError
                    except MempoolError:
                        pass

            # Re‐write mempool messages to the database.
            with db:
                cursor.execute('''DELETE FROM mempool''')
                for message in mempool:
                    tx_hash, new_message = message
                    new_message['tx_hash'] = tx_hash
                    cursor.execute('''INSERT INTO mempool VALUES(:tx_hash, :command, :category, :bindings, :timestamp)''', new_message)

            # Wait.
            mempool_initialised = True
            db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE)
            time.sleep(config.BACKEND_POLL_INTERVAL)

    cursor.close()
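The heart of follow()'s reorganisation handling is the backwards scan: compare the backend's parent hash for each block against the hash the database recorded, and step back until they agree. A minimal sketch of that scan, assuming a hypothetical get_backend_hash(i) helper and a db_hashes dict {block_index: block_hash} standing in for the RPC and DB calls above:

def find_common_parent(tip_index, first_index, get_backend_hash, db_hashes):
    """Walk backwards from the tip until the backend's parent hash matches the DB's."""
    current = tip_index
    while current > first_index:
        backend_parent = get_backend_hash(current - 1)
        db_parent = db_hashes.get(current - 1)
        if db_parent is None or db_parent == backend_parent:
            break              # empty DB, or common parent found
        current -= 1           # orphaned block: step back one more
    return current             # reparse from this index onward

# Example: a one-block fork at the tip. The DB believes block 101 is 'x',
# but the backend now says it is 'y', so reparsing resumes at 101.
db_hashes = {99: 'a', 100: 'b', 101: 'x'}
backend = {99: 'a', 100: 'b', 101: 'y'}.get
assert find_common_parent(102, 90, backend, db_hashes) == 101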
def database (db, blockcount):
    """Check the database to see if the server has caught up with Bitcoind."""
    # NOTE: Calling .format() on the docstring literal would discard it, so the
    # configured names (config.XCP_NAME, config.XCP_CLIENT) appear only in the
    # error message below.
    if util.last_block(db)['block_index'] + 1 < blockcount:
        raise exceptions.DatabaseError('{} database is behind Bitcoind. Is the {} server running?'.format(config.XCP_NAME, config.XCP_CLIENT))
    return
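Note that the comparison tolerates a lag of exactly one block, since the next block may simply not have been parsed yet. A hypothetical call, reusing `db` and `proxy` from the surrounding code:

# With the last parsed block at index N, a block count of N + 1 passes,
# while N + 2 (or more) raises DatabaseError.
blockcount = backend.getblockcount(proxy)
database(db, blockcount)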
def format_order (order):
    give_quantity = util.devise(db, D(order['give_quantity']), order['give_asset'], 'output')
    get_quantity = util.devise(db, D(order['get_quantity']), order['get_asset'], 'output')
    give_remaining = util.devise(db, D(order['give_remaining']), order['give_asset'], 'output')
    get_remaining = util.devise(db, D(order['get_remaining']), order['get_asset'], 'output')
    give_asset = order['give_asset']
    get_asset = order['get_asset']

    if get_asset < give_asset:
        price = util.devise(db, D(order['get_quantity']) / D(order['give_quantity']), 'price', 'output')
        price_assets = get_asset + '/' + give_asset + ' ask'
    else:
        price = util.devise(db, D(order['give_quantity']) / D(order['get_quantity']), 'price', 'output')
        price_assets = give_asset + '/' + get_asset + ' bid'

    return [D(give_remaining), give_asset, price, price_assets,
            str(order['fee_required'] / config.UNIT),
            str(order['fee_provided'] / config.UNIT),
            order['expire_index'] - util.last_block(db)['block_index'],
            order['tx_hash']]
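The branch on `get_asset < give_asset` normalises the quote so that both sides of the same pair print comparable prices: the lexicographically smaller asset is always the base of the quote. Illustrative numbers only, showing that mirror orders land on the same price with opposite labels:

from decimal import Decimal as D

# Order A gives 100 XCP for 2 BTC: 'BTC' < 'XCP', so price = get/give,
# quoted as a 'BTC/XCP ask'.
ask_price = D(2) / D(100)

# The mirror order gives 2 BTC for 100 XCP: the else branch applies,
# price = give/get, quoted as a 'BTC/XCP bid'.
bid_price = D(2) / D(100)

assert ask_price == bid_price == D('0.02')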
def compose (db, source, asset, quantity, tag):
    validate(db, source, None, asset, quantity, util.last_block(db)['block_index'])
    data = pack(asset, quantity)
    return (source, [], data)
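As with the other compose functions, the returned tuple is (source, destination outputs, data), where data is a packed binary message. A minimal sketch of what a pack() like the one above typically does, assuming (as elsewhere in this codebase) a '>I' message-type prefix and a '>QQ' payload of (asset_id, quantity); the message ID and asset ID below are placeholders, not the module's real values:

import struct

def pack_sketch(message_id, asset_id, quantity):
    data = struct.pack('>I', message_id)             # message type, like config.TXTYPE_FORMAT
    data += struct.pack('>QQ', asset_id, quantity)   # payload: asset id and quantity
    return data

data = pack_sketch(message_id=110, asset_id=1, quantity=50000000)
assert len(data) == 4 + 16   # 4-byte type prefix, two 8-byte integers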