def _setup_data_transfer_client(self, store_config, passive_port,
                                passive_bind, node_type, nat_type, wan_ip):
    """Create and start the FileTransfer client for this node.

    Args:
        store_config: Dict of storage paths to optional attributes.
        passive_port (int): Port to receive inbound TCP connections on.
        passive_bind (ip): LAN IP to receive inbound TCP connections on.
        node_type: Passed through to pyp2p Net (see pyp2p docs).
        nat_type: Passed through to pyp2p Net (see pyp2p docs).
        wan_ip: WAN IP of this node, or None.
    """
    # Handlers registered via the public API; the sets are shared so
    # handlers added later are also seen by the transfer client.
    handlers = {
        "complete": self._transfer_complete_handlers,
        "accept": self._transfer_request_handlers
    }
    # Use the same key as the DHT node for the transfer client.
    # (Removed dead code: a BtcTxStore wallet and node_id were computed
    # here but never used — this node itself acts as the DHT interface.)
    wif = self.get_key()
    self._data_transfer = FileTransfer(
        net=Net(
            net_type="direct",
            node_type=node_type,
            nat_type=nat_type,
            dht_node=self,  # this node doubles as the DHT node
            debug=1,
            passive_port=passive_port,
            passive_bind=passive_bind,
            wan_ip=wan_ip
        ),
        wif=wif,
        store_config=store_config,
        handlers=handlers
    )
    # Setup success callback values: our own transport address.
    self._data_transfer.success_value = (self.sync_get_wan_ip(), self.port)
    self.process_data_transfers()
def _setup_data_transfer_client(self, store_config, passive_port,
                                passive_bind, node_type, nat_type, wan_ip):
    """Wire up the FileTransfer client used for shard transfers.

    The handler sets are shared with the public API so callbacks
    registered later are also dispatched by the transfer client.
    """
    callback_map = {
        "complete": self._transfer_complete_handlers,
        "request": self._transfer_request_handlers
    }
    transport = Net(
        net_type="direct",
        node_type=node_type,
        nat_type=nat_type,
        dht_node=SimDHT(),  # Replace with self.server later on.
        debug=1,
        passive_port=passive_port,
        passive_bind=passive_bind,
        wan_ip=wan_ip
    )
    # FIXME use same key as dht
    throwaway_wif = BtcTxStore(testnet=True, dryrun=True).create_key()
    self._data_transfer = FileTransfer(
        net=transport,
        wif=throwaway_wif,
        store_config=store_config,
        handlers=callback_map
    )
    # Success callbacks resolve to our own transport address.
    self._data_transfer.success_value = (self.sync_get_wan_ip(), self.port)
    self.process_data_transfers()
def _setup_data_transfer_client(self, store_config, passive_port,
                                passive_bind, node_type, nat_type):
    """Wire up the FileTransfer client used for shard transfers.

    Transport info is resolved once up front and doubles as the
    success callback value for completed transfers.
    """
    transport_info = self.sync_get_transport_info()

    # Handler sets are shared with the public API so callbacks
    # registered later are also dispatched by the transfer client.
    api_handlers = {
        "complete": self._transfer_complete_handlers,
        "accept": self._transfer_request_handlers,
        "start": self._transfer_start_handlers
    }

    wif = self.get_key()  # same key as the DHT node
    external_ip = transport_info["wan"][0] if transport_info else None
    self._data_transfer = FileTransfer(
        net=Net(
            net_type="direct",
            node_type=node_type,
            nat_type=nat_type,
            dht_node=self,  # this node doubles as the DHT node
            debug=1,
            passive_port=passive_port,
            passive_bind=passive_bind,
            wan_ip=external_ip
        ),
        wif=wif,
        store_config=store_config,
        handlers=api_handlers
    )

    # Success callbacks resolve to our transport info.
    self._data_transfer.success_value = transport_info
    self.process_data_transfers()
def setUp(self): self.test_storage_dir = tempfile.mkdtemp() # Sample node. self.wallet = btctxstore.BtcTxStore(testnet=False, dryrun=True) self.wif = self.wallet.get_key(self.wallet.create_wallet()) self.node_id = address_to_node_id(self.wallet.get_address(self.wif)) self.store_config = { os.path.join(self.test_storage_dir, "storage"): {"limit": 0} } # dht_node = pyp2p.dht_msg.DHT(node_id=node_id) self.dht_node = storjnode.network.Node( self.wif, bootstrap_nodes=DEFAULT_BOOTSTRAP_NODES, disable_data_transfer=True ) # Transfer client. self.client = FileTransfer( pyp2p.net.Net( node_type="simultaneous", nat_type="preserving", net_type="direct", passive_port=0, dht_node=self.dht_node, debug=1 ), BandwidthLimit(), wif=self.wif, store_config=self.store_config ) # Accept all transfers. def accept_handler(contract_id, src_unl, data_id, file_size): return 1 # Add accept handler. self.client.handlers["accept"].add(accept_handler) # Accept UNL requests. enable_unl_requests(self.dht_node)
def test_queued():
    """End-to-end check that a queued transfer runs after the first.

    Alice uploads a one-byte shard to Bob; when Bob's download
    completes, the callback deletes the source and queues a download
    of the same shard back from Bob. Success is signalled through the
    module-level ``queue_succeeded`` flag (set from a nested callback,
    hence ``global``).
    """
    from crochet import setup
    setup()

    # Alice sample node.
    alice_wallet = BtcTxStore(testnet=False, dryrun=True)
    alice_wif = alice_wallet.create_key()
    alice_node_id = address_to_node_id(alice_wallet.get_address(alice_wif))
    # networking=0: simulated DHT, no real sockets for DHT traffic.
    alice_dht = pyp2p.dht_msg.DHT(
        node_id=alice_node_id,
        networking=0
    )
    alice = FileTransfer(
        pyp2p.net.Net(
            net_type="direct",
            node_type="passive",
            nat_type="preserving",
            passive_port=63400,
            dht_node=alice_dht,
            wan_ip="8.8.8.8",
            debug=1
        ),
        BandwidthLimit(),
        wif=alice_wif,
        store_config={tempfile.mkdtemp(): None},
    )

    # Bob sample node.
    bob_wallet = BtcTxStore(testnet=False, dryrun=True)
    bob_wif = bob_wallet.create_key()
    bob_node_id = address_to_node_id(bob_wallet.get_address(bob_wif))
    bob_dht = pyp2p.dht_msg.DHT(
        node_id=bob_node_id,
        networking=0
    )
    bob = FileTransfer(
        pyp2p.net.Net(
            net_type="direct",
            node_type="passive",
            nat_type="preserving",
            passive_port=63401,
            dht_node=bob_dht,
            wan_ip="8.8.8.8",
            debug=1
        ),
        BandwidthLimit(),
        wif=bob_wif,
        store_config={tempfile.mkdtemp(): None}
    )

    # Simulate Alice + Bob "connecting"
    alice_dht.add_relay_link(bob_dht)
    bob_dht.add_relay_link(alice_dht)

    # Accept all transfers.
    def accept_handler(contract_id, src_unl, data_id, file_size):
        return 1

    # Add accept handler.
    alice.handlers["accept"].add(accept_handler)
    bob.handlers["accept"].add(accept_handler)

    # Create file we're suppose to be uploading.
    # data_id is sha256("0") — matches the one-byte file written below.
    data_id = ("5feceb66ffc86f38d952786c6d696c"
               "79c2dbc239dd4e91b46729d73a27fb57e9")
    path = os.path.join(list(alice.store_config)[0], data_id)
    if not os.path.exists(path):
        with open(path, "w") as fp:
            fp.write("0")

    # Alice wants to upload data to Bob.
    # ("download" is from Bob's perspective — he downloads from Alice.)
    upload_contract_id = alice.data_request(
        "download",
        data_id,
        0,
        bob.net.unl.value
    )

    # Delete source file.
    def callback_builder(path, alice, bob, data_id):
        def callback(client, contract_id, con):
            print("Upload succeeded")
            print("Removing content and downloading back")
            os.remove(path)

            # Fix transfers.
            bob.handlers["complete"] = []

            # Synchronize cons and check con.unl.
            time.sleep(1)
            clients = {"alice": alice, "bob": bob}
            for client in list({"alice": alice, "bob": bob}):
                print()
                print(client)
                clients[client].net.synchronize()
                nodes_out = clients[client].net.outbound
                nodes_in = clients[client].net.inbound
                for node in nodes_out + nodes_in:
                    print(node["con"].unl)
                print(clients[client].cons)

            # Queued transfer:
            download_contract_id = alice.data_request(
                "upload",
                data_id,
                0,
                bob.net.unl.value
            )
            print("Download contract ID =")
            print(download_contract_id)

            # Indicate Bob's download succeeded.
            def alice_callback(val):
                print("Download succeeded")
                global queue_succeeded
                queue_succeeded = 1

            def alice_errback(val):
                print("Download failed! Error:")
                print(val)

            # Hook upload from bob.
            d = alice.defers[download_contract_id]
            d.addCallback(alice_callback)
            d.addErrback(alice_errback)

        return callback

    # Register callback for bob (when he's downloaded the data.)
    bob.handlers["complete"] = [
        callback_builder(path, alice, bob, data_id)
    ]

    # d = alice.defers[upload_contract_id]
    # d.addCallback(callback_builder(path, alice, bob, data_id))

    # Main event loop.  Pump both clients until the queued transfer
    # completes or the 40 second deadline passes.
    timeout = time.time() + 40
    while not queue_succeeded and time.time() < timeout:
        for client in [alice, bob]:
            if client == alice:
                _log.debug("Alice")
            else:
                _log.debug("Bob")
            process_transfers(client)
        time.sleep(1)

    if not queue_succeeded:
        print("\a")

    for client in [alice, bob]:
        client.net.stop()

    assert(queue_succeeded == 1)
class Node(object):
    """Storj network layer implementation.

    Provides a blocking dict like interface to the DHT for ease of use.
    """

    def __init__(self,
                 # kademlia DHT args
                 key, ksize=20, port=None, bootstrap_nodes=None,
                 dht_storage=None, max_messages=1024,
                 refresh_neighbours_interval=WALK_TIMEOUT,
                 # data transfer args
                 disable_data_transfer=True, store_config=None,
                 passive_port=None,
                 passive_bind=None,  # FIXME use utils.get_inet_facing_ip ?
                 node_type="unknown",  # FIXME what is this ?
                 nat_type="unknown",  # FIXME what is this ?
                 wan_ip=None):  # FIXME replace with sync_get_wan_ip calls
        """Create a blocking storjnode instance.

        Args:
            key (str): Bitcoin wif/hwif for auth, encryption and node id.
            ksize (int): The k parameter from the kademlia paper.
            port (int): Port for incoming packages, random by default.
            bootstrap_nodes [(ip, port), ...]: Known network node addresses.
            dht_storage: implements :interface:`~kademlia.storage.IStorage`
            max_messages (int): Max unprocessed messages, additional dropped.
            refresh_neighbours_interval (float): Auto refresh neighbours.
            disable_data_transfer: Disable data transfer for this node.
            store_config: Dict of storage paths to optional attributes.
                          limit: The dir size limit in bytes, 0 for no limit.
                          use_folder_tree: Files organized in a folder tree
                          (always on for fat partitions).
            passive_port (int): Port to receive inbound TCP connections on.
            passive_bind (ip): LAN IP to receive inbound TCP connections on.
            node_type: TODO doc string
            nat_type: TODO doc string
            wan_ip: TODO doc string
        """
        self.disable_data_transfer = bool(disable_data_transfer)
        self._transfer_request_handlers = set()
        self._transfer_complete_handlers = set()

        # set default store config if None given
        if store_config is None:
            store_config = storjnode.storage.manager.DEFAULT_STORE_CONFIG

        # validate port (randomish user port by default)
        port = port or random.choice(range(1024, 49151))
        assert(0 <= port < 2 ** 16)
        self.port = port

        # passive port (randomish user port by default)
        passive_port = passive_port or random.choice(range(1024, 49151))
        # BUGFIX: previously re-asserted `port` here, leaving passive_port
        # unvalidated.
        assert(0 <= passive_port < 2 ** 16)
        # FIXME chance of same port and passive_port being the same
        # FIXME exclude ports already being used on the machine

        # passive bind
        # FIXME just use storjnode.util.get_inet_facing_ip ?
        passive_bind = passive_bind or "0.0.0.0"
        assert(valid_ip(passive_bind))

        # validate bootstrap_nodes
        if bootstrap_nodes is None:
            bootstrap_nodes = DEFAULT_BOOTSTRAP_NODES  # pragma: no cover
        for address in bootstrap_nodes:
            assert(isinstance(address, tuple) or isinstance(address, list))
            assert(len(address) == 2)
            other_ip, other_port = address
            assert(valid_ip(other_ip))
            assert(isinstance(other_port, int))
            assert(0 <= other_port < 2 ** 16)

        # start services
        self._setup_server(key, ksize, dht_storage, max_messages,
                           refresh_neighbours_interval, bootstrap_nodes)
        if not self.disable_data_transfer:
            self._setup_data_transfer_client(
                store_config, passive_port, passive_bind,
                node_type, nat_type, wan_ip
            )
        self._setup_message_dispatcher()

    def _setup_message_dispatcher(self):
        """Start the background thread that dispatches received messages."""
        self._message_handlers = set()
        self._message_dispatcher_thread_stop = False
        self._message_dispatcher_thread = threading.Thread(
            target=self._message_dispatcher_loop
        )
        self._message_dispatcher_thread.start()

    def _setup_server(self, key, ksize, storage, max_messages,
                      refresh_neighbours_interval, bootstrap_nodes):
        """Create the kademlia DHT server, listen and bootstrap."""
        self.server = StorjServer(
            key, ksize=ksize, storage=storage, max_messages=max_messages,
            refresh_neighbours_interval=refresh_neighbours_interval
        )
        self.server.listen(self.port)
        self.server.bootstrap(bootstrap_nodes)

    def _setup_data_transfer_client(self, store_config, passive_port,
                                    passive_bind, node_type, nat_type,
                                    wan_ip):
        """Create and start the FileTransfer client for this node."""
        # Setup handlers for callbacks registered via the API.
        handlers = {
            "complete": self._transfer_complete_handlers,
            "request": self._transfer_request_handlers
        }
        self._data_transfer = FileTransfer(
            net=Net(
                net_type="direct",
                node_type=node_type,
                nat_type=nat_type,
                dht_node=SimDHT(),  # Replace with self.server later on.
                debug=1,
                passive_port=passive_port,
                passive_bind=passive_bind,
                wan_ip=wan_ip
            ),
            # FIXME use same key as dht
            wif=BtcTxStore(testnet=True, dryrun=True).create_key(),
            store_config=store_config,
            handlers=handlers
        )
        # Setup success callback values.
        self._data_transfer.success_value = (self.sync_get_wan_ip(),
                                             self.port)
        self.process_data_transfers()

    def stop(self):
        """Stop storj node."""
        self._message_dispatcher_thread_stop = True
        self._message_dispatcher_thread.join()
        self.server.stop()
        if not self.disable_data_transfer:
            self._data_transfer.net.stop()

    ##################
    # node interface #
    ##################

    def refresh_neighbours(self):
        self.server.refresh_neighbours()

    def get_known_peers(self):
        """Returns list of hex encoded node ids."""
        peers = list(self.server.get_known_peers())
        return list(map(lambda n: binascii.hexlify(n.id), peers))

    def get_key(self):
        """Returns Bitcoin wif for auth, encryption and node id"""
        return self.server.key

    def get_id(self):
        """Returns 160bit node id as bytes."""
        return self.server.get_id()

    def get_hex_id(self):
        return self.server.get_hex_id()

    ########################
    # networking interface #
    ########################

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_has_public_ip(self):
        """Find out if this node has a public IP or is behind a NAT.
        May false positive if you run other nodes on your local network.

        Returns:
            True if local IP is internet visible, otherwise False.

        Raises:
            crochet.TimeoutError after storjnode.network.server.QUERY_TIMEOUT
        """
        return self.async_has_public_ip()

    def async_has_public_ip(self):
        """Find out if this node has a public IP or is behind a NAT.
        May false positive if you run other nodes on your local network.

        Returns:
            A twisted.internet.defer.Deferred that resolves to True
            if local IP is internet visible, otherwise False.
        """
        return self.server.has_public_ip()

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_get_wan_ip(self):
        """Get the WAN IP of this Node.

        Returns:
            The WAN IP or None.

        Raises:
            crochet.TimeoutError after storjnode.network.server.QUERY_TIMEOUT
        """
        return self.async_get_wan_ip()

    def async_get_wan_ip(self):
        """Get the WAN IP of this Node.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            the WAN IP or None.
        """
        return self.server.get_wan_ip()

    ######################################
    # deprecated data transfer interface #
    ######################################

    def move_to_storage(self, path):
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")
        # FIXME remove and have callers use storage service instead
        return self._data_transfer.move_file_to_storage(path)

    def get_unl(self):
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")
        return self._data_transfer.net.unl.value

    @run_in_reactor
    def process_data_transfers(self):
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")
        time.sleep(5)  # Give enough time to copy UNL.
        LoopingCall(process_transfers,
                    self._data_transfer).start(0.002, now=True)

    ###########################
    # data transfer interface #
    ###########################

    def async_request_data_transfer(self, data_id, peer_unl, direction):
        """Request data be transferred to or from a peer.

        Args:
            data_id: The sha256 sum of the data to be transferred.
            peer_unl: The node UNL of the peer to get the data from.
            direction: "send" to peer or "receive" from peer

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            own transport address (ip, port) if successful else None

        Raises:
            RequestDenied: If the peer denied your request to transfer data.
            TransferError: If the data not transferred for other reasons.
        """
        # BUGFIX: this guard used to sit *after* the return statement and
        # was unreachable.
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")
        return self._data_transfer.simple_data_request(data_id, peer_unl,
                                                       direction)

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_request_data_transfer(self, data_id, peer_unl, direction):
        """Request data be transferred to or from a peer.

        This call will block until the data has been transferred in full
        or failed.

        Args:
            data_id: The sha256 sum of the data to be transferred.
            peer_unl: The node UNL of the peer to get the data from.
            direction: "send" to peer or "receive" from peer

        Raises:
            RequestDenied: If the peer denied your request to transfer data.
            TransferError: If the data not transferred for other reasons.
        """
        # BUGFIX: the deferred must be returned so @wait_for actually
        # blocks on it; previously it was discarded.
        return self.async_request_data_transfer(data_id, peer_unl, direction)

    def add_transfer_request_handler(self, handler):
        """Add an allow transfer request handler.

        If any handler returns True the transfer request will be accepted.
        The handler must be callable and accept four arguments
        (node, requester_id, data_id, direction). The direction parameter
        will be the opposite of the requester's direction.

        Example:
            def on_transfer_request(node, requester_id, data_id, direction):
                # This handler will accept everything but send nothing.
                if direction == "receive":
                    print("Accepting data: {0}".format(data_id))
                    return True
                elif direction == "send":
                    print("Refusing to send data {0}.".format(data_id))
                    return False
            node = Node()
            node.add_allow_transfer_handler(on_transfer_request)
        """
        self._transfer_request_handlers.add(handler)

    def remove_transfer_request_handler(self, handler):
        """Remove an allow transfer request handler from the Node.

        Raises:
            KeyError if handler was not previously added.
        """
        # BUGFIX: previously removed from _transfer_complete_handlers,
        # so request handlers could never be removed.
        self._transfer_request_handlers.remove(handler)

    def add_transfer_complete_handler(self, handler):
        """Add a transfer complete handler.

        The handler must be callable and accept four arguments
        (node, requester_id, data_id, direction). The direction parameter
        will be the opposite of the requester's direction.

        Example:
            def on_transfer_complete(node, requester_id, data_id, direction):
                if direction == "receive":
                    print("Received: {0}".format(data_id)
                elif direction == "send":
                    print("Sent: {0}".format(data_id)
            node = Node()
            node.add_transfer_complete_handler(on_transfer_complete)
        """
        self._transfer_complete_handlers.add(handler)

    def remove_transfer_complete_handler(self, handler):
        """Remove a transfer complete handler.

        Raises:
            KeyError if handler was not previously added.
        """
        self._transfer_complete_handlers.remove(handler)

    #####################
    # network utilities #
    #####################

    def async_map_network(self, outfile=None, attempts=2):
        """Create a map of the network.

        Output to outfile:
        {
            ID : {"transport": (ip, address), "neighbours": [ID, ...]},
        }

        Args:
            outfile: A file like object to write mapping data to.
            attempts: How often to attempt to contact each node.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            (num_nodes_found, num_nodes_unreached)
        """
        return self.server.map_network(outfile=outfile, attempts=attempts)

    @wait_for(timeout=WALK_TIMEOUT*20)
    def sync_map_network(self, outfile=None, attempts=3):
        return self.async_map_network(outfile=outfile, attempts=attempts)

    #######################
    # messaging interface #
    #######################

    def async_direct_message(self, nodeid, message):
        """Send direct message to a node and return a deferred result.

        Spidercrawls the network to find the node and sends the message
        directly. This will fail if the node is behind a NAT and doesn't
        have a public ip.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: iu-msgpack-python serializable message data

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            own transport address (ip, port) if successful else None
        """
        return self.server.direct_message(nodeid, message)

    @wait_for(timeout=WALK_TIMEOUT)
    def direct_message(self, nodeid, message):
        """Send direct message to a node and block until complete.

        Spidercrawls the network to find the node and sends the message
        directly. This will fail if the node is behind a NAT and doesn't
        have a public ip.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: iu-msgpack-python serializable message data

        Returns:
            Own transport address (ip, port) if successful else None

        Raises:
            crochet.TimeoutError after storjnode.network.server.WALK_TIMEOUT
        """
        return self.server.direct_message(nodeid, message)

    def relay_message(self, nodeid, message):
        """Send relay message to a node.

        Queues a message to be relayed across the network. Relay messages
        are sent to the node nearest the receiver in the routing table
        that accepts the relay message. This continues until it reaches
        the destination or the nearest node to the receiver is reached.

        Because messages are always relayed only to reachable nodes in
        the current routing table, there is a fair chance nodes behind
        a NAT can be reached if it is connected to the network.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: iu-msgpack-python serializable message data

        Returns:
            True if message was added to relay queue, otherwise False.
        """
        return self.server.relay_message(nodeid, message)

    def _dispatch_message(self, received, handler):
        # Handlers must never kill the dispatcher thread; log and move on.
        try:
            source = received["source"].id if received["source"] else None
            handler(source, received["message"])
        except Exception as e:
            msg = "Message handler raised exception: {0}"
            log.error(msg.format(repr(e)))

    def _message_dispatcher_loop(self):
        # Background thread body; exits when stop() sets the flag.
        while not self._message_dispatcher_thread_stop:
            for received in self.server.get_messages():
                for handler in self._message_handlers:
                    self._dispatch_message(received, handler)
            time.sleep(0.05)

    def add_message_handler(self, handler):
        """Add message handler to be called when a message is received.

        The handler must be callable and accept two arguments. The first
        argument is the source id and the second the message. The source
        id will be None if it was a relay message.

        Example:
            node = Node()
            def on_message(source_id, message):
                t = "relay" if source_id is None else "direct"
                print("Received {0} message: {1}".format(t, message))
            node.add_message_handler(handler)
        """
        self._message_handlers.add(handler)

    def remove_message_handler(self, handler):
        """Remove a message handler from the Node.

        Raises:
            KeyError if handler was not previously added.
        """
        self._message_handlers.remove(handler)

    ##############################
    # non blocking DHT interface #
    ##############################

    def async_get(self, key, default=None):
        """Get a key if the network has it.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            None if not found, the value otherwise.
        """
        # FIXME return default if not found (add to kademlia)
        return self.server.get(key)

    def async_set(self, key, value):
        """Set the given key to the given value in the network.

        Returns:
            A twisted.internet.defer.Deferred that resolves when set.
        """
        self.server.set(key, value)

    ###############################
    # blocking DHT dict interface #
    ###############################

    @wait_for(timeout=WALK_TIMEOUT)
    def get(self, key, default=None):
        """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.

        Raises:
            crochet.TimeoutError after storjnode.network.server.WALK_TIMEOUT
        """
        return self.async_get(key, default=default)

    @wait_for(timeout=WALK_TIMEOUT)
    def __setitem__(self, key, value):
        """x.__setitem__(i, y) <==> x[i]=y

        Raises:
            crochet.TimeoutError after storjnode.network.server.WALK_TIMEOUT
        """
        self.async_set(key, value)

    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        # Use a KeyError instance as the sentinel so any stored value
        # (including None) can be distinguished from "missing".
        result = self.get(key, KeyError(key))
        if isinstance(result, KeyError):
            raise result
        return result

    def __contains__(self, k):
        """D.__contains__(k) -> True if D has a key k, else False"""
        try:
            self[k]
            return True
        except KeyError:
            return False

    def has_key(self, k):
        """D.has_key(k) -> True if D has a key k, else False"""
        return k in self

    def setdefault(self, key, default=None):
        """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"""
        if key not in self:
            self[key] = default
        return self[key]

    def update(self, e=None, **f):
        """D.update([e, ]**f) -> None. Update D from dict/iterable e and f.

        If e present and has a .keys() method, does: for k in e: D[k] = e[k]
        If e present and lacks .keys() method, does: for (k, v) in e: D[k] = v
        In either case, this is followed by: for k in f: D[k] = f[k]
        """
        # BUGFIX: update() with no positional argument used to fall into
        # the iterable branch and raise TypeError iterating None.
        if e is not None:
            if "keys" in dir(e):
                for k in e:
                    self[k] = e[k]
            else:
                for (k, v) in e:
                    self[k] = v
        for k in f:
            self[k] = f[k]
# NOTE(review): fragment of a test script — test_storage_dir, wif and the
# imported names are defined earlier, outside this view.
store_config = {
    os.path.join(test_storage_dir, "storage"): {"limit": 0}
}
# DHT node with its own transfer machinery disabled; the FileTransfer
# client below handles transfers instead.
dht_node = storjnode.network.Node(
    wif, bootstrap_nodes=DEFAULT_BOOTSTRAP_NODES,
    disable_data_transfer=True
)

# Transfer client.
client = FileTransfer(
    pyp2p.net.Net(
        net_type="direct",
        passive_port=0,  # 0 = let the OS pick a free port
        dht_node=dht_node,
        debug=1
    ),
    BandwidthLimit(),
    wif=wif,
    store_config=store_config
)

# Accept all transfers.
def accept_handler(contract_id, src_unl, data_id, file_size):
    return 1

# Add accept handler.
client.handlers["accept"].add(accept_handler)

# NOTE(review): this handler body appears truncated at the chunk
# boundary — presumably it continues past this view; confirm upstream.
def completion_handler(success_value, contract_id=None, con=None):
    sending_data = False
class TestFileTransfer(unittest.TestCase):
    """Unit tests for the FileTransfer client."""

    def setUp(self):
        """Build a throwaway storage dir, DHT node and transfer client."""
        self.test_storage_dir = tempfile.mkdtemp()

        # Sample node: dryrun wallet, so no real coins/network involved.
        self.wallet = btctxstore.BtcTxStore(testnet=False, dryrun=True)
        self.wif = self.wallet.get_key(self.wallet.create_wallet())
        self.node_id = address_to_node_id(self.wallet.get_address(self.wif))
        self.store_config = {
            os.path.join(self.test_storage_dir, "storage"): {"limit": 0}
        }

        # dht_node = pyp2p.dht_msg.DHT(node_id=node_id)
        self.dht_node = storjnode.network.Node(
            self.wif, bootstrap_nodes=DEFAULT_BOOTSTRAP_NODES,
            disable_data_transfer=True
        )

        # Transfer client.
        self.client = FileTransfer(
            pyp2p.net.Net(
                node_type="simultaneous",
                nat_type="preserving",
                net_type="direct",
                passive_port=0,  # 0 = let the OS pick a free port
                dht_node=self.dht_node,
                debug=1
            ),
            wif=self.wif,
            store_config=self.store_config
        )

        # Accept all transfers.
        def accept_handler(contract_id, src_unl, data_id, file_size):
            return 1

        # Add accept handler.
        self.client.handlers["accept"].add(accept_handler)

    def tearDown(self):
        shutil.rmtree(self.test_storage_dir)
        self.client.net.dht_node.stop()

    def test_con_by_contract_id(self):
        contract_id = "something"
        con = 1
        self.client.con_info[con] = {
            contract_id: {}
        }
        assert(self.client.get_con_by_contract_id(contract_id) == con)

    def test_move_file_to_storage(self):
        junk, path = tempfile.mkstemp()
        # BUGFIX: "rw+" is not a valid open() mode and always raised
        # ValueError; "w+" allows the write and the read-back for hashing.
        with open(path, "w+") as fp:
            fp.write("1")
            fp.seek(0)  # rewind so get_id hashes the written content
            data_id = storjnode.storage.shard.get_id(fp)
        self.client.move_file_to_storage(path)
        self.client.remove_file_from_storage(data_id)

    def test_cleanup_transfers(self):
        con = "con"
        contract_id = "contract_id"
        data_id = "data_id"
        self.client.contracts[contract_id] = {
            "data_id": data_id,
            "host_unl": self.client.net.unl.value,
            "src_unl": self.client.net.unl.value,
            "dest_unl": self.client.net.unl.value
        }
        self.client.handshake[contract_id] = {}
        self.client.defers[contract_id] = {}
        self.client.con_transfer[con] = {}
        self.client.con_info[con] = {}
        self.client.cleanup_transfers(con, contract_id)

    def test_data_request(self):
        # Sending data to ourselves.
        self.client.data_request(
            "upload",
            "something",
            100,
            self.client.net.unl.value
        )

        # Already downloading this.
        data_id = "something"
        self.client.downloading[data_id] = 1
        # BUGFIX: the old try/assert(0)/bare-except pattern swallowed its
        # own AssertionError, so the test could never fail. assertRaises
        # fails properly when no exception is raised.
        with self.assertRaises(Exception):
            self.client.data_request(
                "upload",
                data_id,
                100,
                "another hosts unl"
            )

    def test_simple_data_request(self):
        self.client.simple_data_request(
            "something",
            self.client.net.unl.value,
            "receive"
        )

    def test_invalid_our_syn(self):
        self.client.simple_data_request(
            "something",
            "invalid unl",
            "receive"
        )

    @unittest.skip("Disable because too slow: move to node test code")
    def test_multiple_transfers(self):
        def make_random_file(file_size=1024 * 100,
                             directory=self.test_storage_dir):
            # Write file_size random bytes; name is the sha256 of the
            # first 64 bytes of content.
            content = os.urandom(file_size)
            file_name = hashlib.sha256(content[0:64]).hexdigest()
            path = storjnode.util.full_path(
                os.path.join(directory, file_name))
            with open(path, "wb") as fp:
                fp.write(content)
            return {
                "path": path,
                "content": content
            }

        # print("Giving nodes some time to find peers.")
        time.sleep(storjnode.network.WALK_TIMEOUT)
        self.dht_node.refresh_neighbours()
        time.sleep(storjnode.network.WALK_TIMEOUT)

        _log.debug("Net started")

        # Make random file
        rand_file_infos = [make_random_file()]

        # Move file to storage directory.
        file_infos = [
            self.client.move_file_to_storage(rand_file_infos[0]["path"])
        ]

        # Delete original file.
        os.remove(rand_file_infos[0]["path"])

        _log.debug("Testing upload")

        # Upload file from storage.
        for file_info in file_infos:
            self.client.data_request(
                "download",
                file_info["data_id"],
                0,
                TEST_NODE["unl"]
            )

        # Process file transfers.
        duration = 15
        timeout = time.time() + duration
        while time.time() <= timeout or self.client.is_queued():
            process_transfers(self.client)
            time.sleep(0.002)

        # Check upload exists.
        for i in range(0, 1):
            url = TEST_NODE["web"] + file_infos[i]["data_id"]
            r = requests.get(url, timeout=3)
            if r.status_code != 200:
                _log.debug(r.status_code)
                assert(0)
            else:
                assert(r.content == rand_file_infos[i]["content"])
        _log.debug("File upload succeeded.")

        # Delete storage file copy.
        self.client.remove_file_from_storage(file_infos[0]["data_id"])

        # Download file from storage.
        _log.debug("Testing download.")
        for file_info in file_infos:
            self.client.data_request(
                "upload",
                file_info["data_id"],
                0,
                TEST_NODE["unl"]
            )

        # Process file transfers.
        duration = 15
        timeout = time.time() + duration
        while time.time() <= timeout or self.client.is_queued():
            process_transfers(self.client)
            time.sleep(0.002)

        # Check we received this file.
        for i in range(0, 1):
            path = storjnode.storage.manager.find(self.store_config,
                                                  file_infos[i]["data_id"])
            if not os.path.isfile(path):
                assert(0)
            else:
                with open(path, "r") as fp:
                    content = fp.read()
                assert(content == rand_file_infos[i]["content"])

        # Delete storage file copy.
        self.client.remove_file_from_storage(file_infos[0]["data_id"])

        _log.debug("Download succeeded.")

        # Test cleanup transfers.
        for con in list(self.client.con_info):
            con.close()
            for contract_id in list(self.client.con_info[con]):
                self.client.cleanup_transfers(con, contract_id)
class Node(object):
    """Storj network layer implementation.

    Provides a blocking dict like interface to the DHT for ease of use.
    """

    def __init__(self,
                 # kademlia DHT args
                 key, ksize=20, port=None, bootstrap_nodes=None,
                 dht_storage=None, max_messages=1024,
                 refresh_neighbours_interval=WALK_TIMEOUT,

                 # data transfer args
                 disable_data_transfer=True, store_config=None,
                 passive_port=None,
                 passive_bind=None,  # FIXME use utils.get_inet_facing_ip ?
                 node_type="unknown",  # FIXME what is this ?
                 nat_type="unknown",  # FIXME what is this ?
                 ):
        """Create a blocking storjnode instance.

        Args:
            key (str): Bitcoin wif/hwif for auth, encryption and node id.
            ksize (int): The k parameter from the kademlia paper.
            port (port): Port to for incoming packages, randomly by default.
            bootstrap_nodes [(ip, port), ...]: Known network node addresses as.
            dht_storage: implements :interface:`~kademlia.storage.IStorage`
            max_messages (int): Max unprocessed messages, additional dropped.
            refresh_neighbours_interval (float): Auto refresh neighbours.
            disable_data_transfer: Disable data transfer for this node.
            store_config: Dict of storage paths to optional attributes.
                          limit: The dir size limit in bytes, 0 for no limit.
                          use_folder_tree: Files organized in a folder tree
                          (always on for fat partitions).
            passive_port (int): Port to receive inbound TCP connections on.
            passive_bind (ip): LAN IP to receive inbound TCP connections on.
            node_type: TODO doc string
            nat_type: TODO doc string
        """
        self.disable_data_transfer = bool(disable_data_transfer)
        self._transfer_request_handlers = set()
        self._transfer_complete_handlers = set()
        self._transfer_start_handlers = set()

        # set default store config if None given
        if store_config is None:
            store_config = storjnode.storage.manager.DEFAULT_STORE_CONFIG

        # validate port (randomish user port by default)
        port = port or random.choice(range(1024, 49151))
        assert(util.valid_port(port))
        self.port = port

        # passive port (randomish user port by default)
        passive_port = passive_port or random.choice(range(1024, 49151))
        assert(util.valid_port(passive_port))
        # FIXME chance of same port and passive_port being the same
        # FIXME exclude ports already being used on the machine

        # passive bind
        # FIXME just use storjnode.util.get_inet_facing_ip ?
        passive_bind = passive_bind or "0.0.0.0"
        assert(util.valid_ip(passive_bind))

        # validate bootstrap_nodes
        if bootstrap_nodes is None:
            bootstrap_nodes = DEFAULT_BOOTSTRAP_NODES  # pragma: no cover
        for address in bootstrap_nodes:
            assert(isinstance(address, tuple) or isinstance(address, list))
            assert(len(address) == 2)
            other_ip, other_port = address
            assert(util.valid_ip(other_ip))
            assert(isinstance(other_port, int))
            assert(0 <= other_port < 2 ** 16)

        # start services
        self._setup_server(key, ksize, dht_storage, max_messages,
                           refresh_neighbours_interval, bootstrap_nodes)

        # Process incoming messages.
        self._setup_message_dispatcher()

        if not self.disable_data_transfer:
            self._setup_data_transfer_client(
                store_config, passive_port, passive_bind, node_type, nat_type
            )
            self.add_message_handler(process_unl_requests)
            self.bandwidth_test = BandwidthTest(
                self.get_key(), self._data_transfer, self
            )

    def _setup_message_dispatcher(self):
        """Start the background thread that dispatches queued messages."""
        self._message_handlers = set()
        self._message_dispatcher_thread_stop = False
        self._message_dispatcher_thread = threading.Thread(
            target=self._message_dispatcher_loop
        )
        self._message_dispatcher_thread.start()

    def _setup_server(self, key, ksize, storage, max_messages,
                      refresh_neighbours_interval, bootstrap_nodes):
        """Create, listen and bootstrap the kademlia DHT server."""
        self.server = Server(
            key, self.port, ksize=ksize, storage=storage,
            max_messages=max_messages,
            refresh_neighbours_interval=refresh_neighbours_interval
        )
        self.server.listen(self.port)
        self.server.bootstrap(bootstrap_nodes)

    def _setup_data_transfer_client(self, store_config, passive_port,
                                    passive_bind, node_type, nat_type):
        """Create the FileTransfer client backed by this node's DHT."""
        result = self.sync_get_transport_info()

        # Setup handlers for callbacks registered via the API.
        handlers = {
            "complete": self._transfer_complete_handlers,
            "accept": self._transfer_request_handlers,
            "start": self._transfer_start_handlers
        }
        wif = self.get_key()
        dht_node = self  # this node doubles as the transfer client's DHT
        self._data_transfer = FileTransfer(
            net=Net(
                net_type="direct",
                node_type=node_type,
                nat_type=nat_type,
                dht_node=dht_node,
                debug=1,
                passive_port=passive_port,
                passive_bind=passive_bind,
                # transport info may be unavailable (e.g. no peers yet)
                wan_ip=result["wan"][0] if result else None
            ),
            wif=wif,
            store_config=store_config,
            handlers=handlers
        )

        # Setup success callback values.
        self._data_transfer.success_value = result
        self.process_data_transfers()

    def stop(self):
        """Stop storj node."""
        self._message_dispatcher_thread_stop = True
        self._message_dispatcher_thread.join()
        self.server.stop()
        if not self.disable_data_transfer:
            self._data_transfer.net.stop()

    ##################
    # node interface #
    ##################

    def refresh_neighbours(self):
        self.server.refresh_neighbours()

    def get_known_peers(self):
        """Returns list of known peers.

        Returns: iterable of kademlia.node.Node
        """
        return self.server.get_known_peers()

    def get_neighbours(self):
        return self.server.get_neighbours()

    def get_key(self):
        """Returns Bitcoin wif for auth, encryption and node id"""
        return self.server.key

    def get_id(self):
        """Returns 160bit node id as bytes."""
        return self.server.get_id()

    def get_address(self):
        return self.server.get_address()

    ########################
    # networking interface #
    ########################

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_has_public_ip(self):
        """Find out if this node has a public IP or is behind a NAT.

        The may false positive if you run other nodes on your local network.

        Returns:
            True if local IP is internet visible, otherwise False.

        Raises:
            crochet.TimeoutError after storjnode.network.server.QUERY_TIMEOUT
        """
        return self.async_has_public_ip()

    def async_has_public_ip(self):
        """Find out if this node has a public IP or is behind a NAT.

        The may false positive if you run other nodes on your local network.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            True if local IP is internet visible, otherwise False.
        """
        def handle(result):
            if result is None:
                return False
            return result["wan"] == result["lan"]
        return self.async_get_transport_info().addCallback(handle)

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_get_wan_ip(self):
        """Get the WAN IP of this Node.

        Returns:
            The WAN IP or None.

        Raises:
            crochet.TimeoutError after storjnode.network.server.QUERY_TIMEOUT
        """
        return self.async_get_wan_ip()

    def async_get_wan_ip(self):
        """Get the WAN IP of this Node.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            The WAN IP or None.
        """
        def handle(result):
            # FIX: guard against missing transport info instead of
            # raising TypeError (consistent with async_has_public_ip).
            return result["wan"][0] if result else None
        return self.async_get_transport_info().addCallback(handle)

    def async_get_transport_info(self):
        return self.server.get_transport_info()

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_get_transport_info(self):
        return self.async_get_transport_info()

    ######################################
    # deprecated data transfer interface #
    ######################################

    def move_to_storage(self, path):
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")
        # FIXME remove and have callers use storage service instead
        return self._data_transfer.move_file_to_storage(path)

    def get_unl_by_node_id(self, node_id):
        """Get the UNL of a peer by its node id.

        Sends a signed unl_request relay message and waits for a signed
        unl_response from the target node.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            The UNL on success or None.
        """
        # UNL request.
        _log.debug("In get UNL by node id")
        unl_req = OrderedDict([
            (u"type", u"unl_request"),
            (u"requester", self.get_address())
        ])

        # Sign UNL request.
        unl_req = sign(unl_req, self.get_key())

        # Handle responses for this request.
        def handler_builder(self, d, their_node_id, wif):
            def handler(node, src_id, msg):
                # Is this a response to our request?
                try:
                    msg = OrderedDict(msg)

                    # Not a UNL response.
                    if msg[u"type"] != u"unl_response":
                        _log.debug("unl response: type !=")
                        return

                    # Invalid UNL.
                    their_unl = UNL(value=msg[u"unl"]).deconstruct()
                    if their_unl is None:
                        _log.debug("unl response:their unl !=")
                        return

                    # Invalid signature.
                    if not verify_signature(msg, wif, their_node_id):
                        _log.debug("unl response: their sig")
                        return

                    # Everything passed: fire callback.
                    d.callback(msg[u"unl"])

                    # Remove this callback.
                    node.remove_message_handler(handler)
                except (ValueError, KeyError) as e:
                    _log.debug(str(e))
                    _log.debug("Protocol: invalid JSON")

            return handler

        # Build message handler.
        d = defer.Deferred()
        handler = handler_builder(self, d, node_id, self.get_key())

        # Register new handler for this UNL request.
        self.add_message_handler(handler)

        # Send our get UNL request to node.
        self.relay_message(node_id, unl_req)

        # Return a new deferred.
        return d

    def get_unl(self):
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")
        return self._data_transfer.net.unl.value

    @run_in_reactor
    def process_data_transfers(self):
        """Run process_transfers in the reactor on a tight LoopingCall."""
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")

        def process_transfers_error(ret):
            print("An unknown error occured in process_transfers deferred")
            print(ret)

        d = LoopingCall(
            process_transfers, self._data_transfer
        ).start(0.002, now=True)
        d.addErrback(process_transfers_error)

    def test_bandwidth(self, test_node_id):
        """Tests the bandwidth between yourself and a remote peer.

        Only one test can be active at any given time! If a test is
        already active: the deferred will call an errback that resolves
        to an exception (the callback won't be called) and the request
        won't go through.

        :param test_node_id: binary node_id as returned from get_id.
        :return: a deferred that returns this:
        {'download': 1048576, 'upload': 1048576} to a callback
        or raises an error to an errback on failure.

        ^ Note that the units are in bytes so if you want fancy
        measurement in kbs or mbs you will have to convert it.

        E.g.:
        def show_bandwidth(results):
            print(results)

        def handle_error(results):
            print(results)

        d = test_bandwidth ...
        d.addCallback(show_bandwidth)
        d.addErrback(handle_error)

        Todo: there should probably be a decorator to wrap functions
        that need a UNL (as the code below is similar to the
        request_data_transfer function.)
        """
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")

        # Get a deferred for their UNL.
        # FIX: was `node_id`, an undefined name (NameError); the
        # parameter is test_node_id.
        d = self.get_unl_by_node_id(test_node_id)

        # Make data request when we have their UNL.
        def callback(peer_unl):
            return self.bandwidth_test.start(peer_unl)

        # Add callback to UNL deferred.
        d.addCallback(callback)

        # Return deferred.
        return d

    ###########################
    # data transfer interface #
    ###########################

    def async_request_data_transfer(self, data_id, node_id, direction):
        """Request data be transfered to or from a peer.

        Args:
            data_id: The sha256 sum of the data to be transfered.
            node_id: Binary node id of the target to receive message.
            direction: "send" to peer or "receive" from peer

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            own transport address (ip, port) if successful else None

        Raises:
            RequestDenied: If the peer denied your request to transfer data.
            TransferError: If the data not transfered for other reasons.
        """
        if self.disable_data_transfer:
            raise Exception("Data transfer disabled!")

        # Get a deferred for their UNL.
        d = self.get_unl_by_node_id(node_id)

        # Make data request when we have their UNL.
        def callback_builder(data_id, direction):
            def callback(peer_unl):
                # Deferred.
                return self._data_transfer.simple_data_request(
                    data_id, peer_unl, direction
                )
            return callback

        # Add callback to UNL deferred.
        d.addCallback(callback_builder(data_id, direction))

        # Return deferred.
        return d

    @wait_for(timeout=QUERY_TIMEOUT)
    def sync_request_data_transfer(self, data_id, peer_unl, direction):
        """Request data be transfered to or from a peer.

        This call will block until the data has been transferred full or
        failed.

        Args:
            data_id: The sha256 sum of the data to be transfered.
            peer_unl: The node UNL of the peer to get the data from.
            direction: "send" to peer or "receive" from peer

        Raises:
            RequestDenied: If the peer denied your request to transfer data.
            TransferError: If the data not transfered for other reasons.
        """
        # FIX: the deferred must be returned, otherwise @wait_for has
        # nothing to wait on and this never actually blocks.
        return self.async_request_data_transfer(data_id, peer_unl, direction)

    def add_transfer_start_handler(self, handler):
        self._transfer_start_handlers.add(handler)

    def remove_transfer_start_handler(self, handler):
        self._transfer_start_handlers.remove(handler)

    def add_transfer_request_handler(self, handler):
        """Add an allow transfer request handler.

        If any handler returns True the transfer request will be accepted.
        The handler must be callable and accept four arguments
        (src_unl, data_id, direction, file_size).

        src_unl = The UNL of the source node ending the transfer request
        data_id = The shard ID of the data to download or upload
        direction = Direction from the perspective of the requester:
        e.g. send (upload data_id to requester) or receive (download
        data_id from requester)
        file_size = The size of the file they wish to transfer

        Example:
            def on_transfer_request(node_unl, data_id, direction, file_size):
                # This handler will accept everything but send nothing.
                if direction == "receive":
                    print("Accepting data: {0}".format(data_id))
                    return True
                elif direction == "send":
                    print("Refusing to send data {0}.".format(data_id))
                    return False

            node = Node()
            node.add_allow_transfer_handler(on_transfer_request)
        """
        self._transfer_request_handlers.add(handler)

    def remove_transfer_request_handler(self, handler):
        """Remove a allow transfer request handler from the Node.

        Raises:
            KeyError if handler was not previously added.
        """
        # FIX: was removing from _transfer_complete_handlers (copy-paste
        # bug), which both failed to unregister the request handler and
        # raised KeyError for handlers never added to the complete set.
        self._transfer_request_handlers.remove(handler)

    def add_transfer_complete_handler(self, handler):
        """Add a transfer complete handler.

        The handler must be callable and accept three arguments
        (node_id, data_id, direction).

        node_id = The node_ID we sent the transfer request to.
        (May be our node_id if the request was sent to us.)
        data_id = The shard to download or upload.
        direction = The direction of the transfer (e.g. send or receive.)

        Example:
            def on_transfer_complete(node_id, data_id, direction):
                if direction == "receive":
                    print("Received: {0}".format(data_id))
                elif direction == "send":
                    print("Sent: {0}".format(data_id))

            node = Node()
            node.add_transfer_complete_handler(on_transfer_complete)
        """
        self._transfer_complete_handlers.add(handler)

    def remove_transfer_complete_handler(self, handler):
        """Remove a transfer complete handler.

        Raises:
            KeyError if handler was not previously added.
        """
        self._transfer_complete_handlers.remove(handler)

    #######################
    # messaging interface #
    #######################

    def async_direct_message(self, nodeid, message):
        """Send direct message to a node and return a deferred result.

        Spidercrawls the network to find the node and sends the message
        directly. This will fail if the node is behind a NAT and doesn't
        have a public ip.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: iu-msgpack-python serializable message data

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            own transport address (ip, port) if successful else None
        """
        return self.server.direct_message(nodeid, message)

    @wait_for(timeout=WALK_TIMEOUT)
    def direct_message(self, nodeid, message):
        """Send direct message to a node and block until complete.

        Spidercrawls the network to find the node and sends the message
        directly. This will fail if the node is behind a NAT and doesn't
        have a public ip.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: iu-msgpack-python serializable message data

        Returns:
            Own transport address (ip, port) if successful else None

        Raises:
            crochet.TimeoutError after storjnode.network.server.WALK_TIMEOUT
        """
        return self.server.direct_message(nodeid, message)

    def relay_message(self, nodeid, message):
        """Send relay message to a node.

        Queues a message to be relayed accross the network. Relay
        messages are sent to the node nearest the receiver in the
        routing table that accepts the relay message. This continues
        until it reaches the destination or the nearest node to the
        receiver is reached.

        Because messages are always relayed only to reachable nodes in
        the current routing table, there is a fare chance nodes behind
        a NAT can be reached if it is connected to the network.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: iu-msgpack-python serializable message data

        Returns:
            True if message was added to relay queue, otherwise False.
        """
        return self.server.relay_message(nodeid, message)

    def _dispatch_message(self, received, handler):
        # Call one handler; exceptions are logged, never propagated, so
        # a broken handler cannot kill the dispatcher thread.
        try:
            source = received["source"].id if received["source"] else None
            return handler(self, source, received["message"])
        except Exception as e:
            txt = """Message handler raised exception: {0}\n\n{1}"""
            _log.error(txt.format(repr(e), traceback.format_exc()))

    def _message_dispatcher_loop(self):
        # Background thread body: drain server messages and fan them out
        # to a snapshot of the handler set (copy allows handlers to
        # add/remove handlers while dispatching).
        while not self._message_dispatcher_thread_stop:
            messages = self.server.get_messages()
            for received in messages:
                for handler in self._message_handlers.copy():
                    self._dispatch_message(received, handler)
            time.sleep(0.002)

    def add_message_handler(self, handler):
        """Add message handler to be call when a message is received.

        The handler must be callable and accept two arguments. The first
        is the calling node itself, the second argument is the source id
        and the third the message. The source id will be None if it was
        a relay message.

        Returns:
            The given handler.

        Example:
            node = Node()
            def on_message(node, source_id, message):
                t = "relay" if source_id is None else "direct"
                print("Received {0} message: {1}".format(t, message))
            node.add_message_handler(handler)
        """
        self._message_handlers.add(handler)
        return handler

    def remove_message_handler(self, handler):
        """Remove a message handler from the Node.

        Raises:
            KeyError if handler was not previously added.
        """
        self._message_handlers.remove(handler)

    ##############################
    # non blocking DHT interface #
    ##############################

    def async_get(self, key, default=None):
        """Get a key if the network has it.

        Returns:
            A twisted.internet.defer.Deferred that resolves to
            None if not found, the value otherwise.
        """
        # FIXME return default if not found (add to kademlia)
        return self.server.get(key)

    def async_set(self, key, value):
        """Set the given key to the given value in the network.

        Returns:
            A twisted.internet.defer.Deferred that resolves when set.
        """
        # FIX: the deferred was dropped, so callers (and the blocking
        # __setitem__ wrapper) could not wait for the set to finish.
        return self.server.set(key, value)

    ###############################
    # blocking DHT dict interface #
    ###############################

    @wait_for(timeout=WALK_TIMEOUT)
    def get(self, key, default=None):
        """D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.

        Raises:
            crochet.TimeoutError after storjnode.network.server.WALK_TIMEOUT
        """
        return self.async_get(key, default=default)

    @wait_for(timeout=WALK_TIMEOUT)
    def __setitem__(self, key, value):
        """x.__setitem__(i, y) <==> x[i]=y

        Raises:
            crochet.TimeoutError after storjnode.network.server.WALK_TIMEOUT
        """
        # FIX: return the deferred so @wait_for actually blocks until
        # the network set completes.
        return self.async_set(key, value)

    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        result = self.get(key, KeyError(key))
        if isinstance(result, KeyError):
            raise result
        return result

    def __contains__(self, k):
        """D.__contains__(k) -> True if D has a key k, else False"""
        try:
            self[k]
            return True
        except KeyError:
            return False

    def has_key(self, k):
        """D.has_key(k) -> True if D has a key k, else False"""
        return k in self

    def setdefault(self, key, default=None):
        """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"""
        if key not in self:
            self[key] = default
        return self[key]

    def update(self, e=None, **f):
        """D.update([e, ]**f) -> None.  Update D from dict/iterable e and f.

        If e present and has a .keys() method, does: for k in e: D[k] = e[k]
        If e present and lacks .keys() method, does: for (k, v) in e: D[k] = v
        In either case, this is followed by: for k in f: D[k] = f[k]
        """
        # FIX: guard on `e is not None` -- the old `if e and ... else`
        # shape tried to iterate e as pairs when e was None (TypeError
        # for update(**kwargs) calls), matching dict.update semantics now.
        if e is not None:
            if "keys" in dir(e):
                for k in e:
                    self[k] = e[k]
            else:
                for (k, v) in e:
                    self[k] = v
        for k in f:
            self[k] = f[k]
def test_multiple_transfers(self): def make_random_file(file_size=1024 * 100, directory=self.test_storage_dir): content = os.urandom(file_size) file_name = hashlib.sha256(content[0:64]).hexdigest() path = storjnode.util.full_path(os.path.join(directory, file_name)) with open(path, "wb") as fp: fp.write(content) return { "path": path, "content": content } # Sample node. wallet = btctxstore.BtcTxStore(testnet=True, dryrun=True) wif = wallet.get_key(wallet.create_wallet()) store_config = { os.path.join(self.test_storage_dir, "storage"): {"limit": 0} } client = FileTransfer( pyp2p.net.Net( node_type="simultaneous", nat_type="preserving", net_type="direct", passive_port=60400, dht_node=pyp2p.dht_msg.DHT(), debug=1 ), wif=wif, store_config=store_config ) _log.debug("Net started") # Make random file rand_file_infos = [make_random_file()] # Move file to storage directory. file_infos = [ client.move_file_to_storage(rand_file_infos[0]["path"]) ] # Delete original file. os.remove(rand_file_infos[0]["path"]) _log.debug("Testing upload") # Upload file from storage. for file_info in file_infos: client.data_request( "upload", file_info["data_id"], 0, TEST_NODE["unl"] ) # Process file transfers. duration = 15 timeout = time.time() + duration while time.time() <= timeout or client.is_queued(): process_transfers(client) time.sleep(0.002) # Check upload exists. for i in range(0, 1): url = TEST_NODE["web"] + file_infos[i]["data_id"] r = requests.get(url, timeout=3) if r.status_code != 200: _log.debug(r.status_code) assert(0) else: assert(r.content == rand_file_infos[i]["content"]) _log.debug("File upload succeeded.") # Delete storage file copy. client.remove_file_from_storage(file_infos[0]["data_id"]) # Download file from storage. _log.debug("Testing download.") for file_info in file_infos: client.data_request( "download", file_info["data_id"], 0, TEST_NODE["unl"] ) # Process file transfers. 
duration = 15 timeout = time.time() + duration while time.time() <= timeout or client.is_queued(): process_transfers(client) time.sleep(0.002) # Check we received this file. for i in range(0, 1): path = storage.manager.find(store_config, file_infos[i]["data_id"]) if not os.path.isfile(path): assert(0) else: with open(path, "r") as fp: content = fp.read() assert(content == rand_file_infos[i]["content"]) # Delete storage file copy. client.remove_file_from_storage(file_infos[0]["data_id"]) # Stop networking. client.net.stop() _log.debug("Download succeeded.")
    def setUp(self):
        """Build two offline FileTransfer peers (alice, bob) wired together.

        Both peers use networking=0 DHT stubs linked by relay links, have
        their outbound messaging and UNL.connect monkeypatched to no-op
        print stubs, accept all transfer requests, and share a canned SYN
        contract in self.syn for the handshake tests.
        """
        # Alice
        self.alice_wallet = BtcTxStore(testnet=False, dryrun=True)
        # Fixed WIF so alice's node id is deterministic across runs.
        self.alice_wif = "L18vBLrz3A5QxJ6K4bUraQQZm6BAdjuAxU83e16y3x7eiiHTApHj"
        self.alice_node_id = address_to_node_id(
            self.alice_wallet.get_address(self.alice_wif)
        )
        self.alice_dht_node = pyp2p.dht_msg.DHT(
            node_id=self.alice_node_id,
            networking=0
        )
        self.alice_storage = tempfile.mkdtemp()
        self.alice = FileTransfer(
            pyp2p.net.Net(
                net_type="direct",
                node_type="passive",
                nat_type="preserving",
                passive_port=0,
                dht_node=self.alice_dht_node,
                wan_ip="8.8.8.8",
                debug=1
            ),
            BandwidthLimit(),
            wif=self.alice_wif,
            store_config={self.alice_storage: None}
        )

        # Bob
        self.bob_wallet = BtcTxStore(testnet=False, dryrun=True)
        # Fixed WIF so bob's node id is deterministic across runs.
        self.bob_wif = "L3DBWWbuL3da2x7qAmVwBpiYKjhorJuAGobecCYQMCV7tZMAnDsr"
        self.bob_node_id = address_to_node_id(
            self.bob_wallet.get_address(self.bob_wif))
        self.bob_dht_node = pyp2p.dht_msg.DHT(
            node_id=self.bob_node_id,
            networking=0
        )
        self.bob_storage = tempfile.mkdtemp()
        self.bob = FileTransfer(
            pyp2p.net.Net(
                net_type="direct",
                node_type="passive",
                nat_type="preserving",
                passive_port=0,
                dht_node=self.bob_dht_node,
                wan_ip="8.8.8.8",
                debug=1
            ),
            BandwidthLimit(),
            wif=self.bob_wif,
            store_config={self.bob_storage: None}
        )

        # Accept all transfers.
        def accept_handler(contract_id, src_unl, data_id, file_size):
            return 1

        # Add accept handler.
        self.alice.handlers["accept"].add(accept_handler)
        self.bob.handlers["accept"].add(accept_handler)

        # Link DHT nodes.
        self.alice_dht_node.add_relay_link(self.bob_dht_node)
        self.bob_dht_node.add_relay_link(self.alice_dht_node)

        # Bypass sending messages for client.
        def send_msg(dict_obj, unl):
            print("Skipped sending message in test")
            print(dict_obj)
            print(unl)

        # Install send msg hooks.
        self.alice.send_msg = send_msg
        self.bob.send_msg = send_msg

        # Bypass sending relay messages for clients.
        def relay_msg(node_id, msg):
            print("Skipping relay message in test")
            print(node_id)
            print(msg)

        # Install relay msg hooks.
        if self.alice.net.dht_node is not None:
            self.alice.net.dht_node.relay_message = relay_msg
        if self.bob.net.dht_node is not None:
            self.bob.net.dht_node.relay_message = relay_msg

        # Bypass UNL.connect for clients.
        def unl_connect(their_unl, events, force_master=1, hairpin=1,
                        nonce="0" * 64):
            print("Skipping UNL.connect!")
            print("Their unl = ")
            print(their_unl)
            print("Events = ")
            print(events)
            print("Force master = ")
            print(force_master)
            print("Hairpin = ")
            print(hairpin)
            print("Nonce = ")
            print(nonce)

        # Install UNL connect hooks.
        self.alice.net.unl.connect = unl_connect
        self.bob.net.unl.connect = unl_connect

        # Record syn.
        # sha256 of "0" split across two literals to stay within line length.
        data_id = u"5feceb66ffc86f38d952786c6d696c79"
        data_id += u"c2dbc239dd4e91b46729d73a27fb57e9"
        self.syn = OrderedDict([
            (u"status", u"SYN"),
            (u"data_id", data_id),
            (u"file_size", 100),
            (u"host_unl", self.alice.net.unl.value),
            (u"dest_unl", self.bob.net.unl.value),
            (u"src_unl", self.alice.net.unl.value)
        ])
class TestFileHandshake(unittest.TestCase): def setUp(self): # Alice self.alice_wallet = BtcTxStore(testnet=False, dryrun=True) self.alice_wif = "L18vBLrz3A5QxJ6K4bUraQQZm6BAdjuAxU83e16y3x7eiiHTApHj" self.alice_node_id = address_to_node_id( self.alice_wallet.get_address(self.alice_wif) ) self.alice_dht_node = pyp2p.dht_msg.DHT( node_id=self.alice_node_id, networking=0 ) self.alice_storage = tempfile.mkdtemp() self.alice = FileTransfer( pyp2p.net.Net( net_type="direct", node_type="passive", nat_type="preserving", passive_port=0, dht_node=self.alice_dht_node, wan_ip="8.8.8.8", debug=1 ), BandwidthLimit(), wif=self.alice_wif, store_config={self.alice_storage: None} ) # Bob self.bob_wallet = BtcTxStore(testnet=False, dryrun=True) self.bob_wif = "L3DBWWbuL3da2x7qAmVwBpiYKjhorJuAGobecCYQMCV7tZMAnDsr" self.bob_node_id = address_to_node_id( self.bob_wallet.get_address(self.bob_wif)) self.bob_dht_node = pyp2p.dht_msg.DHT( node_id=self.bob_node_id, networking=0 ) self.bob_storage = tempfile.mkdtemp() self.bob = FileTransfer( pyp2p.net.Net( net_type="direct", node_type="passive", nat_type="preserving", passive_port=0, dht_node=self.bob_dht_node, wan_ip="8.8.8.8", debug=1 ), BandwidthLimit(), wif=self.bob_wif, store_config={self.bob_storage: None} ) # Accept all transfers. def accept_handler(contract_id, src_unl, data_id, file_size): return 1 # Add accept handler. self.alice.handlers["accept"].add(accept_handler) self.bob.handlers["accept"].add(accept_handler) # Link DHT nodes. self.alice_dht_node.add_relay_link(self.bob_dht_node) self.bob_dht_node.add_relay_link(self.alice_dht_node) # Bypass sending messages for client. def send_msg(dict_obj, unl): print("Skipped sending message in test") print(dict_obj) print(unl) # Install send msg hooks. self.alice.send_msg = send_msg self.bob.send_msg = send_msg # Bypass sending relay messages for clients. def relay_msg(node_id, msg): print("Skipping relay message in test") print(node_id) print(msg) # Install relay msg hooks. 
        # --- tail of setUp (the def line is above this chunk) ---
        # Install the relay hook on both DHT nodes so messages are routed
        # through relay_msg (defined earlier in setUp — not visible here).
        if self.alice.net.dht_node is not None:
            self.alice.net.dht_node.relay_message = relay_msg
        if self.bob.net.dht_node is not None:
            self.bob.net.dht_node.relay_message = relay_msg

        # Bypass UNL.connect for clients.
        def unl_connect(their_unl, events, force_master=1, hairpin=1,
                        nonce="0" * 64):
            # Stub that logs its arguments instead of opening a connection.
            print("Skipping UNL.connect!")
            print("Their unl = ")
            print(their_unl)
            print("Events = ")
            print(events)
            print("Force master = ")
            print(force_master)
            print("Hairpin = ")
            print(hairpin)
            print("Nonce = ")
            print(nonce)

        # Install UNL connect hooks.
        self.alice.net.unl.connect = unl_connect
        self.bob.net.unl.connect = unl_connect

        # Record syn.
        # data_id is sha256(b"0") split across two literals.
        data_id = u"5feceb66ffc86f38d952786c6d696c79"
        data_id += u"c2dbc239dd4e91b46729d73a27fb57e9"
        # Template SYN contract reused (deep-copied) by the tests below:
        # Alice asks to send data_id to Bob.
        self.syn = OrderedDict([
            (u"status", u"SYN"),
            (u"data_id", data_id),
            (u"file_size", 100),
            (u"host_unl", self.alice.net.unl.value),
            (u"dest_unl", self.bob.net.unl.value),
            (u"src_unl", self.alice.net.unl.value)
        ])

    def tearDown(self):
        """Stop both test clients' networking."""
        self.alice.net.stop()
        self.bob.net.stop()

    def test_message_flow(self):
        """Walk a full SYN -> SYN-ACK -> ACK handshake between Alice and Bob."""
        print("")
        print("Testing message flow")
        print("")

        # Create file we're supposed to be uploading.
        path = os.path.join(self.alice_storage, self.syn[u"data_id"])
        if not os.path.exists(path):
            with open(path, "w") as fp:
                fp.write("0")

        # Clear existing contracts.
        self.clean_slate_all()

        # Alice: build SYN.
        contract_id = self.alice.simple_data_request(
            data_id=self.syn[u"data_id"],
            node_unl=self.bob.net.unl.value,
            direction=u"send"
        )
        syn = self.alice.contracts[contract_id]
        self.assertIsInstance(syn, OrderedDict)
        print(self.alice.net.unl.value)
        print(self.bob.net.unl.value)
        print(syn)

        # Bob: process SYN, build SYN-ACK.
        syn_ack = process_syn(self.bob, syn)
        self.assertIsInstance(syn_ack, OrderedDict)

        # Alice: process SYN-ACK, build ACK.
        ack = process_syn_ack(self.alice, syn_ack)
        self.assertIsInstance(ack, OrderedDict)

        # Bob: process ack.
        fin = process_ack(self.bob, ack)
        self.assertTrue(fin == 1)
        print("")
        print("Done testing message flow")
        print("")

    def clean_slate(self, client):
        """Reset all per-client transfer/handshake state on *client*."""
        client.contracts = {}
        client.cons = []
        client.defers = {}
        client.handshake = {}
        client.con_info = {}
        client.con_transfer = {}
        client.downloading = {}

    def clean_slate_all(self):
        """Reset transfer state on both Alice and Bob."""
        for client in [self.alice, self.bob]:
            self.clean_slate(client)

    def test_sign_syn(self):
        """Sign a SYN and verify the signature from both peers' perspectives."""
        print("")
        print("Testing sign syn")
        print("")
        self.clean_slate_all()
        syn = copy.deepcopy(self.syn)
        signed_syn = self.alice.sign_contract(syn)
        print(signed_syn)
        print(self.alice.is_valid_contract_sig(signed_syn))
        # Verify against the node ID from the DHT node...
        node_id = self.alice.net.dht_node.get_id()
        print(node_id)
        self.assertEqual(
            self.alice.is_valid_contract_sig(signed_syn, node_id), 1
        )
        # ...and against the node ID parsed from Alice's UNL.
        node_id = parse_node_id_from_unl(self.alice.net.unl.value)
        self.assertEqual(
            self.alice.is_valid_contract_sig(signed_syn, node_id), 1
        )
        print(node_id)
        self.assertTrue(syn[u"src_unl"] == self.alice.net.unl.value)
        print("Bob's perspective")
        assert(self.bob.is_valid_contract_sig(signed_syn, node_id))
        print("----")
        print(signed_syn)
        print("")
        print("End sign syn")
        print("")

    def test_process_syn(self):
        """Exercise every process_syn error code and the success path.

        Error codes checked: -2 handler reject, -3 wrong dest UNL,
        -4 invalid signature, -5 bad handshake state.
        """
        print("")
        print("Testing process syn")
        print("")
        self.clean_slate_all()
        syn = copy.deepcopy(self.syn)

        # Create file we're supposed to be uploading.
        path = os.path.join(self.alice_storage, syn[u"data_id"])
        if not os.path.exists(path):
            with open(path, "w") as fp:
                fp.write("0")

        # Test accept SYN with a handler.
        def request_handler(contract_id, src_unl, data_id, file_size):
            return 1
        self.bob.handlers["accept"] = [request_handler]
        syn = copy.deepcopy(self.syn)
        self.assertIsInstance(process_syn(
            self.bob, self.alice.sign_contract(syn),
            enable_accept_handlers=1
        ), OrderedDict)
        del syn["signature"]

        # Test reject SYN with a handler.
        def request_handler(contract_id, src_unl, data_id, file_size):
            return 0
        self.bob.handlers["accept"] = [request_handler]
        syn = copy.deepcopy(self.syn)
        self.assertTrue(process_syn(
            self.bob, self.alice.sign_contract(syn),
            enable_accept_handlers=1
        ) == -2)
        del syn["signature"]

        # Our UNL is incorrect.
        syn = copy.deepcopy(self.syn)
        syn[u"dest_unl"] = self.alice.net.unl.value
        self.assertTrue(process_syn(
            self.bob, self.alice.sign_contract(syn),
            enable_accept_handlers=0
        ) == -3)
        syn[u"dest_unl"] = self.bob.net.unl.value
        del syn["signature"]

        # Their sig is invalid.
        syn = copy.deepcopy(self.syn)
        syn[u"signature"] = "x"
        self.assertTrue(process_syn(
            self.bob, syn,
            enable_accept_handlers=0
        ) == -4)
        del syn["signature"]

        # Handshake state is incorrect.
        syn = copy.deepcopy(self.syn)
        syn = self.alice.sign_contract(syn)
        contract_id = self.bob.contract_id(syn)
        self.bob.handshake[contract_id] = "SYN"
        self.assertTrue(process_syn(
            self.bob, syn,
            enable_accept_handlers=0
        ) == -5)
        del self.bob.handshake[contract_id]

        # This should pass.
        self.assertIsInstance(process_syn(
            self.bob, syn,
            enable_accept_handlers=0
        ), OrderedDict)
        print("")
        print("Ending process syn")
        print("")

    def test_valid_syn_ack(self):
        """Exercise every process_syn_ack error code (-1..-8) and success.

        Alice is the original sender, so she processes the SYN-ACK that
        Bob signed over her SYN.
        """
        print("")
        print("Testing process syn-ack")
        print("")
        self.clean_slate_all()
        syn = self.alice.sign_contract(copy.deepcopy(self.syn))
        syn_ack = OrderedDict([(u'status', u'SYN-ACK'), (u'syn', syn)])
        syn_ack = self.bob.sign_contract(syn_ack)

        # Clear any old contracts that might exist.
        self.alice.contracts = {}

        # Create file we're supposed to be uploading.
        path = os.path.join(self.alice_storage, syn_ack[u"syn"][u"data_id"])
        if not os.path.exists(path):
            with open(path, "w") as fp:
                fp.write("0")

        # Syn not in message.
        syn_ack_2 = copy.deepcopy(syn_ack)
        del syn_ack_2[u"syn"]
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -1)

        # Invalid fields.
        syn_ack_2 = copy.deepcopy(syn_ack)
        syn_ack_2[u"xxx"] = "0"
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -2)

        # Not a reply to something we sent.
        syn_ack_2 = copy.deepcopy(syn_ack)
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -3)

        # Save original SYN as a contract.
        contract_id = self.alice.contract_id(syn_ack_2[u"syn"])
        self.alice.contracts[contract_id] = syn_ack_2[u"syn"]

        # Is SYN valid.
        syn_ack_2 = copy.deepcopy(syn_ack)
        syn_ack_2[u"syn"][u"file_size"] = "10"
        contract_id = self.alice.contract_id(syn_ack_2[u"syn"])
        self.alice.contracts[contract_id] = syn_ack_2[u"syn"]
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -4)

        # Did we sign this?
        syn_ack_2 = copy.deepcopy(syn_ack)
        syn_ack_2[u"syn"][u"signature"] = "x"
        contract_id = self.alice.contract_id(syn_ack_2[u"syn"])
        self.alice.contracts[contract_id] = syn_ack_2[u"syn"]
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -5)

        # Check their sig is valid.
        syn_ack_2 = copy.deepcopy(syn_ack)
        syn_ack_2[u"signature"] = "x"
        contract_id = self.alice.contract_id(syn_ack_2[u"syn"])
        self.alice.contracts[contract_id] = syn_ack_2[u"syn"]
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -6)

        # Check handshake state is valid.
        syn_ack_2 = copy.deepcopy(syn_ack)
        self.alice.handshake = {}
        ret = process_syn_ack(self.alice, syn_ack_2)
        print("ERror 1")
        print(ret)
        self.assertTrue(ret == -7)
        # NOTE(review): this uses contract_id computed before the deepcopy
        # above — it happens to match because the copy is unmodified here,
        # but the stale reuse is fragile; confirm intent.
        self.alice.handshake[contract_id] = {
            u"state": u"ACK",
            u"timestamp": time.time()
        }
        contract_id = self.alice.contract_id(syn_ack_2[u"syn"])
        self.alice.contracts[contract_id] = syn_ack_2[u"syn"]
        self.assertTrue(process_syn_ack(self.alice, syn_ack_2) == -8)
        self.alice.handshake[contract_id] = {
            u"state": u"SYN",
            u"timestamp": time.time()
        }

        # This should pass.
        syn_ack_2 = copy.deepcopy(syn_ack)
        contract_id = self.alice.contract_id(syn_ack_2[u"syn"])
        self.alice.contracts[contract_id] = syn_ack_2[u"syn"]
        ret = process_syn_ack(self.alice, syn_ack_2)
        print(ret)
        self.assertIsInstance(ret, OrderedDict)
        print("")
        print("Ending process syn-ack")
        print("")

    def test_valid_ack(self):
        """Exercise every process_ack error code (-1..-6) and success.

        Bob originated the SYN-ACK, so he is the one processing the ACK.
        """
        print("")
        print("Testing process ack")
        print("")
        self.clean_slate_all()
        syn = self.alice.sign_contract(copy.deepcopy(self.syn))
        syn_ack = OrderedDict([(u'status', u'SYN-ACK'), (u'syn', syn)])
        syn_ack = self.bob.sign_contract(syn_ack)
        ack = OrderedDict([(u'status', u'ACK'), (u'syn_ack', syn_ack)])
        ack = self.alice.sign_contract(ack)

        # SYN ack not in message.
        ack_2 = copy.deepcopy(ack)
        del ack_2[u"syn_ack"]
        self.assertTrue(process_ack(self.bob, ack_2) == -1)

        # Invalid length.
        ack_2 = copy.deepcopy(ack)
        ack_2["yy"] = 1
        self.assertTrue(process_ack(self.bob, ack_2) == -2)

        # Not a reply to our syn-ack.
        ack_2 = copy.deepcopy(ack)
        self.assertTrue(process_ack(self.bob, ack_2) == -3)

        # Our sig is invalid.
        ack_2 = copy.deepcopy(ack)
        ack_2[u"syn_ack"][u"signature"] = "x"
        contract_id = self.bob.contract_id(ack_2[u"syn_ack"][u"syn"])
        self.bob.contracts[contract_id] = ack_2[u"syn_ack"][u"syn"]
        self.assertTrue(process_ack(self.bob, ack_2) == -4)

        # Contract ID not in handshakes.
        ack_2 = copy.deepcopy(ack)
        contract_id = self.bob.contract_id(ack_2[u"syn_ack"][u"syn"])
        self.bob.contracts[contract_id] = ack_2[u"syn_ack"][u"syn"]
        # NOTE(review): clearing *alice*'s handshake while Bob processes the
        # ACK looks like it should be self.bob.handshake — the assert only
        # passes because Bob's handshake is already empty; confirm intent.
        self.alice.handshake = {}
        self.assertTrue(process_ack(self.bob, ack_2) == -5)

        # Handshake state is invalid.
        ack_2 = copy.deepcopy(ack)
        contract_id = self.bob.contract_id(ack_2[u"syn_ack"][u"syn"])
        self.bob.contracts[contract_id] = ack_2[u"syn_ack"][u"syn"]
        self.bob.handshake[contract_id] = {
            u"state": "SYN",
            u"timestamp": time.time()
        }
        self.assertTrue(process_ack(self.bob, ack_2) == -6)

        # This should pass.
        ack_2 = copy.deepcopy(ack)
        contract_id = self.bob.contract_id(ack_2[u"syn_ack"][u"syn"])
        self.bob.contracts[contract_id] = ack_2[u"syn_ack"][u"syn"]
        self.bob.handshake[contract_id] = {
            u"state": "SYN-ACK",
            u"timestamp": time.time()
        }
        ret = process_ack(self.bob, ack_2)
        print(ret)
        self.assertTrue(ret == 1)
        print("")
        print("Ending process ack")
        print("")

    def test_valid_rst(self):
        """Exercise every process_rst error code (-1..-5), success, and
        that a pending deferred's errback fires on RST."""
        print("")
        print("Testing process rst")
        print("")
        self.clean_slate_all()
        syn = self.alice.sign_contract(copy.deepcopy(self.syn))

        # Rest contract state.
        self.bob.contracts = {}
        contract_id = self.alice.contract_id(syn)
        rst = OrderedDict([
            (u"status", u"RST"),
            (u"contract_id", contract_id),
            (u"src_unl", self.bob.net.unl.value)
        ])

        # Contract ID not in message.
        rst_2 = copy.deepcopy(rst)
        del rst_2["contract_id"]
        self.assertTrue(process_rst(self.alice, rst_2) == -1)

        # SRC UNL not in message.
        rst_2 = copy.deepcopy(rst)
        del rst_2["src_unl"]
        self.assertTrue(process_rst(self.alice, rst_2) == -2)

        # Contract not found.
        rst_2 = copy.deepcopy(rst)
        self.assertTrue(process_rst(self.alice, rst_2) == -3)

        # UNLs don't match for this contract.
        self.alice.contracts[contract_id] = syn
        rst_2 = copy.deepcopy(rst)
        rst_2[u"src_unl"] = self.alice.net.unl.value
        self.assertTrue(process_rst(self.alice, rst_2) == -4)

        # Sig doesn't match for this contract.
        rst_2 = copy.deepcopy(rst)
        self.assertTrue(process_rst(self.alice, rst_2) == -5)

        # This should pass.
        rst_2 = copy.deepcopy(rst)
        rst_2 = self.bob.sign_contract(rst_2)
        self.assertTrue(process_rst(self.alice, rst_2) == 1)

        # Setup callback.
        # Uses a module-level flag so the nested errback can signal us.
        def callback(ret):
            global callbacks_work
            callbacks_work = 1

        # Check defer callbacks.
        d = defer.Deferred()
        self.alice.defers[contract_id] = d
        d.addErrback(callback)
        self.assertTrue(process_rst(self.alice, rst_2) == 1)
        self.assertTrue(callbacks_work == 1)
        print("")
        print("Ending process rst")
        print("")

    def test_valid_syn(self):
        """Exercise is_valid_syn error codes (-1..-3, -6..-10) and success.

        The -4 "syn too big" case is disabled (string-literal block below);
        presumably too slow — TODO confirm before re-enabling.
        """
        print("")
        print("Testing is_valid_syn")
        print("")
        self.clean_slate_all()

        # Non existing fields.
        syn = {}
        self.assertTrue(is_valid_syn(self.alice, syn) == -1)

        # Invalid number of fields.
        syn = copy.deepcopy(self.syn)
        syn["test"] = "test"
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)) == -2
        )
        del syn["test"]
        del syn["signature"]

        # The data ID is wrong.
        syn["data_id"] = "x"
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)) == -3
        )
        syn["data_id"] = hashlib.sha256(b"0").hexdigest()
        del syn["signature"]

        # Syn is too big.
        """
        syn[u"file_size"] = int("9" * (5242880 + 10))
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)
        ) == -4)
        syn[u"file_size"] = 1
        """

        # Invalid UNLs.
        syn["host_unl"] = "0"
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)) == -6
        )
        syn["host_unl"] = self.alice.net.unl.value
        del syn["signature"]

        # Invalid file size.
        syn["file_size"] = str("0")
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)) == -7
        )
        syn["file_size"] = 20
        del syn["signature"]

        # We're the host and we don't have this file.
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)) == -8
        )
        del syn["signature"]

        # We're not the host. We're downloading this.
        # and we already have the file.
        syn[u"host_unl"] = self.bob.net.unl.value
        path = os.path.join(self.alice_storage, syn[u"data_id"])
        if not os.path.exists(path):
            with open(path, "w") as fp:
                fp.write("0")
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)) == -9
        )
        del syn["signature"]

        # We're not the host and we're already downloading this
        os.remove(path)
        self.alice.downloading[syn[u"data_id"]] = path
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)
        ) == -10)
        del self.alice.downloading[syn[u"data_id"]]
        del syn["signature"]

        # This should pass.
        self.assertTrue(is_valid_syn(
            self.alice,
            self.alice.sign_contract(syn)
        ) == 1)
        print("")
        print("Ending is_valid_syn")
        print("")
def test_multiple_transfers(self): def make_random_file(file_size=1024 * 100, directory=self.test_storage_dir): content = os.urandom(file_size) file_name = hashlib.sha256(content[0:64]).hexdigest() path = storjnode.util.full_path(os.path.join(directory, file_name)) with open(path, "wb") as fp: fp.write(content) return {"path": path, "content": content} # Sample node. wallet = btctxstore.BtcTxStore(testnet=True, dryrun=True) wif = wallet.get_key(wallet.create_wallet()) store_config = { os.path.join(self.test_storage_dir, "storage"): { "limit": 0 } } client = FileTransfer(pyp2p.net.Net(node_type="simultaneous", nat_type="preserving", net_type="direct", passive_port=60400, dht_node=pyp2p.dht_msg.DHT(), debug=1), wif=wif, store_config=store_config) _log.debug("Net started") # Make random file rand_file_infos = [make_random_file()] # Move file to storage directory. file_infos = [client.move_file_to_storage(rand_file_infos[0]["path"])] # Delete original file. os.remove(rand_file_infos[0]["path"]) _log.debug("Testing upload") # Upload file from storage. for file_info in file_infos: client.data_request("upload", file_info["data_id"], 0, TEST_NODE["unl"]) # Process file transfers. duration = 15 timeout = time.time() + duration while time.time() <= timeout or client.is_queued(): process_transfers(client) time.sleep(0.002) # Check upload exists. for i in range(0, 1): url = TEST_NODE["web"] + file_infos[i]["data_id"] r = requests.get(url, timeout=3) if r.status_code != 200: _log.debug(r.status_code) assert (0) else: assert (r.content == rand_file_infos[i]["content"]) _log.debug("File upload succeeded.") # Delete storage file copy. client.remove_file_from_storage(file_infos[0]["data_id"]) # Download file from storage. _log.debug("Testing download.") for file_info in file_infos: client.data_request("download", file_info["data_id"], 0, TEST_NODE["unl"]) # Process file transfers. 
duration = 15 timeout = time.time() + duration while time.time() <= timeout or client.is_queued(): process_transfers(client) time.sleep(0.002) # Check we received this file. for i in range(0, 1): path = storage.manager.find(store_config, file_infos[i]["data_id"]) if not os.path.isfile(path): assert (0) else: with open(path, "r") as fp: content = fp.read() assert (content == rand_file_infos[i]["content"]) # Delete storage file copy. client.remove_file_from_storage(file_infos[0]["data_id"]) # Stop networking. client.net.stop() _log.debug("Download succeeded.")