def test_queued():
    """Round-trip a file between two in-process nodes and exercise the
    transfer queue.

    Alice uploads a one-byte file to Bob; when Bob reports completion,
    the completion callback deletes Alice's source copy and queues a
    download of the same data back from Bob.  Success of the queued
    download is signalled through the module-level ``queue_succeeded``
    flag, asserted at the end.
    """
    from crochet import setup
    setup()

    # Alice sample node.
    alice_wallet = BtcTxStore(testnet=False, dryrun=True)
    alice_wif = alice_wallet.create_key()
    alice_node_id = address_to_node_id(alice_wallet.get_address(alice_wif))
    # networking=0 keeps the DHT offline; peers are linked manually below.
    alice_dht = pyp2p.dht_msg.DHT(
        node_id=alice_node_id,
        networking=0
    )
    alice = FileTransfer(
        pyp2p.net.Net(
            net_type="direct",
            node_type="passive",
            nat_type="preserving",
            passive_port=63400,
            dht_node=alice_dht,
            wan_ip="8.8.8.8",
            debug=1
        ),
        BandwidthLimit(),
        wif=alice_wif,
        store_config={tempfile.mkdtemp(): None},
    )

    # Bob sample node.
    bob_wallet = BtcTxStore(testnet=False, dryrun=True)
    bob_wif = bob_wallet.create_key()
    bob_node_id = address_to_node_id(bob_wallet.get_address(bob_wif))
    bob_dht = pyp2p.dht_msg.DHT(
        node_id=bob_node_id,
        networking=0
    )
    bob = FileTransfer(
        pyp2p.net.Net(
            net_type="direct",
            node_type="passive",
            nat_type="preserving",
            passive_port=63401,
            dht_node=bob_dht,
            wan_ip="8.8.8.8",
            debug=1
        ),
        BandwidthLimit(),
        wif=bob_wif,
        store_config={tempfile.mkdtemp(): None}
    )

    # Simulate Alice + Bob "connecting".
    alice_dht.add_relay_link(bob_dht)
    bob_dht.add_relay_link(alice_dht)

    # Accept all transfers.
    def accept_handler(contract_id, src_unl, data_id, file_size):
        return 1

    # Add accept handler.
    alice.handlers["accept"].add(accept_handler)
    bob.handlers["accept"].add(accept_handler)

    # Create file we're suppose to be uploading.
    # data_id is the SHA-256 of the one-character payload "0".
    data_id = ("5feceb66ffc86f38d952786c6d696c"
               "79c2dbc239dd4e91b46729d73a27fb57e9")
    path = os.path.join(list(alice.store_config)[0], data_id)
    if not os.path.exists(path):
        with open(path, "w") as fp:
            fp.write("0")

    # Alice wants to upload data to Bob.
    # NOTE(review): a "download" request here means "peer, download
    # from me", i.e. Alice's upload — confirm against data_request docs.
    upload_contract_id = alice.data_request(
        "download",
        data_id,
        0,
        bob.net.unl.value
    )

    # Delete source file.
    def callback_builder(path, alice, bob, data_id):
        # Returns the completion callback installed on Bob below; the
        # closure captures both nodes so it can queue the reverse leg.
        def callback(client, contract_id, con):
            print("Upload succeeded")
            print("Removing content and downloading back")
            os.remove(path)

            # Fix transfers: clear Bob's completion handlers so this
            # callback only fires for the first (upload) leg.
            bob.handlers["complete"] = []

            # Synchronize cons and check con.unl.
            time.sleep(1)
            clients = {"alice": alice, "bob": bob}
            for client in list({"alice": alice, "bob": bob}):
                print()
                print(client)
                clients[client].net.synchronize()
                nodes_out = clients[client].net.outbound
                nodes_in = clients[client].net.inbound
                for node in nodes_out + nodes_in:
                    print(node["con"].unl)
                print(clients[client].cons)

            # Queued transfer: ask Bob to send the data back.
            download_contract_id = alice.data_request(
                "upload",
                data_id,
                0,
                bob.net.unl.value
            )
            print("Download contract ID =")
            print(download_contract_id)

            # Indicate Bob's download succeeded.
            def alice_callback(val):
                print("Download succeeded")
                global queue_succeeded
                queue_succeeded = 1

            def alice_errback(val):
                print("Download failed! Error:")
                print(val)

            # Hook upload from bob.
            d = alice.defers[download_contract_id]
            d.addCallback(alice_callback)
            d.addErrback(alice_errback)

        return callback

    # Register callback for bob (when he's downloaded the data.)
    bob.handlers["complete"] = [
        callback_builder(path, alice, bob, data_id)
    ]

    # d = alice.defers[upload_contract_id]
    # d.addCallback(callback_builder(path, alice, bob, data_id))

    # Main event loop: pump both nodes until the queued download
    # succeeds or 40 seconds elapse.
    timeout = time.time() + 40
    while not queue_succeeded and time.time() < timeout:
        for client in [alice, bob]:
            if client == alice:
                _log.debug("Alice")
            else:
                _log.debug("Bob")

            process_transfers(client)

        time.sleep(1)

    if not queue_succeeded:
        print("\a")

    # Shut both networks down before asserting so sockets are released
    # even on failure.
    for client in [alice, bob]:
        client.net.stop()

    assert(queue_succeeded == 1)
# NOTE(review): this span begins mid-expression — the opening of the
# call these arguments close (a data_request(...) by the look of them)
# lies outside the visible chunk; do not move/delete without locating
# the matching opening.
        0, bob.net.unl.value
    )

    def do_beep(ret):
        # Ring the terminal bell when the deferred fires with a result.
        # NOTE(review): `ret != None` should idiomatically be
        # `ret is not None` — flagged only, code left untouched.
        if ret != None:
            print("\a")

    d.addCallback(do_beep)

    """
    alice.data_request(
        "upload",
        "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2",
        5,
        bob.net.unl.value
    )
    """

    # Main event loop.
    # NOTE(review): deliberate infinite loop — this reads as interactive
    # demo/driver code rather than an automated test; confirm before
    # reusing it in a test runner.
    while 1:
        for client in [alice, bob]:
            if client == alice:
                _log.debug("Alice")
            else:
                _log.debug("Bob")

            process_transfers(client)

        time.sleep(0.002)
def process_transfer_thread():
    # Background pump: keep servicing pending transfers for `client`
    # while the `sending_data` flag remains truthy.
    # NOTE(review): `sending_data`, `client` and `process_transfers`
    # all come from an enclosing/module scope not visible in this
    # chunk; the flag is presumably cleared elsewhere to stop the
    # thread — confirm against the caller.
    while sending_data:
        process_transfers(client)
        time.sleep(0.002)
def test_multiple_transfers(self):
    """Round-trip a random file through the remote TEST_NODE.

    Uploads a 100 KiB random file from local storage to TEST_NODE,
    verifies the upload via the node's web endpoint, deletes the local
    copy, downloads it back, and verifies the bytes match.  Finally
    exercises ``cleanup_transfers`` on any remaining connections.
    """

    def make_random_file(file_size=1024 * 100,
                         directory=self.test_storage_dir):
        # Random binary payload named after the SHA-256 of its first
        # 64 bytes; returns path + raw content for later comparison.
        content = os.urandom(file_size)
        file_name = hashlib.sha256(content[0:64]).hexdigest()
        path = storjnode.util.full_path(os.path.join(directory, file_name))
        with open(path, "wb") as fp:
            fp.write(content)

        return {
            "path": path,
            "content": content
        }

    # print("Giving nodes some time to find peers.")
    time.sleep(storjnode.network.WALK_TIMEOUT)
    self.dht_node.refresh_neighbours()
    time.sleep(storjnode.network.WALK_TIMEOUT)
    _log.debug("Net started")

    # Make random file.
    rand_file_infos = [make_random_file()]

    # Move file to storage directory.
    file_infos = [
        self.client.move_file_to_storage(rand_file_infos[0]["path"])
    ]

    # Delete original file.
    os.remove(rand_file_infos[0]["path"])

    _log.debug("Testing upload")

    # Upload file from storage ("download" = ask the peer to download
    # from us, i.e. our upload).
    for file_info in file_infos:
        self.client.data_request(
            "download",
            file_info["data_id"],
            0,
            TEST_NODE["unl"]
        )

    # Process file transfers for at least `duration` seconds, then
    # keep pumping while anything is still queued.
    duration = 15
    timeout = time.time() + duration
    while time.time() <= timeout or self.client.is_queued():
        process_transfers(self.client)
        time.sleep(0.002)

    # Check upload exists on the remote node's web endpoint.
    for i in range(0, 1):
        url = TEST_NODE["web"] + file_infos[i]["data_id"]
        r = requests.get(url, timeout=3)
        if r.status_code != 200:
            _log.debug(r.status_code)
            assert(0)
        else:
            # r.content is bytes, matching the os.urandom payload.
            assert(r.content == rand_file_infos[i]["content"])
    _log.debug("File upload succeeded.")

    # Delete storage file copy.
    self.client.remove_file_from_storage(file_infos[0]["data_id"])

    # Download file from storage.
    _log.debug("Testing download.")
    for file_info in file_infos:
        self.client.data_request(
            "upload",
            file_info["data_id"],
            0,
            TEST_NODE["unl"]
        )

    # Process file transfers.
    duration = 15
    timeout = time.time() + duration
    while time.time() <= timeout or self.client.is_queued():
        process_transfers(self.client)
        time.sleep(0.002)

    # Check we received this file.
    for i in range(0, 1):
        path = storjnode.storage.manager.find(self.store_config,
                                              file_infos[i]["data_id"])
        if not os.path.isfile(path):
            assert(0)
        else:
            # BUGFIX: read in binary mode — the payload is raw bytes
            # from os.urandom, so a text-mode ("r") read would attempt
            # a decode (and a str result could never equal the bytes
            # content anyway).
            with open(path, "rb") as fp:
                content = fp.read()
            assert(content == rand_file_infos[i]["content"])

    # Delete storage file copy.
    self.client.remove_file_from_storage(file_infos[0]["data_id"])

    _log.debug("Download succeeded.")

    # Test cleanup transfers.
    for con in list(self.client.con_info):
        con.close()
        for contract_id in list(self.client.con_info[con]):
            self.client.cleanup_transfers(con, contract_id)
def test_bandwidth_test(self):
    """Measure bandwidth between two in-process nodes.

    Builds Alice and Bob ``FileTransfer`` nodes linked over offline
    DHT relays, starts a ``BandwidthTest`` from Alice against Bob's
    UNL, and pumps both nodes until the test finishes or 60 seconds
    elapse.  Success is signalled through the module-level
    ``test_success`` flag set by the result callback.
    """
    # Alice sample node.
    alice_wallet = BtcTxStore(testnet=False, dryrun=True)
    alice_wif = alice_wallet.create_key()
    alice_node_id = address_to_node_id(alice_wallet.get_address(alice_wif))
    # networking=0 keeps the DHT offline; relay links are added below.
    alice_dht = pyp2p.dht_msg.DHT(
        node_id=alice_node_id,
        networking=0
    )
    alice_transfer = FileTransfer(
        pyp2p.net.Net(
            net_type="direct",
            node_type="passive",
            nat_type="preserving",
            passive_port=63600,
            debug=1,
            wan_ip="8.8.8.8",
            dht_node=alice_dht,
        ),
        wif=alice_wif,
        store_config={tempfile.mkdtemp(): None}
    )

    _log.debug("Alice UNL")
    _log.debug(alice_transfer.net.unl.value)

    # Bob sample node.
    bob_wallet = BtcTxStore(testnet=False, dryrun=True)
    bob_wif = bob_wallet.create_key()
    bob_node_id = address_to_node_id(bob_wallet.get_address(bob_wif))
    bob_dht = pyp2p.dht_msg.DHT(
        node_id=bob_node_id,
        networking=0
    )
    bob_transfer = FileTransfer(
        pyp2p.net.Net(
            net_type="direct",
            node_type="passive",
            nat_type="preserving",
            passive_port=63601,
            debug=1,
            wan_ip="8.8.8.8",
            dht_node=bob_dht
        ),
        wif=bob_wif,
        store_config={tempfile.mkdtemp(): None}
    )

    # Link DHT nodes.
    alice_dht.add_relay_link(bob_dht)
    bob_dht.add_relay_link(alice_dht)

    _log.debug("Bob UNL")
    _log.debug(bob_transfer.net.unl.value)

    # Show bandwidth.
    def show_bandwidth(results):
        # Success callback: record completion in the module-level flag
        # checked by the final assertion.
        global test_success
        test_success = 1
        _log.debug(results)

    # Test bandwidth between Alice and Bob.
    bob_test = BandwidthTest(bob_wif, bob_transfer, bob_dht, 0)
    alice_test = BandwidthTest(alice_wif, alice_transfer, alice_dht, 0)
    d = alice_test.start(bob_transfer.net.unl.value)
    d.addCallback(show_bandwidth)

    # Main event loop: run until the test object clears its active
    # test or the 60-second deadline passes.
    # and not test_success
    end_time = time.time() + 60
    while alice_test.active_test is not None and time.time() < end_time:
        for client in [alice_transfer, bob_transfer]:
            process_transfers(client)

        time.sleep(0.002)

    # End net: release sockets before asserting.
    for client in [alice_transfer, bob_transfer]:
        client.net.stop()

    self.assertTrue(test_success == 1)
def test_multiple_transfers(self):
    """Round-trip a random file through the remote TEST_NODE.

    Builds a fresh wallet + storjnode DHT node + ``FileTransfer``
    client, uploads a 100 KiB random file, verifies it via the node's
    web endpoint, downloads it back, verifies the bytes match, then
    shuts the networking down.
    """

    def make_random_file(file_size=1024 * 100,
                         directory=self.test_storage_dir):
        # Random binary payload named after the SHA-256 of its first
        # 64 bytes; returns path + raw content for later comparison.
        content = os.urandom(file_size)
        file_name = hashlib.sha256(content[0:64]).hexdigest()
        path = storjnode.util.full_path(os.path.join(directory, file_name))
        with open(path, "wb") as fp:
            fp.write(content)

        return {"path": path, "content": content}

    # Sample node.
    wallet = btctxstore.BtcTxStore(testnet=False, dryrun=True)
    wif = wallet.get_key(wallet.create_wallet())
    node_id = address_to_node_id(wallet.get_address(wif))
    store_config = {
        os.path.join(self.test_storage_dir, "storage"): {
            "limit": 0
        }
    }
    # dht_node = pyp2p.dht_msg.DHT(node_id=node_id)
    dht_node = storjnode.network.Node(
        wif, bootstrap_nodes=DEFAULT_BOOTSTRAP_NODES,
        disable_data_transfer=True)
    client = FileTransfer(
        pyp2p.net.Net(
            node_type="simultaneous",
            nat_type="preserving",
            net_type="direct",
            passive_port=60400,
            dht_node=dht_node,
            debug=1
        ),
        wif=wif,
        store_config=store_config
    )

    # print("Giving nodes some time to find peers.")
    time.sleep(storjnode.network.WALK_TIMEOUT)
    dht_node.refresh_neighbours()
    time.sleep(storjnode.network.WALK_TIMEOUT)
    _log.debug("Net started")

    # Make random file.
    rand_file_infos = [make_random_file()]

    # Move file to storage directory.
    file_infos = [client.move_file_to_storage(rand_file_infos[0]["path"])]

    # Delete original file.
    os.remove(rand_file_infos[0]["path"])

    _log.debug("Testing upload")

    # Upload file from storage.
    for file_info in file_infos:
        client.data_request("upload", file_info["data_id"], 0,
                            TEST_NODE["unl"])

    # Process file transfers for at least `duration` seconds, then
    # keep pumping while anything is still queued.
    duration = 15
    timeout = time.time() + duration
    while time.time() <= timeout or client.is_queued():
        process_transfers(client)
        time.sleep(0.002)

    # Check upload exists on the remote node's web endpoint.
    for i in range(0, 1):
        url = TEST_NODE["web"] + file_infos[i]["data_id"]
        r = requests.get(url, timeout=3)
        if r.status_code != 200:
            _log.debug(r.status_code)
            assert(0)
        else:
            # r.content is bytes, matching the os.urandom payload.
            assert(r.content == rand_file_infos[i]["content"])
    _log.debug("File upload succeeded.")

    # Delete storage file copy.
    client.remove_file_from_storage(file_infos[0]["data_id"])

    # Download file from storage.
    _log.debug("Testing download.")
    for file_info in file_infos:
        client.data_request("download", file_info["data_id"], 0,
                            TEST_NODE["unl"])

    # Process file transfers.
    duration = 15
    timeout = time.time() + duration
    while time.time() <= timeout or client.is_queued():
        process_transfers(client)
        time.sleep(0.002)

    # Check we received this file.
    for i in range(0, 1):
        path = storage.manager.find(store_config, file_infos[i]["data_id"])
        if not os.path.isfile(path):
            assert(0)
        else:
            # BUGFIX: read in binary mode — the payload is raw bytes
            # from os.urandom, so a text-mode ("r") read would attempt
            # a decode (and a str result could never equal the bytes
            # content anyway).
            with open(path, "rb") as fp:
                content = fp.read()
            assert(content == rand_file_infos[i]["content"])

    # Delete storage file copy.
    client.remove_file_from_storage(file_infos[0]["data_id"])

    # Stop networking.
    # BUGFIX: also stop the transfer client's Net (matching the other
    # tests' cleanup), not just the DHT node, so the test doesn't leak
    # the listening socket.
    client.net.stop()
    dht_node.stop()

    _log.debug("Download succeeded.")