def transfer_file(self, share_ID, file_name, return_address,
                  start_offset=0, end_offset=999999999999, block_skip=1):
    """Send a range of a file to a peer in blocks.

    Initiated by JSONRPC: get_file()
    start_offset: start sending the file from this byte offset
    end_offset: stop sending once this byte offset is reached
    block_skip: send a block, skip this many blocks, then send the next, etc.
                (not yet used by the loop below)
    """
    # Continue to send all file blocks
    full_file_path = os.path.join(self.storage_directory, share_ID, file_name)
    block_size = 5
    file_offset = start_offset
    logging.debug('file_name: %s start_offset: %d, end_offset: %d'
                  % (file_name, start_offset, end_offset))
    with open(full_file_path, 'rb') as f:
        f.seek(start_offset)
        while file_offset < end_offset:
            # @TODO: stop sending if given signal from return_address
            logging.debug('file_offset: %d' % (file_offset))
            block_bytes = f.read(64 * block_size)
            if block_bytes == b"":  # file is opened in binary mode, so compare against bytes
                logging.debug('no bytes read from file')
                break
            p_out_block = Packet()
            p_out_block.json_RPC_object = dict(
                jsonrpc="2.0",
                method="save_file_block",
                params=[1.0, None,
                        [self.my_machine_ID, share_ID, file_name, file_offset]],
            )  # id=rpc_id omitted; without an "id" this is a JSON-RPC notification (no response expected)
            p_out_block.binary_blob = block_bytes
            p_out_block.to_address = return_address
            self.send_block_choke(p_out_block, return_address, 3)
            time.sleep(0.002)
            file_offset += len(block_bytes)  # advance by the bytes actually read so offsets stay in sync
    logging.debug('finished file transfer')
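# ---------------------------------------------------------------------------
# Illustrative sketch (commented out; not part of this class). transfer_file()
# above emits "save_file_block" notifications carrying a byte offset plus a
# binary blob. A receiving peer could reassemble the file roughly as shown
# below; this handler is a hypothetical example under that assumption, and the
# real save_file_block in this codebase may be implemented differently.
#
#   def save_file_block(self, params, rpc_id, packet):
#       [ver, congest, args] = params
#       [machine_ID, share_ID, file_name, file_offset] = args
#       path = os.path.join(self.storage_directory, share_ID, file_name)
#       mode = 'r+b' if os.path.exists(path) else 'wb'  # keep existing data if present
#       with open(path, mode) as f:
#           f.seek(file_offset)           # offsets are byte offsets into the destination file
#           f.write(packet.binary_blob)
# ---------------------------------------------------------------------------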
def push_update_to_peer(self, params, rpc_id, packet):
    '''Loop through all blobs in the storage directory and poll the peer to
    see whether it has each one; if the peer does not have a file, send it.

    If machine_ID is None, updates are sent to every machine with the share.
    If current_commit is not None, a sync_new_commit is sent to each peer.
    '''
    [ver, congest, args] = params
    [share_ID, machine_ID, current_commit] = args
    logging.info('Pushing update for %s to %s and update to commit: %s'
                 % (share_ID, machine_ID, current_commit))

    # Find all machines to push this update to
    to_transfer = dict()
    if machine_ID is None:
        logging.debug('dict: %s' % (self.share_machine_dict))
        if share_ID not in self.share_machine_dict:
            logging.error('No machines with this share')
            return
        logging.debug(self.share_machine_dict[share_ID])
        to_machines = self.share_machine_dict[share_ID]
    else:
        to_machines = [machine_ID]

    # Delete my_machine_ID from to_machines
    if self.my_machine_ID in to_machines:
        to_machines.remove(self.my_machine_ID)
    if len(to_machines) == 0:
        logging.info('No other machines have this share. Updates will not be pushed.')
        return
    logging.debug('Machines to push to: %s' % (to_machines))
    logging.debug('share path: %s' % (os.path.join(self.storage_directory, share_ID)))

    # Send a has_file query for every file in the share
    for root, dirs, files in os.walk(os.path.join(self.storage_directory, share_ID)):
        logging.debug('files: %s' % (files))
        for name in files:
            logging.debug('name: %s' % (name))
            for machine in to_machines:
                address = self.machine_address_dict[machine]
                rpc_id = random.randint(1, 1000000)  # TODO: rpc_id collisions could occur
                p = Packet()
                p.json_RPC_object = dict(
                    jsonrpc="2.0",
                    method="has_file",
                    params=[1.0, None, [self.my_machine_ID, share_ID, name]],
                    id=rpc_id)
                p.to_address = address
                self.send_block_choke(p, address, 3)
                to_transfer[rpc_id] = [name, machine]  # TODO: rpc_id collisions could occur
    logging.info('Sent has_file to %s with following RPC_IDs: %s'
                 % (to_machines, to_transfer.keys()))
    time.sleep(3)  # wait for responses from peers
    logging.debug('json_response_dict: %s' % (self.json_response_dict))

    # Send the file if the query comes back negative
    for rpc_id, (name, machine) in list(to_transfer.items()):  # copy items; entries are deleted below
        if rpc_id not in self.json_response_dict:
            continue
        resp_packet = self.json_response_dict.pop(rpc_id)
        resp_value = resp_packet.json_RPC_object["result"]
        if not resp_value:
            # Peer doesn't have this file, so send it
            self.transfer_file(share_ID, name, self.machine_address_dict[machine])
        del to_transfer[rpc_id]
    logging.info('No response for following files: %s' % (to_transfer.items()))

    if current_commit is None:
        return
    time.sleep(3)  # wait for responses from peer

    # Tell machines to update to the current commit
    for machine in to_machines:
        address = self.machine_address_dict[machine]
        rpc_id = random.randint(1, 1000000)  # TODO: rpc_id collisions could occur
        p = Packet()
        p.json_RPC_object = dict(
            jsonrpc="2.0",
            method="sync_new_commit",
            params=[1.0, None, [current_commit, share_ID]],
            id=rpc_id)
        p.to_address = address
        self.send_block_choke(p, address, 3)
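# ---------------------------------------------------------------------------
# Illustrative sketch (commented out; not part of this class). The TODOs above
# note that rpc_ids drawn from random.randint() can collide. One simple way
# the TODO could be resolved, offered here only as an assumption, is a
# process-wide monotonic counter:
#
#   import itertools
#
#   _rpc_id_counter = itertools.count(1)
#
#   def next_rpc_id():
#       """Return a process-unique, monotonically increasing JSON-RPC id."""
#       return next(_rpc_id_counter)
#
# Callers would then use next_rpc_id() in place of random.randint(1, 1000000).
# ---------------------------------------------------------------------------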