def read_file(path, file_name):
    """Fetch a DFS file's blocks and reassemble them into local ``file_name``.

    Asks the name node for the block list of ``path``; for each block,
    connects to one of the data nodes holding a replica, fetches the
    bytes, and appends them in order to ``file_name``.

    Parameters:
        path: DFS path of the file to read.
        file_name: local destination path (appended to, block by block).

    Returns None.  Prints a message and returns early if the name node
    cannot locate the file.
    """
    name_conn = rpyc.connect(name_node_IP, 5000,
                             config={'allow_public_attrs': True})
    name_node = name_conn.root
    block_locations = []
    # The name node fills block_locations in place; a 0 reply means not found.
    name_reply = name_node.read_file(path, block_locations)
    print(block_locations)
    # Bug fix: original used `name_reply is 0` — identity comparison with an
    # int literal is not guaranteed to work; use equality.
    if name_reply == 0:
        print("File could not be located")
        return

    # block_locations alternates: block name, then "{ip1,ip2,...}" listing
    # the replica nodes (same layout make_file consumes).  The original left
    # this parse loop commented out, so nothing was ever fetched.
    receive_blocks = []
    for i in range(0, len(block_locations), 2):
        node_ips = block_locations[i + 1].split('{')[1]
        node_ips = node_ips.split('}')[0]
        node_ips = node_ips.split(',')
        # "/" is not a valid block-file character; encode as "!@!".
        receive_blocks.append(
            [block_locations[i].replace("/", "!@!"), node_ips])

    for block_id, node_ips in receive_blocks:
        # Bug fix: original connected to the block name instead of a node IP.
        data_conn = rpyc.connect(node_ips[0], 5000,
                                 config={'allow_public_attrs': True})
        data_node = data_conn.root
        data_reply = Reply.Load(data_node.get_block(block_id))
        if data_reply.error:
            # Skip unfetchable blocks; keep the rest of the file best-effort.
            continue
        fetched_bytes = BytesIO()
        fetched_bytes.write(data_reply.result)
        fetched_bytes.flush()
        fetched_bytes.seek(0)
        # Append mode so successive blocks accumulate in order.
        with open(file_name, 'ab+') as dest:
            # Bug fix: original referenced undefined `blocksize`; the rest of
            # the file uses `block_size`.
            shutil.copyfileobj(fetched_bytes, dest, block_size)
def delete_block(block_name):
    """Ask the data node to delete ``block_name``; report any failure."""
    conn = rpyc.connect(data_node_IP, 5000,
                        config={'allow_public_attrs': True})
    data_node = conn.root.BlockStore()
    reply = Reply.Load(data_node.delete_block(block_name))
    # Guard clause: nothing more to do on success.
    if not reply.is_err():
        return
    print('Could not delete block ', block_name)
    print(reply.err)
def make_file(bucket_name, file_name, to_path):
    """Copy an S3 object into the DFS at ``to_path``, block by block.

    Asks the name node to allocate blocks for a file of the S3 object's
    size, then streams the object body in ``block_size`` chunks, pushing
    each chunk to the first data node assigned to that block (the data
    node forwards replicas itself).

    Parameters:
        bucket_name: S3 bucket holding the source object.
        file_name: S3 key of the source object.
        to_path: destination path inside the DFS.
    """
    s3 = boto3.resource('s3')
    s3data = s3.Object(bucket_name=bucket_name, key=file_name).get()
    file_size = s3data['ContentLength']
    print(file_size)

    name_conn = rpyc.connect(name_node_IP, 5000,
                             config={'allow_public_attrs': True})
    name_node = name_conn.root
    name_reply = name_node.make_file(file_size, to_path)
    print(file_size)
    print(name_reply)
    block_locations = name_reply

    # block_locations alternates: block name, then "{ip1,ip2,...}" listing
    # the replica nodes assigned to that block.
    send_blocks = []
    for i in range(0, len(block_locations), 2):
        node_ips = block_locations[i + 1].split('{')[1]
        node_ips = node_ips.split('}')[0]
        node_ips = node_ips.split(',')
        # "/" is not a valid block-file character; encode as "!@!".
        send_blocks.append([block_locations[i].replace("/", "!@!"), node_ips])

    i = 0
    try:
        # Stream the S3 body in fixed-size chunks; iter() stops at b''.
        for chunk in iter(lambda: s3data['Body'].read(block_size), b''):
            block_id = send_blocks[i][0]
            print(send_blocks[i][1][0])
            # Push to the first assigned node; it replicates to the rest.
            data_conn = rpyc.connect(send_blocks[i][1][0], 5000,
                                     config={'allow_public_attrs': True})
            data_node = data_conn.root
            data_reply = Reply.Load(
                data_node.put_block(block_id, chunk, send_blocks[i][1]))
            print(data_reply.status)
            i += 1
    except Exception as exc:
        # Bug fix: the original bare `except: pass` silently swallowed every
        # upload failure; stay best-effort but say what went wrong.
        print('make_file aborted after', i, 'blocks:', exc)
def send_block(file_name):
    """Split ``file_name`` into blocks and push each one to the data node.

    Blocks are named MobyBlock1, MobyBlock2, ...; stops at the first
    insertion error reported by the server.
    """
    conn = rpyc.connect(node_IPs[0], 5000,
                        config={'allow_public_attrs': True})
    print("Connecting with server...")
    data_node = conn.root.BlockStore()
    for index, block in enumerate(create_blocks(file_name), start=1):
        block_id = 'MobyBlock' + str(index)
        print("Now inserting: ", block_id)
        reply = Reply.Load(data_node.put_block(block_id, block, node_IPs))
        print(reply.status)
        if reply.is_err():
            print('Could not insert block', block_id)
            print(reply.err)
            break
def exposed_put_block(self, file_name, data, replica_node_ids):
    """Store a block locally, then forward it along the replica chain.

    Writes ``data`` to a file named ``file_name``, records it via
    ``save_block``, pops this node off ``replica_node_ids``, and pushes
    the block to the next node in the chain, retrying up to 4 times with
    a 5-second pause when that node is busy.

    Returns a ``Reply``: an error reply when the name already exists or
    the local write fails, otherwise a success reply (even if replica
    forwarding ultimately gave up).
    """
    print('new file name', file_name)
    if file_name in self.block_id:
        return Reply.error('File name already exists')
    try:
        with open(file_name, 'wb') as f:
            f.write(data)
    except Exception:
        # Bug fix: the original called self.block_id.remove(id) — `id` is
        # the builtin function, and file_name was never added to block_id
        # yet, so remove() raised ValueError instead of reporting the save
        # error.  Undo any registration defensively and report the failure.
        if file_name in self.block_id:
            self.block_id.remove(file_name)
        return Reply.error('Error saving block')
    self.save_block(file_name)

    # Forward replicas: drop this node from the chain, then push to the
    # next node, which will recurse down the remaining chain.
    replica_node_ids.pop(0)
    if len(replica_node_ids) > 0:
        done = 1
        tries = 0
        print("Sending replica to ", replica_node_ids[0])
        while done == 1:
            c = rpyc.connect(replica_node_ids[0], 5000)
            next_node = c.root.BlockStore()
            reply = Reply.Load(
                next_node.put_block(file_name, data, replica_node_ids))
            print(reply.status)
            if reply.status == 0:
                print("replica sent!")
                done = 0
            else:
                print("node busy trying again")
                # wait 5 seconds and try again
                time.sleep(5)
                tries += 1
                if tries > 4:
                    # after 4 tries give up
                    print("could not send block replica")
                    break
    return Reply.reply()
def get_blocks(block_name, new_file_name):
    """Fetch one block from the data node and save it to ``new_file_name``.

    Prints an error and returns early if the server cannot supply the
    block; otherwise buffers the bytes and writes them to the local file.
    """
    conn = rpyc.connect(data_node_IP, 5000,
                        config={'allow_public_attrs': True})
    data_node = conn.root.BlockStore()
    print("Connecting with server...")
    reply = Reply.Load(data_node.get_block(block_name))
    if reply.is_err():
        print('Could not get block ', block_name)
        print(reply.err)
        # Bug fix: the original fell through here and then tried to write
        # reply.result from an error reply.
        return

    fetched_bytes = BytesIO()
    fetched_bytes.write(reply.result)
    fetched_bytes.flush()
    fetched_bytes.seek(0)
    print('Fetched: ', len(fetched_bytes.getbuffer()))
    print("Saving to: ", new_file_name)
    # `with` closes the file; the original's explicit dest.close() inside
    # the with-block was redundant and has been removed.
    with open(new_file_name, 'wb') as dest:
        shutil.copyfileobj(fetched_bytes, dest, block_size)