def before_read(self, entry):
    """Issue the read request(s) for the current block.

    First attempts a regular read from the physical disk that holds the
    block. If that disk is not online (util.DiskRefused), falls back to
    RECONSTRUCT mode and requests the same block number from every other
    disk so the content can be rebuilt (RAID-style) from the rest.

    Puts the entry to sleep until the BDSServer responses arrive.

    Raises:
        RuntimeError: if a second disk also cannot be reached (two
            failures cannot be recovered from).
    Returns:
        False -- always needs input, never an epsilon path.
    """
    self._current_phy_UUID = disk_util.get_physical_disk_UUID(
        self._disks,
        self._disk_num,
        self._current_block)
    try:
        # First check availability
        available_disks = entry.application_context["available_disks"]
        online, offline = util.sort_disks(available_disks)
        if self._current_phy_UUID not in online:
            raise util.DiskRefused(self._current_phy_UUID)

        self._block_mode = ReadFromDiskService.REGULAR
        self._disk_manager = disk_manager.DiskManager(
            self._disks,
            self._pollables,
            entry,
            service_util.create_get_block_contexts(
                self._disks,
                {
                    self._current_phy_UUID: {
                        "block_num": self._current_block,
                        "password": self._volume["long_password"]
                    }
                }),
        )
    except util.DiskRefused as e:
        # probably got an error when trying to reach a certain BDS
        # ServiceSocket. We shall try to get the data from the rest of
        # the disks. Otherwise, two disks are down and there's nothing
        # we can do
        logging.debug(
            "%s:\t Couldn't connect to one of the BDSServers, %s: %s" % (
                entry,
                self._current_phy_UUID,
                e))
        try:
            self._block_mode = ReadFromDiskService.RECONSTRUCT

            # create request info for all the other disks
            request_info = {
                disk_UUID: {
                    "block_num": self._current_block,
                    "password": self._volume["long_password"]
                }
                for disk_UUID in self._disks
                if disk_UUID != self._current_phy_UUID
            }
            self._disk_manager = disk_manager.DiskManager(
                self._disks,
                self._pollables,
                entry,
                service_util.create_get_block_contexts(
                    self._disks,
                    request_info),
            )
        except socket.error as e:
            # Got another bad connection (Connection refused most likely).
            # BUGFIX: the original concatenation produced "theBDSServers"
            # (missing space between the two string fragments).
            raise RuntimeError(
                ("%s:\t Couldn't connect to two of the "
                 "BDSServers, giving up: %s") % (entry, e))
    entry.state = constants.SLEEPING_STATE
    return False  # always need input, not an epsilon path
def handle_block(self):
    """Dispatch the read/write request(s) for the current block.

    In REGULAR mode the request goes to the physical disk that holds the
    block (plus the parity disk as produced by the context helpers). If
    that disk is offline (util.DiskRefused), switches to RECONSTRUCT
    mode -- restarting from READ_STATE -- and works with every other
    disk so the block can be rebuilt/updated via the parity. A second
    refused connection is unrecoverable and is only logged.

    Side effects: updates self._current_phy_UUID,
    self._current_phy_parity_UUID, self._block_mode, self._block_state,
    self._faulty_disk_UUID and self._disk_manager.
    """
    self._current_phy_UUID = disk_util.get_physical_disk_UUID(
        self._disks,
        self._disk_num,
        self._current_block)
    self._current_phy_parity_UUID = disk_util.get_parity_disk_UUID(
        self._disks,
        self._current_block)

    # first try writing the block regularly
    try:
        # First check availability
        online, offline = util.sort_disks(
            self._entry.application_context["available_disks"])
        if self._current_phy_UUID not in online:
            raise util.DiskRefused(self._current_phy_UUID)

        # step 1 - get current_block and parity block contents
        # step 2 - calculate new blocks to write
        if self._block_mode == WriteToDiskService.REGULAR:
            if self._block_state == WriteToDiskService.READ_STATE:
                contexts = self.contexts_for_regular_get_block()
            else:
                contexts = self.contexts_for_regular_set_block()
            self._disk_manager = disk_manager.DiskManager(
                self._disks,
                self._pollables,
                self._entry,
                contexts)
    except util.DiskRefused as disk_error:
        logging.error(
            ("%s:\t Got: %s, trying to connect with RECONSTRUCT") % (
                self._entry,
                disk_error))
        self._faulty_disk_UUID = disk_error.disk_UUID
        self._block_mode = WriteToDiskService.RECONSTRUCT

        # start reading from the beginning again:
        self._block_state = WriteToDiskService.READ_STATE

    # if didn't work, try with reconstruct
    try:
        # step 1 - get all other non-parity blocks
        # step 2 - XOR and write in parity
        if self._block_mode == WriteToDiskService.RECONSTRUCT:
            if self._block_state == WriteToDiskService.READ_STATE:
                contexts = self.contexts_for_reconstruct_get_block()
            else:
                contexts = self.contexts_for_reconstruct_set_block()
            self._disk_manager = disk_manager.DiskManager(
                self._disks,
                self._pollables,
                self._entry,
                contexts)
    except util.DiskRefused as disk_error:
        # nothing to do.
        # BUGFIX: the original concatenation produced "theBDSServers"
        # (missing space between the two string fragments).
        logging.error(
            ("%s:\t Couldn't connect to two of the "
             "BDSServers, giving up: %s") % (
                self._entry,
                disk_error))
def before_scratch_mount(self, entry):
    """Initialize every disk of the volume for a fresh (scratch) mount.

    Builds a multipart "set disk info" request per disk (each with its
    own boundary), resets the local disk records (level 0, full peer
    list, ONLINE), and hands the requests to a DiskManager.

    Returns:
        False -- needs input, not an epsilon_path.
    """
    disks = self._volume["disks"]
    peers = disks.keys()

    # one multipart payload per disk, each with its own boundary
    request_info = {}
    for disk_UUID in disks:
        boundary = post_util.generate_boundary()
        request_info[disk_UUID] = {
            "boundary": boundary,
            "content": self.create_disk_info_content(
                boundary,
                disk_UUID,
                peers,
            )
        }

    # reset the final disk stats locally
    for disk in disks.values():
        disk["level"] = 0
        disk["peers"] = peers
        disk["state"] = constants.ONLINE

    # create a disk manager to push the info out
    self._disk_manager = disk_manager.DiskManager(
        disks,
        self._pollables,
        entry,
        service_util.create_set_disk_info_contexts(disks, request_info))
    entry.state = constants.SLEEPING_STATE
    return False  # need input, not an epsilon_path
def before_existing_mount(self, entry):
    """Request the stored disk info from every disk of an existing volume.

    Sends a "get disk info" request to all disks via a DiskManager and
    puts the entry to sleep until the answers come back.

    Returns:
        False -- will always need input, not an epsilon_path.
    """
    disks = self._volume["disks"]
    contexts = service_util.create_get_disk_info_contexts(
        disks,
        disks.keys(),
    )
    self._disk_manager = disk_manager.DiskManager(
        disks,
        self._pollables,
        entry,
        contexts)
    entry.state = constants.SLEEPING_STATE
    return False  # will always need input, not an epsilon_path
def before_login(self, entry):
    """Log in to the block devices of a newly created volume.

    For EXISTING_MODE volumes the login already happened, so this is an
    epsilon path. Otherwise a DiskManager sends login requests to every
    disk and the entry sleeps until they answer.

    Returns:
        True for an existing volume (epsilon_path), False otherwise.
    """
    # no need to login, already updated
    if self._mode == InitService.EXISTING_MODE:
        return True  # epsilon_path

    # need to login to new block device
    contexts = service_util.create_login_contexts(self._volume)
    self._disk_manager = disk_manager.DiskManager(
        self._volume["disks"],
        self._pollables,
        entry,
        contexts)
    entry.state = constants.SLEEPING_STATE
    return False  # will always need input, not an epsilon_path
def before_update_level(self, entry):
    """Ask the target disk to increment its level by one.

    Builds a single "update level" request for self._disk_UUID and hands
    it to a DiskManager; the entry sleeps until the disk answers.

    Returns:
        False -- needs input, not an epsilon path.
    """
    request_info = {
        self._disk_UUID: {
            "addition": "1",
            "password": self._volume["long_password"]
        }
    }
    self._disk_manager = disk_manager.DiskManager(
        self._disks,
        self._pollables,
        entry,
        service_util.create_update_level_contexts(
            self._disks,
            request_info))
    entry.state = constants.SLEEPING_STATE
    return False  # need input, not an epsilon path
def before_set_data(self, entry):
    """Write the current rebuilt block back to the target disk.

    Builds a single "set block" request carrying the current block number
    and content for self._disk_UUID and hands it to a DiskManager; the
    entry sleeps until the write is acknowledged.

    Returns:
        False -- needs input, not an epsilon path.
    """
    request_info = {
        self._disk_UUID: {
            "block_num": self._current_block_num,
            "content": self._current_data,
            "password": self._volume["long_password"]
        }
    }
    self._disk_manager = disk_manager.DiskManager(
        self._disks,
        self._pollables,
        entry,
        service_util.create_set_block_contexts(
            self._disks,
            request_info))
    entry.state = constants.SLEEPING_STATE
    return False  # need input, not an epsilon path
def before_disconnect(self, entry):
    """Take one disk of a volume offline and bump the levels of the rest.

    Marks the disk OFFLINE (with a fresh write-cache) and sends an
    "update level" (+1) request to every remaining disk via a
    DiskManager.

    Raises:
        RuntimeError: if any other disk is already offline (RAID5 can
            only tolerate one missing disk).
    Returns:
        True if the disk is already disconnected (epsilon path),
        False otherwise (needs input).
    """
    self._disk_UUID = self._args["disk_UUID"][0]
    self._volume_UUID = self._args["volume_UUID"][0]

    # extract the disks from the wanted volume
    self._volume = entry.application_context["volumes"][self._volume_UUID]
    self._disks = self._volume["disks"]

    # check if disk is already disconnected
    if self._disks[self._disk_UUID]["state"] != constants.ONLINE:
        return True

    # check that all other disks are online (RAID5 requirements)
    for other_UUID, other_disk in self._disks.items():
        if other_disk["state"] != constants.ONLINE:
            raise RuntimeError(
                "Can't turn disk %s offline, already have disk %s offline"
                % (self._disk_UUID, other_UUID))

    # already set to offline so that another attempt to disconnect shall
    # be denied
    self._disks[self._disk_UUID]["state"] = constants.OFFLINE
    self._disks[self._disk_UUID]["cache"] = cache.Cache(
        mode=cache.Cache.CACHE_MODE)

    # now need to increment the level of every remaining disk
    # (skip the disk we are disconnecting)
    request_info = {
        disk_UUID: {
            "addition": "1",
            "password": self._volume["long_password"]
        }
        for disk_UUID in self._disks
        if disk_UUID != self._disk_UUID
    }
    # NOTE(review): unlike the sibling before_* handlers, this one does
    # not set entry.state = constants.SLEEPING_STATE before returning
    # False -- confirm whether that is intentional.
    self._disk_manager = disk_manager.DiskManager(
        self._disks,
        self._pollables,
        entry,
        service_util.create_update_level_contexts(
            self._disks,
            request_info),
    )
    return False  # will always need input, not an epsilon_path
def before_get_data(self, entry):
    """Fetch the next block needed for the rebuild of the faulty disk.

    Pulls the next block from the disk's cache; if the content is cached
    this is an epsilon path. Otherwise the block must be rebuilt from the
    XOR of the same-numbered block on every other disk, so a "get block"
    request is sent to all of them via a DiskManager.

    Returns:
        True when the cache already holds the data (epsilon_path),
        False when disk responses are required.
    """
    self._current_block_num, self._current_data = (
        self._disks[self._disk_UUID]["cache"].next_block())

    # got data stored in cache, no need for hard rebuild
    # ==> This is an epsilon_path
    if self._current_data is not None:
        return True

    # need to retrieve data from XOR of all the disks besides the
    # current one in order to rebuild it
    request_info = {
        disk_UUID: {
            "block_num": self._current_block_num,
            "password": self._volume["long_password"]
        }
        for disk_UUID in self._disks
        if disk_UUID != self._disk_UUID
    }
    self._disk_manager = disk_manager.DiskManager(
        self._disks,
        self._pollables,
        entry,
        service_util.create_get_block_contexts(self._disks, request_info))
    entry.state = constants.SLEEPING_STATE
    return False  # need input, not an epsilon path