class Reader(Thread):
    def __init__(self, walker_map, reader_map, conf, logger):
        Thread.__init__(self)
        self._walker_map = walker_map
        self._reader_map = reader_map
        self.logger = logger
        self.logger.info("Reader constructed")
        self._file_location = conf['stat_file_location']
        self.msg = GlobalVariables(self.logger)
        self._complete_all_event = self.msg.get_complete_all_event()
        self._transfer_cmp_event = self.msg.get_transfer_cmp_event()
        self.put_queue_flag = False

    def __reader_check_complete_all_event(self):
        """ Reader complete all request (Stop) Event check """
        if self._complete_all_event.is_set():
            self.logger.info("Received complete all request event in reader")
            self._reader_map = {}
            self.msg.put_into_Queue()
            return True

    def __reader_get_new_ownership(self):
        """ Reader transfer/accept component request """
        if self._transfer_cmp_event.is_set() and not self.put_queue_flag:
            self.logger.info(
                "Received transfer/accept request event in reader")
            for key in list(self._reader_map.keys()):
                if int(key) not in self.msg.get_ownershipList():
                    del self._reader_map[key]
            self.msg.put_into_Queue()
            self.put_queue_flag = True
        elif not self._transfer_cmp_event.is_set():
            self.put_queue_flag = False

    def read_stat_file(self, comp, stat_file):
        if not os.path.exists(
                os.path.join(self._file_location, comp, stat_file)):
            # stop the generator; raising StopIteration here is invalid
            # on Python 3.7+ (PEP 479)
            return
        with open(os.path.join(self._file_location, comp, stat_file),
                  'rt') as fd:
            while True:
                data = fd.read(MAX_CHUNK_SIZE)
                if not data:
                    break
                yield data.split(',')[:-1]

    def check_if_container_limit_exceeded(self):
        while self._reader_map.getContainerCount() > maxContainerEntries:
            self.logger.info("Container limit exceeded: %s, therefore wait "
                             "till update completed"
                             % self._reader_map.getContainerCount())
            time.sleep(10)

    def run(self):
        try:
            self.__perform_reader_operation()
        except Exception as ex:
            self.logger.error("Error occurred: %s" % ex)
            os._exit(130)
        except:
            self.logger.error("Unknown exception occurred")
            os._exit(130)

    def __perform_reader_operation(self):
        while True:
            self.logger.debug("Reader started")
            if self.__reader_check_complete_all_event():
                break
            try:
                self.__reader_get_new_ownership()
                for entry in self._walker_map.getEntries():
                    comp, stat_file = entry.split("/")
                    self._stat_obj = StatFile(stat_file)
                    self.check_if_container_limit_exceeded()
                    self.logger.debug("Processing start for component: %s "
                                      "stat file: %s" % (comp, stat_file))
                    for file_content in self.read_stat_file(comp, stat_file):
                        self._stat_obj.set_lock_flag(True)
                        for data in file_content:
                            account_name, container_name = data.split("/")
                            if comp not in self._reader_map:
                                self._reader_map[comp] = {}
                            if self._stat_obj not in self._reader_map[comp]:
                                self._reader_map[comp][self._stat_obj] = {}
                            if account_name not in \
                                    self._reader_map[comp][self._stat_obj]:
                                self._reader_map[comp][
                                    self._stat_obj][account_name] = []
                            self._reader_map[comp][self._stat_obj][
                                account_name].append(container_name)
                            self._reader_map.incrementContainer()
                        self._stat_obj.set_lock_flag(False)
                    self.logger.info("Stat file: %s processing done for "
                                     "component: %s count: %s"
                                     % (stat_file, comp,
                                        self._reader_map.getContainerCount()))
                self.logger.debug("After completing all stat file processing,"
                                  " reader_map = %s" % self._reader_map)
            except Exception as ex:
                self.logger.error("Exception occurred: %s" % ex)
            self.logger.debug("Exiting Reader")
            time.sleep(35)
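
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the service): Reader.read_stat_file above
# appears to assume a stat file is a flat, comma-separated list of
# "account/container" entries with a trailing comma, so that split(',')[:-1]
# drops the empty (or partial) final element of each chunk. The function name,
# path and chunk_size below are hypothetical and only show how the generator's
# output could be consumed.
# ---------------------------------------------------------------------------
def _example_parse_stat_file(path, chunk_size=65536):
    """Yield [account, container] pairs the same way Reader does."""
    with open(path, 'rt') as fd:
        while True:
            data = fd.read(chunk_size)
            if not data:
                break
            # drop the trailing (possibly empty/partial) element, as above
            for entry in data.split(',')[:-1]:
                yield entry.split('/')
# e.g. a file containing "AUTH_test/cont1,AUTH_test/cont2," would yield
# ['AUTH_test', 'cont1'] and ['AUTH_test', 'cont2'].
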
class TestGlobalVariables(unittest.TestCase):
    @mock.patch("osd.accountUpdaterService.monitor.Req", mockReq)
    @mock.patch("osd.accountUpdaterService.monitor.Resp", mockResp)
    @mock.patch("osd.accountUpdaterService.monitor.ServiceInfo",
                mockServiceInfo)
    def setUp(self):
        self.global_var = GlobalVariables(logger)
        self.global_var.set_service_id("service_id")

    def test_load_gl_map(self):
        #self.assertTrue(self.global_var.load_gl_map())
        with mock.patch(
                'osd.accountUpdaterService.unitTests.mockReq.connector',
                return_value=None):
            self.assertFalse(self.global_var.load_gl_map())
        with mock.patch(
                'osd.accountUpdaterService.unitTests.mockStatus.get_status_code',
                return_value="Resp.FAILURE"):
            self.assertFalse(self.global_var.load_gl_map())

    def test_load_ownership(self):
        with mock.patch(
                'osd.accountUpdaterService.unitTests.mockReq.connector',
                return_value=None):
            self.global_var.load_ownership()
            self.assertEqual(self.global_var.get_ownershipList(), [])
        with mock.patch(
                'osd.accountUpdaterService.unitTests.mockStatus.get_status_code',
                return_value="Resp.FAILURE"):
            self.global_var.load_ownership()
            self.assertEqual(self.global_var.get_ownershipList(), [])
        self.global_var.load_ownership()
        self.assertEqual(self.global_var.get_ownershipList(),
                         list(range(1, 513)))

    def test_get_account_map(self):
        self.assertEqual(
            [obj.get_id() for obj in self.global_var.get_account_map()],
            acc_id)
        self.assertEqual(
            [obj.get_ip() for obj in self.global_var.get_account_map()],
            acc_ip)
        self.assertEqual(
            [obj.get_port() for obj in self.global_var.get_account_map()],
            acc_port)

    def test_get_container_map(self):
        self.assertEqual(
            [obj.get_id() for obj in self.global_var.get_container_map()],
            cont_id)
        self.assertEqual(
            [obj.get_ip() for obj in self.global_var.get_container_map()],
            cont_ip)
        self.assertEqual(
            [obj.get_port() for obj in self.global_var.get_container_map()],
            cont_port)

    def test_get_acc_updater_map(self):
        self.assertEqual(
            [obj.get_id() for obj in self.global_var.get_acc_updater_map()],
            au_id)
        self.assertEqual(
            [obj.get_ip() for obj in self.global_var.get_acc_updater_map()],
            au_ip)
        self.assertEqual(
            [obj.get_port() for obj in self.global_var.get_acc_updater_map()],
            au_port)

    def test_get_acc_updater_map_version(self):
        self.assertEqual(self.global_var.get_acc_updater_map_version(), "5.0")

    def test_get_global_map_version(self):
        self.assertEqual(self.global_var.get_global_map_version(), "5.0")

    def test_get_service_id(self):
        self.assertEqual(self.global_var.get_service_id(), "service_id")
class Updater(Thread):
    def __init__(self, walker_map, reader_map, conf, logger):
        Thread.__init__(self)
        self.conf = conf
        self.logger = logger
        self.logger.info("Updater constructed")
        self.msg = GlobalVariables(self.logger)
        self.__complete_all_request = False
        self._walker_map = walker_map
        self._reader_map = reader_map
        self._complete_all_event = self.msg.get_complete_all_event()
        self._transfer_cmp_event = self.msg.get_transfer_cmp_event()
        self.__container_dispatcher = ContainerDispatcher(self.conf,
                                                          self.logger)
        self._updater_map = {}
        self.__other_file_list = []
        self.put_queue_flag = False

    def populate_updater_map(self):
        self.__other_file_list = []
        for component, stat_data in self._reader_map.items():
            for stat_obj, data in stat_data.items():
                if not stat_obj.get_lock_flag():
                    stat_file = stat_obj.get_stat_file()
                    if (component, stat_file) not in self._updater_map:
                        self._updater_map[(component, stat_file)] = {}
                    if self._reader_map.get(component, None):
                        self._updater_map[(component, stat_file)] = \
                            self._reader_map[component][stat_obj]
                    self.__other_file_list.append((component, stat_file))
        self.logger.info("Updater map: %s" % self._updater_map)

    def __updater_check_complete_all_event(self):
        """ Updater complete all request (Stop) Event check """
        if self._complete_all_event.is_set():
            self.logger.info("Received complete all request event in updater")
            self._updater_map = {}
            self.msg.put_into_Queue()
            return True

    def __updater_get_new_ownership(self):
        """ Updater transfer/accept component request """
        if self._transfer_cmp_event.is_set() and not self.put_queue_flag:
            self.logger.info(
                "Received transfer/accept request event in updater")
            for comp_tuple in list(self._updater_map.keys()):
                if int(comp_tuple[0]) not in self.msg.get_ownershipList():
                    del self._updater_map[comp_tuple]
            self.msg.put_into_Queue()
            self.put_queue_flag = True
        elif not self._transfer_cmp_event.is_set():
            self.put_queue_flag = False

    def __other_file(self, comp, file):
        listOfAccounts = []
        index = self.__other_file_list.index((comp, file))
        rest_comp_tuple_list = self.__other_file_list[:index] + \
            self.__other_file_list[index + 1:]
        for comp_tuple in rest_comp_tuple_list:
            listOfAccounts.extend(self._updater_map[comp_tuple].keys())
        #self.logger.debug("Current component: %s and other stat files "
        #                  "having accounts %s" % (comp, listOfAccounts))
        return listOfAccounts

    def run(self):
        try:
            while True:
                self.logger.debug("Updater started")
                self.populate_updater_map()
                self.__updater_get_new_ownership()
                if self.__updater_check_complete_all_event():
                    self.__complete_all_request = True
                    break
                waitList = []
                delete_list = []
                accObjectDict = {}
                # container_list contains AccountInfo objects on whose
                # accounts HEAD has been done
                container_list = []
                for comp_tuple, map_ in list(self._updater_map.items()):
                    # NOTE: comp_tuple = (componentNum, Filename)
                    # NOTE: map_ = {account name: [container name]}
                    self.logger.info("Processing start for stat file: %s, "
                                     "map: %s" % (comp_tuple, map_))
                    #self.logger.debug("Modified account list: %s" % comp_tuple)
                    if self.__updater_check_complete_all_event():
                        break
                    for account, cont_list in map_.items():
                        # NOTE: perform container HEAD for an account and
                        # lastly perform the account update
                        self.logger.info("Processing account: %s, "
                                         "container_list: %s"
                                         % (account, cont_list))
                        if account not in accObjectDict:
                            # NOTE: if the account object is already created,
                            # there is no need to re-create it
                            account_instance = AccountInfo(account, self.conf,
                                                           self.logger)
                            accObjectDict[account] = account_instance
                        else:
                            account_instance = accObjectDict[account]
                        for container in cont_list:
                            # NOTE: for every container in the account,
                            # perform a container HEAD request
                            if not account_instance.is_container(container):
                                # NOTE: if the container already exists,
                                # container HEAD has already been performed
                                #contObj = ContainerClass(container)
                                account_instance.add_container(container)
                        try:
                            # NOTE: perform container HEAD
                            self.logger.debug("Processing for container HEAD "
                                              "request")
                            account_instance.get_container_path()
                            self.__container_dispatcher.dispatcher_executer(
                                account_instance)
                            # If any container HEAD fails then do not perform
                            # the account update and iterate to the next stat
                            # file
                            if "fail" in account_instance.account_map.values():
                                self.logger.info("Container HEAD is failing "
                                                 "on %s, skipping stat file %s"
                                                 % (account, comp_tuple))
                                break
                            # NOTE: if the account is present in other stat
                            # files, do not perform the account update in this
                            # iteration; add it to waitList and update the
                            # account at the end.
                            if account_instance.getAccountName() in \
                                    self.__other_file(comp_tuple[0],
                                                      comp_tuple[-1]):
                                if account_instance.getAccountName() not in \
                                        waitList:
                                    self.logger.info(
                                        "Adding account: %s in the waitlist"
                                        % account_instance.getAccountName())
                                    waitList.append(
                                        account_instance.getAccountName())
                                continue
                            # Perform the account update operation if this
                            # account is not present in other stat files
                            if self.__container_dispatcher.account_update(
                                    account_instance):
                                account_instance.remove_container()
                            else:
                                self.logger.info("Account %s can not be "
                                                 "modified" % account)
                                break
                        except Exception as err:
                            self.logger.error("Exception occurred: %s", err)
                    # do not create the sweeper list of a stat_file if any
                    # account update (wait list) of that stat_file remains
                    for account in map_.keys():
                        if account in waitList or \
                                'fail' in accObjectDict[account].account_map.values() or \
                                account_instance.acc_update_failed:
                            break
                    else:
                        self.msg.create_sweeper_list(comp_tuple)
                        del self._updater_map[comp_tuple]
                        self.__other_file_list.remove(comp_tuple)
                # Send account update requests for accounts in waitList at
                # the end
                for comp_tuple, map_ in list(self._updater_map.items()):
                    for account in map_.keys():
                        if account in waitList:
                            if self.__container_dispatcher.account_update(
                                    accObjectDict[account]):
                                accObjectDict[account].remove_container()
                                waitList.remove(account)
                            else:
                                self.logger.info("Account %s can not be "
                                                 "modified" % account)
                                break
                        elif not accObjectDict.get(account):
                            continue
                        elif 'fail' in accObjectDict[account].account_map.values():
                            self.logger.info("Container HEAD is failing on "
                                             "%s, skipping stat file %s"
                                             % (account, comp_tuple))
                            break
                        elif not accObjectDict[account].acc_update_failed:
                            self.logger.info("Account: %s already updated"
                                             % account)
                        elif accObjectDict[account].acc_update_failed:
                            self.logger.info("Account update for %s failed, "
                                             "skipping stat file %s"
                                             % (account, comp_tuple))
                            break
                    else:
                        self.msg.create_sweeper_list(comp_tuple)
                        del self._updater_map[comp_tuple]
                        self.__other_file_list.remove(comp_tuple)
                #self._updater_map = {}  # Will be cleared after one-time processing
                self.logger.debug("Exiting Updater")
                time.sleep(40)
        except Exception as ex:
            self.logger.error("Exception occurred: %s" % ex)
            os._exit(130)
        except:
            self.logger.error("Unknown exception occurred")
            os._exit(130)
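
# ---------------------------------------------------------------------------
# Illustrative sketch (assumed data shapes, not part of the service):
# populate_updater_map above flattens the reader map, keyed by component and
# StatFile object, into an updater map keyed by (component, stat_file_name).
# The literal values below are hypothetical; only the structure is what the
# code relies on.
# ---------------------------------------------------------------------------
def _example_updater_map_shapes():
    # reader map: {component: {stat_obj: {account: [containers]}}}
    reader_map = {
        '1': {'<StatFile for file1>': {'AUTH_test': ['cont1', 'cont2']}},
    }
    # updater map: {(component, stat_file): {account: [containers]}}
    updater_map = {
        ('1', 'file1'): {'AUTH_test': ['cont1', 'cont2']},
    }
    return reader_map, updater_map
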
class AccountSweep(Thread):
    """ Sweep deleted accounts """
    def __init__(self, conf, logger):
        Thread.__init__(self)
        self._file_location = os.path.join(conf['filesystems'], FS, 'tmp',
                                           'account')
        self.del_path = 'del'
        self.conf = conf
        self.logger = logger
        self.logger.info("AccountSweep constructed")
        self.root = conf['filesystems']
        self.msg = GlobalVariables(self.logger)
        self._ownership_list = self.msg.get_ownershipList()
        self.put_queue_flag = False
        self._complete_all_event = self.msg.get_complete_all_event()
        self._transfer_cmp_event = self.msg.get_transfer_cmp_event()
        self._account_ring = AccountRing(conf['osd_dir'], self.logger)

    def _get_del_files(self):
        """Get the list of deleted temp account files"""
        del_files = {}
        try:
            for comp in self._ownership_list:
                if os.path.exists(os.path.join(self._file_location,
                                               str(comp), self.del_path)):
                    #self.logger.debug("listing deleted tmp account files on "
                    #                  "location: %s/%s/%s"
                    #                  % (self._file_location, str(comp),
                    #                     self.del_path))
                    del_files[comp] = os.listdir(os.path.join(
                        self._file_location, str(comp), self.del_path))
            return del_files
        except Exception as ex:
            self.logger.error("While listing directory %s : %s"
                              % (self._file_location, ex))
            return del_files

    def _get_account_name(self, account_file):
        """Split the account name from the file name"""
        return account_file.split('.info')[0]

    def _delete_dir(self, path):
        """Delete a directory tree"""
        if os.path.exists(path):
            running_procs = [Popen(['rm', '-rf', path], stdout=PIPE,
                                   stderr=PIPE)]
            while running_procs:
                for proc in running_procs:
                    retcode = proc.poll()
                    if retcode is not None:
                        running_procs.remove(proc)
                        break
                    else:
                        time.sleep(.2)
                        continue
            if retcode != 0:
                for line in proc.stderr.readlines():
                    self.logger.error("Error while deleting %s: %s"
                                      % (path, line))
                return False
        else:
            self.logger.debug("Path %s does not exist" % path)
        return True

    def _clean_account_data(self, deleted_account, fs_list):
        status = True
        acc_dir = self._account_ring.get_directory_by_hash(
            self._get_account_name(deleted_account))
        for fs in fs_list[::-1]:
            account_path = os.path.join(
                self.root, fs, acc_dir,
                self._get_account_name(deleted_account))
            try:
                if not self._delete_dir(account_path):
                    status = False
            except Exception as ex:
                self.logger.error("While deleting dir: %s : %s"
                                  % (account_path, ex))
                status = False
        return status
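
# ---------------------------------------------------------------------------
# Illustrative sketch (assumed layout, not part of the service): _get_del_files
# above scans <filesystems>/<FS>/tmp/account/<component>/del/ and
# _get_account_name strips a ".info" suffix from the file name. The component
# number and account file name below are hypothetical.
# ---------------------------------------------------------------------------
def _example_del_file_layout(filesystems_root, fs_name):
    """Return the directory _get_del_files would scan and a sample account."""
    # e.g. <filesystems>/<FS>/tmp/account/1/del/AUTH_test.info
    del_dir = os.path.join(filesystems_root, fs_name, 'tmp', 'account',
                           '1', 'del')
    account_file = 'AUTH_test.info'                 # hypothetical file name
    account_name = account_file.split('.info')[0]   # -> 'AUTH_test'
    return del_dir, account_name
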
class HttpListener(BaseHTTPRequestHandler):
    def do_STOP_SERVICE(self):
        """ Handle COMPLETE ALL REQUEST HTTP request """
        self.logger = SimpleLogger(conf=None).get_logger_object()
        self.conf = SimpleLogger(conf=None).get_conf()
        self.msg = GlobalVariables(self.logger)
        all_transfer_event_received = []
        self.logger.info("Account-updater received STOP_SERVICE request")
        complete_all_request_event = self.msg.get_complete_all_event()
        complete_all_request_event.set()
        try:
            while len(all_transfer_event_received) != 4:
                all_transfer_event_received.append(self.msg.get_from_Queue())
            self.logger.info("Completed STOP_SERVICE request")
            # status line must be written before any headers
            self.send_response(HTTP_OK)
            self.send_header('Message-Type', typeEnums.BLOCK_NEW_REQUESTS_ACK)
            self.send_header('Ownership-List', self.msg.get_ownershipList())
            self.end_headers()
            self.wfile.write(str(self.msg.get_ownershipList()))
            return
        except Exception as err:
            self.logger.exception('Exception raised in STOP_SERVICE, '
                                  'error: %s' % err)
            self.send_response(HTTP_INTERNAL_SERVER_ERROR)
            self.end_headers()

    def do_ACCEPT_COMPONENT_TRANSFER(self):
        """ Handle ACCEPT COMPONENT TRANSFER HTTP request """
        try:
            self.logger = SimpleLogger(conf=None).get_logger_object()
            self.conf = SimpleLogger(conf=None).get_conf()
            self.msg = GlobalVariables(self.logger)
            self.ll_port = int(self.conf.get('llport', 61014))
            self.logger.info("Account-updater received ACCEPT_COMPONENT_"
                             "TRANSFER request")
            length = int(self.headers['Content-Length'])
            self.logger.debug("Headers: %s" % self.headers)
            # send intermediate (connection created) acknowledgement to the
            # source node
            self.send_response(HTTP_CONTINUE)
            self.end_headers()
            # receive the new ownership list
            pickled_string = self.rfile.read(length)
            add_comp_list = ast.literal_eval(pickled_string)
            self.logger.info("Accepting new component ownership: %s"
                             % add_comp_list)
            # update the global map for the new ownership
            thread = threading.Thread(target=self.update_my_ownership,
                                      args=(add_comp_list,))
            thread.start()
            self.logger.info("Completed ACCEPT_COMPONENTS_TRANSFER request")
            self.send_response(HTTP_OK)
            self.end_headers()
            return
        except Exception as err:
            self.logger.exception('Exception raised in '
                                  'ACCEPT_COMPONENTS_TRANSFER, error: %s'
                                  % err)
            self.send_response(HTTP_INTERNAL_SERVER_ERROR)
            self.end_headers()

    def do_TRANSFER_COMPONENTS(self):
        """ Handle TRANSFER COMPONENTS HTTP request """
        self.logger = SimpleLogger(conf=None).get_logger_object()
        self.conf = SimpleLogger(conf=None).get_conf()
        self.msg = GlobalVariables(self.logger)
        self._request_handler = Req()
        transfer_component_timeout = int(
            self.conf.get('transfer_component_timeout', 600))
        self.ll_port = int(self.conf.get('llport', 61014))
        self.service_id = self.msg.get_service_id()
        self.deleted_comp_list = []
        # dictionary containing {(dest_node_obj): [comp_list]}, e.g.
        # {('169.254.1.12', '61009', 'HN0101_61014_account-updater'): ['1', '2']}
        transfer_component_map = {}
        # final component status list which will be sent to GL, e.g.
        # [(1, True), (2, False), (3, True)]
        self.final_transfer_status_list = []
        # final response to GL
        self.final_status = False
        # to check whether component transfer completed or failed, e.g.
        # {dest_node_obj1: True, dest_node_obj2: "Failed", dest_node_obj3: False}
        self.check_transfer_component_map = {}
        all_transfer_event_received = []
        self.protocol_version = "HTTP/1.1"
        self.logger.info("Account-updater received Transfer component request")
        try:
            content_length = int(self.headers['Content-Length'])
            # send acknowledgement of TRANSFER_COMPONENT to GL
            self.send_response(HTTP_CONTINUE, "Continue\r\n\r\n")
            data = self.rfile.read(content_length)
            message = TransferCompMessage()
            message.de_serialize(data)
            transfer_component_map = message.service_comp_map()
            self.logger.info("Deserialized transfer component map: %s"
                             % transfer_component_map)
        except Exception as ex:
            self.logger.error("Exception: %s" % ex)
            self.send_response(HTTP_INTERNAL_SERVER_ERROR)
            self.send_header('Message-Type',
                             typeEnums.TRANSFER_COMPONENT_RESPONSE)
            self.end_headers()
            return
        # if transfer_component_map is empty then send HTTP_OK to GL
        if not transfer_component_map:
            self.logger.info("Deserialized transfer component map is empty, "
                             "return HTTP OK!")
            comp_transfer_response = TransferCompResponseMessage(
                self.final_transfer_status_list, True)
            serialized_body = comp_transfer_response.serialize()
            self.send_response(HTTP_OK)
            self.send_header('Message-Type',
                             typeEnums.TRANSFER_COMPONENT_RESPONSE)
            self.send_header('Content-Length', len(serialized_body))
            self.end_headers()
            self.wfile.write(serialized_body)
            return
        for dest_node_obj, comp_list in transfer_component_map.items():
            for comp in comp_list:
                self.deleted_comp_list.append(comp)
                self.final_transfer_status_list.append((comp, False))
            self.check_transfer_component_map[dest_node_obj] = False
        self.logger.debug("Ownership for components: %s will be removed"
                          % self.deleted_comp_list)
        self.delete_self_ownership()
        transfer_comp_event = self.msg.get_transfer_cmp_event()
        transfer_comp_event.set()
        try:
            # send the accept component request to other nodes
            self.logger.info("Sending accept component request to "
                             "destination nodes")
            for target_service_obj, comp_list in \
                    transfer_component_map.items():
                thread_connecting_node = threading.Thread(
                    target=self.send_accept_component_request,
                    args=("ACCEPT_COMPONENT_TRANSFER", target_service_obj,
                          comp_list,))
                thread_connecting_node.setDaemon(True)
                thread_connecting_node.start()
            # check whether component transfer completed and the intermediate
            # response was sent
            self.logger.info("Checking if transfer component completed")
            thread_check_transfer_status = threading.Thread(
                target=self.check_component_transfer_completion)
            thread_check_transfer_status.setDaemon(True)
            thread_check_transfer_status.start()
            thread_check_transfer_status.join(transfer_component_timeout)
            # send the final response to GL
            self.logger.info("Sending final response to GL: %s"
                             % self.final_transfer_status_list)
            if self.final_transfer_status_list:
                for comp_status_tuple in self.final_transfer_status_list:
                    if not comp_status_tuple[1]:
                        self.logger.warning(
                            "Final transfer component list having failed "
                            "component: %s, %s" % (comp_status_tuple[0],
                                                   comp_status_tuple[1]))
                        self.msg.set_ownershipList(self.old_ownership_list)
                        break
                else:
                    self.final_status = True
            comp_transfer_response = TransferCompResponseMessage(
                self.final_transfer_status_list, self.final_status)
            serialized_body = comp_transfer_response.serialize()
            while len(all_transfer_event_received) != 4:
                all_transfer_event_received.append(self.msg.get_from_Queue())
            self.logger.info("Completed TRANSFER_COMPONENTS request")
            self.send_response(HTTP_OK)
            self.send_header('Message-Type',
                             typeEnums.TRANSFER_COMPONENT_RESPONSE)
            self.send_header('Content-Length', len(serialized_body))
            self.end_headers()
            self.wfile.write(serialized_body)
            transfer_comp_event.clear()
            return
        except Exception as ex:
            self.logger.error("Exception raised: %s" % ex)
            transfer_comp_event.clear()
            self.msg.set_ownershipList(self.old_ownership_list)
            self.logger.error("old_ownership_list: %s"
                              % self.old_ownership_list)
            self.send_response(HTTP_INTERNAL_SERVER_ERROR)
            self.send_header('Message-Type',
                             typeEnums.TRANSFER_COMPONENT_RESPONSE)
            self.end_headers()
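
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the service): BaseHTTPRequestHandler
# dispatches on the HTTP request method name, so the custom verbs above
# (STOP_SERVICE, ACCEPT_COMPONENT_TRANSFER, TRANSFER_COMPONENTS) are reached
# by sending requests with those method names. The host and port below are
# hypothetical; the real port comes from the service configuration ('llport').
# ---------------------------------------------------------------------------
def _example_send_stop_service(host='127.0.0.1', port=61014):
    """Send a STOP_SERVICE request and return (status, body)."""
    try:
        from http.client import HTTPConnection   # Python 3
    except ImportError:
        from httplib import HTTPConnection       # Python 2
    conn = HTTPConnection(host, port)
    # "STOP_SERVICE" is routed to HttpListener.do_STOP_SERVICE on the server
    conn.request('STOP_SERVICE', '/')
    resp = conn.getresponse()
    return resp.status, resp.read()
# The listener itself would be served with the standard library, e.g.
# HTTPServer(('0.0.0.0', port), HttpListener).serve_forever().
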
class Walker(Thread):
    def __init__(self, walker_map, conf, logger):
        Thread.__init__(self)
        #self.__conf = conf
        # TODO: once the complete configuration file is created, the correct
        # entries will be fetched
        self._file_location = conf['stat_file_location']
        self._max_update_files = conf['max_update_files']
        self._max_files_stored_in_cache = conf['max_files_stored_in_cache']
        self._expire_delta_time = conf['updaterfile_expire_delta_time']
        self.logger = logger
        self.msg = GlobalVariables(self.logger)
        #self.__MainPath = '/remote/hydra042/spsingh/Latest/objectStorage/src/osd/NewUpdater/devesh/sps/upfiles1'  # TODO: will be fetched from configuration file
        #self.__ownershipList = ['X9','X3','X4','X5','X6','X8']  # TODO: dummy value, will be set at service start
        self._ownership_list = self.msg.get_ownershipList()
        self._walker_map = walker_map
        self.logger.info("Walker constructed")
        self._complete_all_event = self.msg.get_complete_all_event()
        self._transfer_cmp_event = self.msg.get_transfer_cmp_event()
        self.put_queue_flag = False

    def __walker_check_complete_all_event(self):
        """ Walker complete all request (Stop) Event check """
        if self._complete_all_event.is_set():
            self.logger.info("Received complete all request event in walker")
            self._walker_map = []
            self.msg.put_into_Queue()
            return True

    def __walker_get_new_ownership(self):
        """ Walker transfer/accept component request """
        if self._transfer_cmp_event.is_set() and not self.put_queue_flag:
            self.logger.info(
                "Received transfer/accept request event in walker")
            self._ownership_list = self.msg.get_ownershipList()
            self.msg.put_into_Queue()
            self.put_queue_flag = True
        elif not self._transfer_cmp_event.is_set():
            self.put_queue_flag = False

    def run(self):
        try:
            self.__perform_walker_operation()
        except Exception as ex:
            self.logger.error("Error occurred: %s" % ex)
            os._exit(130)
        except:
            self.logger.error("Unknown exception occurred")
            os._exit(130)

    def __perform_walker_operation(self):
        old_stat_files = {}
        while True:
            self.logger.debug("Walker started")
            temp_structure = []
            self._ownership_list = self.msg.get_ownershipList()
            if self.__walker_check_complete_all_event():
                break
            self.__walker_get_new_ownership()
            try:
                if not old_stat_files:
                    old_stat_files = {}
                    all_stat_files = {}
                    for comp in self._ownership_list:
                        if os.path.exists(
                                os.path.join(self._file_location, str(comp))):
                            for file in os.listdir(
                                    '%s/%s' % (self._file_location, comp)):
                                all_stat_files[os.path.join(
                                    self._file_location, str(comp), file)] = \
                                    os.path.getmtime(os.path.join(
                                        self._file_location, str(comp), file))
                            for old_file, modtime in sorted(
                                    all_stat_files.items(),
                                    key=lambda item: item[1])[:-1][
                                        :self._max_files_stored_in_cache]:
                                old_stat_files[os.path.join(
                                    old_file.split('/')[-2:-1][0],
                                    os.path.basename(old_file))] = \
                                    all_stat_files[old_file]
                            if len(os.listdir('%s/%s' % (
                                    self._file_location, comp))) == 1:
                                left_file = os.listdir('%s/%s' % (
                                    self._file_location, comp))[0]
                                totalsecond_diff = respondsecondsDiff(
                                    left_file)
                                if totalsecond_diff > self._expire_delta_time:
                                    old_stat_files[os.path.join(
                                        str(comp), left_file)] = \
                                        os.path.getmtime(os.path.join(
                                            self._file_location, str(comp),
                                            left_file))
                                else:
                                    self.logger.debug(
                                        "Stat file %s not expired"
                                        % os.path.join(self._file_location,
                                                       str(comp), left_file))
                            all_stat_files = {}
                        else:
                            self.logger.debug(
                                "Directory %s does not exist"
                                % os.path.join(self._file_location,
                                               str(comp)))
                for stat_file in list(old_stat_files.keys()):
                    if os.stat(os.path.join(self._file_location,
                                            stat_file)).st_size == 0:
                        comp_tuple = (stat_file.split('/')[0],
                                      stat_file.split('/')[1])
                        self.logger.info("Stat file %s is empty, therefore "
                                         "deleting it"
                                         % os.path.join(self._file_location,
                                                        stat_file))
                        self.msg.create_sweeper_list(comp_tuple)
                        del old_stat_files[stat_file]
                for stat_file, modtime in sorted(
                        old_stat_files.items(),
                        key=lambda item: item[1])[:self._max_update_files]:
                    if stat_file not in temp_structure:
                        temp_structure.append(stat_file)
                    del old_stat_files[stat_file]
                self._walker_map[:] = temp_structure[:self._max_update_files]
                self.logger.debug("Stat files to be processed at walker: %s"
                                  % self._walker_map)
            except Exception as ex:
                self.logger.error("Exception occurred: %s", ex)
            self.logger.debug("Exiting walker")
            time.sleep(30)
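
# ---------------------------------------------------------------------------
# Illustrative sketch (assumed entry format, not part of the service): the
# Walker publishes entries of the form "<component>/<stat_file>" into the
# shared walker_map, and the Reader splits them back apart with
# entry.split("/"). The literal values below are hypothetical.
# ---------------------------------------------------------------------------
def _example_walker_map_entries():
    walker_map = ['1/file1', '2/file9']   # "<component>/<stat_file>"
    for entry in walker_map:
        comp, stat_file = entry.split('/')
        # the Reader would now open <stat_file_location>/<comp>/<stat_file>
        yield comp, stat_file
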