def execute(self, process, action_value):
    """Run *process* against its configured connection, handling reconnects.

    Parameters:
        process: dict holding at least a "connection_name" key that selects
            the connection to use.
        action_value: payload forwarded untouched to ``connection.run``.

    Returns:
        The tuple produced by ``connection.run(process, action_value)``, or
        ``(None, settings.CONNECTION_ERROR, "Reconnect fail!")`` when a
        scheduled reconnect attempt fails, or ``(None, settings.SYSTEM_ERROR, e)``
        for any unexpected exception.
    """
    try:
        connection_name = process["connection_name"]
        connection = self.get_connection(connection_name)
        # An entry in wait_reconnect means this connection failed earlier and
        # a reconnect attempt has been scheduled for it.
        if connection_name in self.wait_reconnect:
            # NOTE(review): the reconnect is attempted while the stored
            # deadline is still in the future and skipped once it has passed
            # -- confirm the comparison direction is intended.
            if self.wait_reconnect[connection_name] > datetime.now():
                if connection.reconnect():
                    del self.wait_reconnect[connection_name]
                else:
                    # NOTE(review): local import + raw payload log looks like
                    # leftover debugging; kept to preserve behavior.
                    import procsync.modules.logger as log
                    log.info(str(action_value))
                    # Push the retry deadline forward by the connection's
                    # retry_sleep (seconds), falling back to the global value.
                    self.wait_reconnect[connection_name] = datetime.now() + timedelta(
                        0, getattr(connection, "retry_sleep", settings.RETRY_SLEEP)
                    )
                    return (None, settings.CONNECTION_ERROR, "Reconnect fail!")
        result = connection.run(process, action_value)
        # The connection flags itself when the run left it needing a
        # reconnect before its next use; schedule one.
        if connection.is_necessary_reprocess:
            self.wait_reconnect[connection_name] = datetime.now() + timedelta(
                0, getattr(connection, "retry_sleep", settings.RETRY_SLEEP)
            )
        return result
    except Exception as e:
        return (None, settings.SYSTEM_ERROR, e)
def finish(self, *args, **kwargs):
    """Block until every worker thread in the pool has terminated.

    Installed as the kill/^C signal handler: the signal breaks the
    monitoring loop, and this method guarantees the process only dies
    once every thread has finished its current work.
    """
    # Keep sweeping the pool until no thread reports itself alive.
    while any(thread.is_alive() for thread in self.pool):
        for thread in self.pool:
            if thread.is_alive():
                thread.join()
                message = "The thread [%s] was finished!" % thread.name
                print >> stdout, message
                log.info(message)
    message = "Finish instance."
    print >> stdout, message
    log.info(message)
def monitoring(self):
    """Poll the thread pool until every worker thread has died."""
    while True:
        try:
            # Snapshot of the threads that are still running.
            still_alive = [thread for thread in self.pool if thread.is_alive()]
            if not still_alive:
                message = "All threads was closed!"
                print >> stdout, message
                log.info(message)
                break
            sleep(settings.MANAGER_SLEEP)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # can still terminate the loop.
            log.exception("Problem while monitoring the threads.")
def start(self):
    """Write the pid lock file, spawn every configured thread, then monitor.

    Raises:
        ReferenceError: when a name from run_list has no (valid) entry in
            the thread configuration file.
    """
    message = "Start the instance."
    # Record our pid in the lock file so the shell/unix service can find it.
    with open(self.lock.lock_file, "w") as pid_file:
        pid_file.write(str(getpid()))
        pid_file.flush()
    print >> stdout, message
    log.info(message)
    # Spawn one manager thread per configured process name.
    for thread_name in self.run_list:
        config = settings.THREAD_CONFIG.get_thread_config(thread_name)
        if config is None:
            raise ReferenceError("The thread name [%s] not exist in the file [%s] or have a wrong configuration." % (thread_name, settings.THREAD_CONFIG.file_name))
        # NOTE(review): pop() mutates the config dict returned by
        # get_thread_config -- confirm it is not shared/reused.
        backend = config.pop("backend")
        worker = backend.Manager(config, name=thread_name)
        worker.start()
        self.pool.append(worker)
        message = "The thread [%s] was started!" % thread_name
        print >> stdout, message
        log.info(message)
    # Block here until every spawned thread dies.
    self.monitoring()
# If don' had a problem, so finish the request self.update_request(action_name, row[PROCESS_ID], settings.PROCESS_SUCCESS, "Success!", action["retry"], action["reprocess_time"]) self.manager.connection.commit() except OperationalError, oe: update_param = (action_name, row[PROCESS_ID], settings.ACTION_ERROR, "Action Error [%s]!" % oe, action["retry"], action["reprocess_time"]) self.manager.connection.rollback() except Exception, e: update_param = (action_name, row[PROCESS_ID], settings.SYSTEM_ERROR, "System Error [%s]!" % e, action["retry"], action["reprocess_time"]) self.manager.connection.rollback() finally: self.manager.connection.autocommit(True) if update_param is not None: self.update_request(*update_param) elif action["tag"] == 'action': main_message = "[%s/%s] - " % (self.name, action_name) log.info("%s Processing: %s" % (main_message, str(row)), verbose=1) # Check if have origin exist other else use the same information of have_request origin = format_value(action, "origin", default_value=None) if origin is None: # Return to a row because we threat like a result of query origin = (row,) else: origin, result_code, error_message = process.execute(origin, row) if result_code != 0: self.update_request(action_name, row[PROCESS_ID], result_code, "Origin - %s" % (error_message,), action["retry"], action["reprocess_time"]) continue log.info("%s Retrieve origin: %s" % (main_message, str(origin)), verbose=1) have_error = False # each row that return need run in destination for origin_row in origin: # In case a list of error, will store the error in case of the stop_on_error = False
def error_with_retry(self, process, action_value, *args, **kwargs):
    """Test stub: log the value at index 6 of *action_value* and return a
    retry-style error result (code 2)."""
    outcome = (None, 2, "Test retrying")
    log.info("Retry [%s]!" % action_value[6])
    return outcome
def with_success(self, process, action_value, *args, **kwargs):
    """Test stub: log a fixed message and return a success result (code 0)."""
    outcome = (None, 0, "Success")
    log.info("Execute method with_success!")
    return outcome
def with_error(self, process, action_value, *args, **kwargs):
    """Test stub: log a fixed message and return an error result (code 3)."""
    log.info("Error Message!")
    outcome = (None, 3, "Test of error")
    return outcome
finally: self.manager.connection.autocommit(True) if update_param is not None: self.update_request(*update_param) elif action["tag"] == 'action': # Check if have origin exist other else use the same information of have_request origin = format_value(action, "origin", default_value=None) if origin is None: # Return to a row because we threat like a result of query origin = (row,) else: origin, result_code, error_message = process.execute(origin, row) if result_code != 0: self.update_request(action_name, row[PROCESS_ID], result_code, error_message, action["retry"], action["reprocess_time"]) continue log.info("Thread [%s], processing action [%s] - Value: [%s] - Origin Result [%s]" % (self.name, action_name, str(row), str(origin)), verbose=1) have_error = False # each row that return need run in destination for origin_row in origin: # In case a list of error, will store the error in case of the stop_on_error = False error_result = None for destination_item in format_value(action, "destination_list", default_value=[]): _, result_code, error_message = process.execute(destination_item, origin_row) if result_code != 0: have_error = True if destination_item["stop_on_error"]: self.update_request(action_name, row[PROCESS_ID], result_code, error_message, action["retry"], action["reprocess_time"]) break else: error_result = [ result_code, error_message ] if error_result:
uid=uid, gid=gid, files_preserve=log.handler_file, # umask=0o002, # Uncomment for direct command-line debugging # stdout=sys.stdout, # stderr=sys.stderr, ) context.signal_map = dict([ (signal_key, self.thread_manager.finish) for signal_key in [SIGTERM, SIGINT, SIGABRT] ]) with context: self.thread_manager.start() except Exception, e: log.exception("The instance generated a error, please check the Traceback [%s]." % e) finally: if lock is not None and lock.is_locked(): log.info("Releasing the file lock") lock.release() def main(options): try: # File that represent the lock lock_file = join(gettempdir(), settings.INSTANCE_NAME) if options.lock_file is None else options.lock_file # Information about the own user that will run the application (Used by Unix script) uid = gid = None if options.user_owner is not None: try: password_struct = getpwnam(options.user_owner) uid = password_struct.pw_uid gid = password_struct.pw_gid except Exception, e: log.warning("User name not found or have other problem [%s]!" % e)