def run(self):
    """Entry point of the counting-rhyme example ActionHandler daemon.

    Reads its config file, configures logging, wires the single "Rhyme"
    capability into a worker collection behind a ZeroMQ socket, installs
    graceful-shutdown signal handlers and then blocks in the gevent event
    loop until all handlers have terminated.  Never returns normally:
    exits the process via sys.exit(0).
    """
    # Open config file (not actually used in this example)
    actionhandler_config = ConfigParser()
    actionhandler_config.read('/opt/autopilot/conf/pyactionhandler/counting-rhyme-actionhandler.conf')
    # Setup logging in normal operation
    logging.config.fileConfig('/opt/autopilot/conf/pyactionhandler/counting-rhyme-actionhandler-log.conf')
    logger = logging.getLogger('root')
    # Setup debug logging (see commandline interface at the end of the file)
    if self.debug:
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logger.info("Logging to console and logfile")
    # An instance of the CountingRhyme class in this example demonstrates how to manage "state" that
    # is shared among all instances of the Action class.
    #
    # In a real world Actionhandler, this could be a database connection, an API etc.
    rhyme = CountingRhyme()
    # Map the "Capability" string to the Action object that implements this capability
    #
    # For each command issued by the HIRO engine, an instance will be created with a number of standard
    # parameters plus the ones you specify here
    #
    # Upon command execution, the __call__() method of the object will be called.
    capabilities = {
        "Rhyme": Capability(CountingRhymeAction, rhyme=rhyme)
    }
    # A worker collection shares the same request and response queue
    # Workers will be created as needed, one per MARS node
    # A worker will execute max. <parallel_tasks_per_worker> actions in parallel
    # A worker collection will execute max. <parallel_tasks> actions in parallel
    # Workers will be destroyed after <worker_max_idle> seconds of inactivity to free up memory
    worker_collection = WorkerCollection(
        capabilities,
        parallel_tasks=10,
        parallel_tasks_per_worker=3,
        worker_max_idle=300,
    )
    # The actual ActionHandler consists of a ZeroMQ socket and a worker collection
    # It will listen for incoming messages and if it knows the capability, create an
    # Action object and put it onto the request queue of the worker collection.
    #
    # The worker collection will then lookup if there's already a Worker for the MARS
    # node the command originated from or create a new one. It will then remove the Action
    # object from the worker collection's request queue and put it onto the worker's queue
    #
    # The Worker will remove the first <parallel_tasks_per_worker> action(s) from its queue,
    # execute them and put the results back onto the worker collection's response queue.
    counting_rhyme_handler = SyncHandler(
        worker_collection,
        # The socket(s) the Actionhandler will listen on, hardcoded in this example but
        # should really be read from a config file
        zmq_url='tcp://*:7291'
    )
    action_handlers = [counting_rhyme_handler]  # list of all defined Actionhandlers

    # Function to shutdown gracefully by letting all current commands finish
    def exit_gracefully():
        logger.info("Starting shutdown")
        for action_handler in action_handlers:
            action_handler.shutdown()
        logger.info("Finished shutdown")

    # Graceful shutdown can be triggered by SIGINT and SIGTERM
    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    # Start main gevent loop
    greenlets = [action_handler.run() for action_handler in action_handlers]
    gevent.idle()  # Pass control over the event loop to the other greenlets, so they can initialize
    gevent.joinall(greenlets)  # waits until all greenlet pseudo-threads terminate
    sys.exit(0)
def run(self):
    """Entry point of the Ayehu ActionHandler daemon.

    Reads all config files, configures logging, connects two Redis
    datastores (one for commands handed to Ayehu, one as zeep's SOAP
    cache), starts an embedded WSGI server for the Ayehu callback REST
    API, prepares a GraphIT session for background-mode issue updates,
    and finally serves the "ExecuteWorkflow" / "ExecuteWorkflowInBackground"
    capabilities over ZeroMQ until shut down by SIGINT/SIGTERM.
    """
    actionhandler_config = ConfigParser()
    actionhandler_config.read(
        '/opt/autopilot/conf/pyactionhandler/ayehu-actionhandler.conf')
    logging.config.fileConfig(
        '/opt/autopilot/conf/pyactionhandler/ayehu-actionhandler-log.conf')
    logger = logging.getLogger('root')
    # Extra console handler when started with --debug on the command line
    if self.debug:
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logger.info("Logging also to console")
    # Make redis-py use gevent's cooperative sockets so blocking Redis
    # calls don't stall the event loop
    redis.connection.socket = gevent.socket
    ayehu_config = ConfigParser()
    ayehu_config.read(
        '/opt/autopilot/conf/pyactionhandler/ayehu-actionhandler-ayehu.conf'
    )
    pmp_config = ConfigParser()
    pmp_config.read('/opt/autopilot/conf/pyactionhandler/pmp.conf')
    # Redis datastore for commands handed to Ayehu
    commands_redis = redis.StrictRedis(
        host=actionhandler_config.get('RESTInterface', 'RedisHost'),
        port=actionhandler_config.get('RESTInterface', 'RedisPort'),
        db=actionhandler_config.get('RESTInterface', 'RedisDB'),
        charset="utf-8",
        decode_responses=True)
    commands_pubsub = commands_redis.pubsub(ignore_subscribe_messages=True)
    # Redis datastore for zeep's cache
    zeep_cache_redis = redis.StrictRedis(
        host=actionhandler_config.get('SOAPClient', 'RedisHost'),
        port=actionhandler_config.get('SOAPClient', 'RedisPort'),
        db=actionhandler_config.get('SOAPClient', 'RedisDB'))
    zeep_cache = RedisCache(timeout=3600, redis=zeep_cache_redis)
    zeep_transport = zeep.transports.Transport(cache=zeep_cache)
    # Setup REST API for callback
    rest_api = RESTAPI(baseurl=actionhandler_config.get(
        'RESTInterface', 'CallbackBaseURL'),
                       redis=commands_redis,
                       pubsub=commands_pubsub)
    # NOTE(review): the listen port 8080 is hardcoded here — presumably it
    # should match the CallbackBaseURL from the config; verify.
    server = pywsgi.WSGIServer(('', 8080),
                               rest_api.app,
                               log=None,
                               error_log=None)
    # Setup GraphIT session for updating issues in background mode
    # The *_SSL_Cert options may hold either a boolean (verify on/off) or a
    # non-boolean string; getboolean() raises ValueError for the latter, in
    # which case the raw string is kept (presumably a CA bundle path for the
    # underlying HTTP client's `verify` — TODO confirm).
    try:
        wso2_verify_ssl = actionhandler_config.getboolean(
            'BackgroundMode', 'WSO2_SSL_Cert')
    except ValueError:
        wso2_verify_ssl = actionhandler_config.get('BackgroundMode',
                                                   'WSO2_SSL_Cert')
    try:
        graphit_verify_ssl = actionhandler_config.getboolean(
            'BackgroundMode', 'GraphIT_SSL_Cert')
    except ValueError:
        graphit_verify_ssl = actionhandler_config.get(
            'BackgroundMode', 'GraphIT_SSL_Cert')
    graphit_session = GraphitSession(
        actionhandler_config.get('BackgroundMode', 'GraphIT_URL'))
    # OAuth2 client-credentials auth against WSO2 for the GraphIT session
    graphit_session.auth = WSO2AuthCC(
        actionhandler_config.get('BackgroundMode', 'WSO2_URL'),
        client=[
            actionhandler_config.get('BackgroundMode', 'WSO2_Client_ID'),
            actionhandler_config.get('BackgroundMode', 'WSO2_Client_Secret')
        ],
        verify=wso2_verify_ssl)
    graphit_session.verify = graphit_verify_ssl
    deployment_timeout = actionhandler_config.getint(
        'BackgroundMode', 'GraphIT_Deployment_Timeout', fallback=60)
    # One SyncHandler serving both capabilities; worker limits come from the
    # [ActionHandler] section with conservative fallbacks.
    action_handlers = [
        SyncHandler(WorkerCollection(
            {
                "ExecuteWorkflow":
                Capability(AyehuAction,
                           zeep_transport=zeep_transport,
                           redis=commands_redis,
                           ayehu_config=ayehu_config,
                           pmp_config=pmp_config,
                           rest_api=rest_api),
                "ExecuteWorkflowInBackground":
                Capability(AyehuBackgroundAction,
                           zeep_transport=zeep_transport,
                           redis=commands_redis,
                           ayehu_config=ayehu_config,
                           pmp_config=pmp_config,
                           rest_api=rest_api,
                           graphit_session=graphit_session,
                           deployment_timeout=deployment_timeout),
            },
            parallel_tasks=actionhandler_config.getint('ActionHandler',
                                                       'ParallelTasks',
                                                       fallback=10),
            parallel_tasks_per_worker=actionhandler_config.getint(
                'ActionHandler', 'ParallelTasksPerWorker', fallback=10),
            worker_max_idle=actionhandler_config.getint('ActionHandler',
                                                        'WorkerMaxIdle',
                                                        fallback=300)),
                    zmq_url=actionhandler_config.get(
                        'ActionHandler', 'ZMQ_URL'))
    ]

    # Shutdown gracefully: let running commands finish, then stop the
    # callback web server so serve_forever() below returns.
    def exit_gracefully():
        logger.info("Starting shutdown")
        for action_handler in action_handlers:
            action_handler.shutdown()
        logger.info("Finished shutdown")
        server.stop()

    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    greenlets = [
        action_handler.run() for action_handler in action_handlers
    ]
    gevent.idle()  # let the handler greenlets initialize
    # Blocks until server.stop() is called from exit_gracefully()
    server.serve_forever()
    gevent.joinall(greenlets)
    sys.exit()
def run(self):
    """Run the WinRM ActionHandler daemon until SIGINT/SIGTERM.

    Loads the handler, jumpserver and PMP config files, sets up logging
    (optionally mirrored to the console in debug mode), registers the
    "ExecuteCommand" and "ExecutePowershell" capabilities behind a ZeroMQ
    socket and blocks in the gevent loop until all handlers terminate.
    """
    cfg = ConfigParser()
    cfg.read('/opt/autopilot/conf/pyactionhandler/winrm-actionhandler.conf')
    logging.config.fileConfig(
        '/opt/autopilot/conf/pyactionhandler/winrm-actionhandler-log.conf')
    log = logging.getLogger('root')
    if self.debug:
        # Mirror log output to the console in addition to the logfile.
        log.setLevel(logging.DEBUG)
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(
            logging.Formatter("%(asctime)s [%(levelname)s] %(message)s",
                              "%Y-%m-%d %H:%M:%S"))
        log.addHandler(console)
        log.info("Logging also to console")
    # Read config files
    jump_cfg = ConfigParser()
    jump_cfg.read(
        '/opt/autopilot/conf/pyactionhandler/winrm-actionhandler-jumpserver.conf')
    pmp_cfg = ConfigParser()
    pmp_cfg.read('/opt/autopilot/conf/pyactionhandler/pmp.conf')
    # Capability name -> Action implementation; both share the same configs.
    capabilities = {
        "ExecuteCommand":
            Capability(WinRMCmdAction,
                       pmp_config=pmp_cfg,
                       jumpserver_config=jump_cfg),
        "ExecutePowershell":
            Capability(WinRMPowershellAction,
                       pmp_config=pmp_cfg,
                       jumpserver_config=jump_cfg),
    }
    workers = WorkerCollection(
        capabilities,
        parallel_tasks=cfg.getint('ActionHandler', 'ParallelTasks',
                                  fallback=5),
        parallel_tasks_per_worker=cfg.getint('ActionHandler',
                                             'ParallelTasksPerWorker',
                                             fallback=5),
        worker_max_idle=cfg.getint('ActionHandler', 'WorkerMaxIdle',
                                   fallback=300))
    handlers = [SyncHandler(workers,
                            zmq_url=cfg.get('ActionHandler', 'ZMQ_URL'))]

    # Graceful shutdown: let in-flight commands finish first.
    def exit_gracefully():
        log.info("Starting shutdown")
        for handler in handlers:
            handler.shutdown()
        log.info("Finished shutdown")

    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    greenlets = [handler.run() for handler in handlers]
    gevent.idle()  # give the handler greenlets a chance to initialize
    gevent.joinall(greenlets)
    sys.exit()
def run(self):
    """Entry point of the WinRM ActionHandler daemon.

    Reads the handler, jumpserver and PMP config files, configures
    logging, exposes the "ExecuteCommand" and "ExecutePowershell"
    capabilities over a ZeroMQ socket and blocks in the gevent event
    loop until shut down by SIGINT/SIGTERM.
    """
    actionhandler_config=ConfigParser()
    actionhandler_config.read((
        '/opt/autopilot/conf/pyactionhandler/'
        'winrm-actionhandler.conf'))
    logging.config.fileConfig((
        '/opt/autopilot/conf/pyactionhandler/'
        'winrm-actionhandler-log.conf'))
    logger = logging.getLogger('root')
    # Extra console handler when started with --debug
    if self.debug:
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logger.info("Logging also to console")
    # Read config files
    jumpserver_config = ConfigParser()
    jumpserver_config.read((
        '/opt/autopilot/conf/pyactionhandler/'
        'winrm-actionhandler-jumpserver.conf'))
    pmp_config = ConfigParser()
    pmp_config.read('/opt/autopilot/conf/pyactionhandler/pmp.conf')
    # Single SyncHandler serving both WinRM capabilities; worker limits are
    # read from the [ActionHandler] section with conservative fallbacks.
    action_handlers = [SyncHandler(
        WorkerCollection(
            {"ExecuteCommand": Capability(WinRMCmdAction,
                                          pmp_config=pmp_config,
                                          jumpserver_config=jumpserver_config),
             "ExecutePowershell": Capability(WinRMPowershellAction,
                                             pmp_config=pmp_config,
                                             jumpserver_config=jumpserver_config)},
            parallel_tasks = actionhandler_config.getint(
                'ActionHandler', 'ParallelTasks', fallback=5),
            parallel_tasks_per_worker = actionhandler_config.getint(
                'ActionHandler', 'ParallelTasksPerWorker', fallback=5),
            worker_max_idle = actionhandler_config.getint(
                'ActionHandler', 'WorkerMaxIdle', fallback=300)),
        zmq_url = actionhandler_config.get(
            'ActionHandler', 'ZMQ_URL'))]

    # Shutdown gracefully by letting all current commands finish
    def exit_gracefully():
        logger.info("Starting shutdown")
        for action_handler in action_handlers:
            action_handler.shutdown()
        logger.info("Finished shutdown")

    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    greenlets=[action_handler.run() for action_handler in action_handlers]
    gevent.idle()  # let the handler greenlets initialize
    gevent.joinall(greenlets)  # block until all handlers terminate
    sys.exit()
def run(self):
    """Run the counting-rhyme example ActionHandler daemon.

    Demonstrates the minimal wiring of an ActionHandler: config file
    (unused in this example), logging, one capability ("Rhyme") sharing
    a single CountingRhyme instance as state, a worker collection and a
    ZeroMQ-backed SyncHandler.  Blocks in the gevent loop until the
    handlers terminate, then exits with status 0.
    """
    # Open the (unused, example-only) config file.
    cfg = ConfigParser()
    cfg.read(
        '/opt/autopilot/conf/pyactionhandler/counting-rhyme-actionhandler.conf'
    )
    # Normal-operation logging comes from the logging config file.
    logging.config.fileConfig(
        '/opt/autopilot/conf/pyactionhandler/counting-rhyme-actionhandler-log.conf'
    )
    log = logging.getLogger('root')
    if self.debug:
        # --debug on the command line mirrors log output to the console.
        log.setLevel(logging.DEBUG)
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(
            logging.Formatter("%(asctime)s [%(levelname)s] %(message)s",
                              "%Y-%m-%d %H:%M:%S"))
        log.addHandler(console)
        log.info("Logging to console and logfile")
    # State shared by every CountingRhymeAction instance; in a real
    # handler this would be e.g. a database connection or an API client.
    rhyme = CountingRhyme()
    # Capability name -> Action implementation.  For each command from the
    # HIRO engine an Action instance is created and then called.
    workers = WorkerCollection(
        {"Rhyme": Capability(CountingRhymeAction, rhyme=rhyme)},
        # One worker per MARS node, sharing request/response queues:
        # at most 10 actions in parallel overall, 3 per worker; idle
        # workers are destroyed after 300 s to free memory.
        parallel_tasks=10,
        parallel_tasks_per_worker=3,
        worker_max_idle=300,
    )
    # The handler listens on a ZeroMQ socket and feeds recognized
    # capabilities into the worker collection's request queue.
    # (URL hardcoded for the example; a real handler reads it from config.)
    handlers = [SyncHandler(workers, zmq_url='tcp://*:7291')]

    # Graceful shutdown: let all in-flight commands finish first.
    def exit_gracefully():
        log.info("Starting shutdown")
        for handler in handlers:
            handler.shutdown()
        log.info("Finished shutdown")

    # SIGINT and SIGTERM both trigger the graceful shutdown.
    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    # Start the main gevent loop.
    greenlets = [handler.run() for handler in handlers]
    gevent.idle()  # yield so the handler greenlets can initialize
    gevent.joinall(greenlets)  # wait for all greenlets to terminate
    sys.exit(0)
def run(self):
    """Entry point of the Ayehu ActionHandler daemon.

    Loads all config files, configures logging, connects two Redis
    datastores (commands handed to Ayehu, and zeep's SOAP cache), starts
    an embedded WSGI server for the Ayehu callback REST API, prepares a
    GraphIT session for background-mode issue updates, and serves the
    "ExecuteWorkflow" / "ExecuteWorkflowInBackground" capabilities over
    ZeroMQ until shut down by SIGINT/SIGTERM.
    """
    actionhandler_config=ConfigParser()
    actionhandler_config.read('/opt/autopilot/conf/pyactionhandler/ayehu-actionhandler.conf')
    logging.config.fileConfig('/opt/autopilot/conf/pyactionhandler/ayehu-actionhandler-log.conf')
    logger = logging.getLogger('root')
    # Extra console handler when started with --debug
    if self.debug:
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(message)s","%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logger.info("Logging also to console")
    # Make redis-py use gevent's cooperative sockets so blocking Redis
    # calls don't stall the event loop
    redis.connection.socket = gevent.socket
    ayehu_config = ConfigParser()
    ayehu_config.read('/opt/autopilot/conf/pyactionhandler/ayehu-actionhandler-ayehu.conf')
    pmp_config = ConfigParser()
    pmp_config.read('/opt/autopilot/conf/pyactionhandler/pmp.conf')
    # Redis datastore for commands handed to Ayehu
    commands_redis = redis.StrictRedis(
        host=actionhandler_config.get('RESTInterface', 'RedisHost'),
        port=actionhandler_config.get('RESTInterface', 'RedisPort'),
        db=actionhandler_config.get('RESTInterface', 'RedisDB'),
        charset = "utf-8",
        decode_responses = True)
    commands_pubsub = commands_redis.pubsub(ignore_subscribe_messages=True)
    # Redis datastore for zeep's cache
    zeep_cache_redis = redis.StrictRedis(
        host=actionhandler_config.get('SOAPClient', 'RedisHost'),
        port=actionhandler_config.get('SOAPClient', 'RedisPort'),
        db=actionhandler_config.get('SOAPClient', 'RedisDB'))
    zeep_cache = RedisCache(timeout=3600, redis=zeep_cache_redis)
    zeep_transport = zeep.transports.Transport(cache=zeep_cache)
    # Setup REST API for callback
    rest_api = RESTAPI(
        baseurl=actionhandler_config.get('RESTInterface', 'CallbackBaseURL'),
        redis=commands_redis,
        pubsub=commands_pubsub)
    # NOTE(review): listen port 8080 is hardcoded — presumably it should
    # match the CallbackBaseURL from the config; verify.
    server = pywsgi.WSGIServer(
        ('', 8080), rest_api.app, log=None, error_log=None)
    # Setup GraphIT session for updating issues in background mode
    # The *_SSL_Cert options may hold either a boolean (verify on/off) or a
    # non-boolean string; getboolean() raises ValueError for the latter, in
    # which case the raw string is kept (presumably a CA bundle path for the
    # underlying HTTP client's `verify` — TODO confirm).
    try:
        wso2_verify_ssl=actionhandler_config.getboolean(
            'BackgroundMode', 'WSO2_SSL_Cert')
    except ValueError:
        wso2_verify_ssl=actionhandler_config.get(
            'BackgroundMode', 'WSO2_SSL_Cert')
    try:
        graphit_verify_ssl=actionhandler_config.getboolean(
            'BackgroundMode', 'GraphIT_SSL_Cert')
    except ValueError:
        graphit_verify_ssl=actionhandler_config.get(
            'BackgroundMode', 'GraphIT_SSL_Cert')
    graphit_session = GraphitSession(
        actionhandler_config.get('BackgroundMode', 'GraphIT_URL'))
    # OAuth2 client-credentials auth against WSO2 for the GraphIT session
    graphit_session.auth = WSO2AuthCC(
        actionhandler_config.get('BackgroundMode', 'WSO2_URL'),
        client = [
            actionhandler_config.get(
                'BackgroundMode', 'WSO2_Client_ID'),
            actionhandler_config.get(
                'BackgroundMode', 'WSO2_Client_Secret')
        ],
        verify=wso2_verify_ssl)
    graphit_session.verify=graphit_verify_ssl
    deployment_timeout=actionhandler_config.getint(
        'BackgroundMode', 'GraphIT_Deployment_Timeout', fallback=60)
    # One SyncHandler serving both capabilities; worker limits come from the
    # [ActionHandler] section with fallbacks.
    action_handlers = [SyncHandler(
        WorkerCollection(
            {"ExecuteWorkflow":Capability(
                AyehuAction,
                zeep_transport=zeep_transport,
                redis=commands_redis,
                ayehu_config=ayehu_config,
                pmp_config=pmp_config,
                rest_api=rest_api),
             "ExecuteWorkflowInBackground":Capability(
                 AyehuBackgroundAction,
                 zeep_transport=zeep_transport,
                 redis=commands_redis,
                 ayehu_config=ayehu_config,
                 pmp_config=pmp_config,
                 rest_api=rest_api,
                 graphit_session=graphit_session,
                 deployment_timeout=deployment_timeout),
             },
            parallel_tasks = actionhandler_config.getint(
                'ActionHandler', 'ParallelTasks', fallback=10),
            parallel_tasks_per_worker = actionhandler_config.getint(
                'ActionHandler', 'ParallelTasksPerWorker', fallback=10),
            worker_max_idle = actionhandler_config.getint('ActionHandler',
                                                          'WorkerMaxIdle',
                                                          fallback=300)),
        zmq_url = actionhandler_config.get('ActionHandler', 'ZMQ_URL'))]

    # Shutdown gracefully: let running commands finish, then stop the
    # callback web server so serve_forever() below returns.
    def exit_gracefully():
        logger.info("Starting shutdown")
        for action_handler in action_handlers:
            action_handler.shutdown()
        logger.info("Finished shutdown")
        server.stop()

    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    greenlets=[action_handler.run() for action_handler in action_handlers]
    gevent.idle()  # let the handler greenlets initialize
    # Blocks until server.stop() is called from exit_gracefully()
    server.serve_forever()
    gevent.joinall(greenlets)
    sys.exit()