Example #1
 def __init__(self, infra_model, os_creds, num_threads=5):
     self.logger = root_logger.getChild("os_prov_agent")
     self.infra_config_model = self.compute_model(infra_model)
     # a do-nothing NamespaceModel subclass; this agent only sequences resource
     # tasks, so an empty namespace instance satisfies the base class __init__
     class ShutupNamespace(NamespaceModel): pass
     nmi = ShutupNamespace()
     super(ResourceTaskSequencerAgent, self).__init__(config_model_instance=self.infra_config_model,
                                                      namespace_model_instance=nmi,
                                                      no_delay=True,
                                                      num_threads=num_threads,
                                                      log_level=self.logger.getEffectiveLevel())
     self.run_contexts = {}  #keys are threads, values are RunContext objects
     self.record = OpenstackProvisioningRecord(uuid.uuid4())
     self.os_creds = os_creds
Example #2
 def __init__(self, username, password, tenant_name, auth_url, num_threads=5,
              log_level=LOG_INFO):
     """
     @param username: String; the Openstack user name
     @param password: String; the Openstack password for username
     @param tenant_name: String, the Openstack tenant's name
     @param auth_url: String; the Openstack authentication URL. This will
         authenticate the username/password to allow them to perform the
         resource provisioning tasks.
     @keyword num_threads: Optional. Integer, default 5. The number of threads
         to spawn to handle parallel provisioning tasks
     @keyword log_level: Optional; default LOG_INFO. One of the logging values
         from actuator: LOG_CRIT, LOG_ERROR, LOG_WARN, LOG_INFO, LOG_DEBUG.
     """
     self.os_creds = OpenstackCredentials(username, password, tenant_name, auth_url)
     self.agent = None
     self.num_threads = num_threads
     root_logger.setLevel(log_level)
     self.logger = root_logger.getChild(self.LOG_SUFFIX)
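For context, the constructor documented above can be exercised as in the short, self-contained sketch below. The class name DemoProvisioner, the LOG_* values, and the credential strings are illustrative stand-ins, not part of the actuator API shown in the example.

 # Self-contained sketch mirroring the constructor in Example #2; DemoProvisioner,
 # the LOG_* values, and the credentials are stand-ins for illustration only.
 LOG_INFO, LOG_DEBUG = 20, 10   # stand-ins for actuator's log level constants

 class DemoProvisioner(object):
     def __init__(self, username, password, tenant_name, auth_url,
                  num_threads=5, log_level=LOG_INFO):
         self.username, self.tenant_name = username, tenant_name
         self.num_threads = num_threads
         self.log_level = log_level

 prov = DemoProvisioner("demo_user", "demo_secret", "demo_tenant",
                        "http://127.0.0.1:5000/v2.0/", num_threads=10,
                        log_level=LOG_DEBUG)
 print(prov.num_threads)   # -> 10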
Example #3
File: core.py Project: haxsaw/actuator
 def perform_config(self, completion_record=None):
     """
     Start the agent working on the configuration tasks. This is the method
     the outside world calls when it wants the agent to start processing
     the configuration.
     
     @keyword completion_record: currently unused
     """
     logger = root_logger.getChild(self.exec_agent)
     logger.info("Agent starting task processing")
     if self.namespace_mi and self.config_mi:
         self.config_mi.update_nexus(self.namespace_mi.nexus)
         graph = self.config_mi.get_graph(with_fix=True)
         self.num_tasks_to_perform = len(graph.nodes())
         for n in graph.nodes():
             graph.node[n]["ins_traversed"] = 0
             n.fix_arguments()
         self.stop = False
         # start the workers
         logger.info("Starting workers...")
         for _ in range(self.num_threads):
             worker = threading.Thread(target=self.process_tasks)
             worker.start()
         logger.info("...workers started")
         # queue the initial tasks
         for task in (t for t in graph.nodes() if graph.in_degree(t) == 0):
             logger.debug(
                 "Queueing up %s named %s id %s for performance"
                 % (task.__class__.__name__, task.name, str(task._id))
             )
             self.task_queue.put((graph, task))
         logger.info("Initial tasks queued; waiting for completion")
         # now wait to be signaled it finished
         while not self.stop:
             time.sleep(0.2)
         logger.info("Agent task processing complete")
         if self.aborted_tasks:
             raise self.exception_class(
                 "Tasks aborted causing config to abort; see the execution agent's aborted_tasks list for details"
             )
     else:
         raise ExecutionException("either namespace_model_instance or config_model_instance weren't specified")
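The initial-task loop above seeds the queue with every node of the task graph that has no incoming edges. A standalone sketch of that pattern (plain Python 3 plus NetworkX, not actuator code) looks like this; the task names are hypothetical:

 # Seed a work queue with the DAG's root tasks: nodes with in-degree 0 have no
 # prerequisites, so they can be handed to workers immediately.
 import networkx as nx
 from queue import Queue

 graph = nx.DiGraph()
 graph.add_edges_from([("make_network", "make_server"),
                       ("make_keypair", "make_server")])

 task_queue = Queue()
 for task in (t for t in graph.nodes() if graph.in_degree(t) == 0):
     task_queue.put((graph, task))   # roots go in first; workers pull from here

 while not task_queue.empty():
     _, task = task_queue.get()
     print(task)                     # make_network, make_keypair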
Example #4
File: core.py Project: haxsaw/actuator
 def perform_task(self, graph, task):
     """
     Internal; used to perform a task in the graph. Derived classes implement
     _perform_task() to supply the actual mechanics for the underlying
     task execution system.
     
     @param graph: a NetworkX DiGraph; needed to find the next tasks
         to queue when the current one is done
     @param task: The actual task to perform
     """
     add_suffix = lambda t, sfx: ("task %s named %s id %s->%s" % (t.__class__.__name__, t.name, t._id, sfx))
     logger = root_logger.getChild(self.exec_agent)
     try:
         role_name = task.get_task_role().name
         if isinstance(role_name, AbstractModelReference):
             role_name = role_name.value()
         role_id = task.get_task_role()._id
     except Exception as _:
         role_name = "NO_ROLE"
         role_id = ""
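Examples #3 and #4 both derive their loggers with root_logger.getChild(...). A standalone sketch of that standard-library pattern follows; the logger names are illustrative, not actuator's real names:

 # getChild() derives a hierarchical child logger from a parent logger, so
 # log records can be filtered per agent while sharing the root's handlers.
 import logging

 logging.basicConfig(level=logging.INFO)
 root_logger = logging.getLogger("actuator_demo")

 logger = root_logger.getChild("exec_agent")   # logger named "actuator_demo.exec_agent"
 logger.info("Agent starting task processing")
 logger.debug("not emitted: effective level is INFO")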