def armada_base(self, context):
    """Set up resources shared by all Armada operator steps.

    Pulls the action info and deployment configuration from xcom,
    creates the Armada client, and builds the Deckhand design reference
    used to retrieve the site design documents.

    :param context: Airflow task context dict (provides 'task_instance')
    """
    # Define task_instance
    self.task_instance = context['task_instance']

    # Set up and retrieve values from xcom
    self.xcom_puller = XcomPuller(self.main_dag_name, self.task_instance)
    self.action_info = self.xcom_puller.get_action_info()
    self.dc = self.xcom_puller.get_deployment_configuration()

    # Set up xcom_pusher to push values to xcom
    self.xcom_pusher = XcomPusher(self.task_instance)

    # Logs uuid of action performed by the Operator
    logging.info("Armada Operator for action %s", self.action_info['id'])

    # Retrieve Endpoint Information
    armada_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.armada_svc_type)

    # Set up armada client
    self.armada_client = self._init_armada_client(armada_svc_endpoint,
                                                  self.svc_token)

    # Retrieve DeckHand Endpoint Information
    deckhand_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)

    # Retrieve last committed revision id
    committed_revision_id = self.xcom_puller.get_design_version()

    # Get deckhand design reference url
    self.deckhand_design_ref = self._init_deckhand_design_ref(
        deckhand_svc_endpoint, committed_revision_id)
def get_deckhand_design_ref(self, context):
    """Return the Deckhand design reference URL for the committed revision.

    Resolves the Deckhand service endpoint, pulls the last committed
    revision id from xcom, and joins both into a 'deckhand+' design
    reference path used to retrieve the Design YAMLs.

    :param context: Airflow task context dict (provides 'task_instance')
    :returns: design reference path string
    """
    # Resolve the Deckhand service endpoint
    endpoint = ucp_service_endpoint(self, svc_type='deckhand')
    logging.info("Deckhand endpoint is %s", endpoint)

    # Pull the last committed revision id from xcom. Note that in the
    # case of 'deploy_site' the producing task runs inside the sub-dag
    # '<main_dag>.deckhand_get_design_version', hence the dag_id below.
    revision_id = context['task_instance'].xcom_pull(
        task_ids='deckhand_get_design_version',
        dag_id=self.main_dag_name + '.deckhand_get_design_version')

    # Form the Design Reference Path used to retrieve the Design YAMLs
    return os.path.join("deckhand+" + endpoint,
                        "revisions",
                        str(revision_id),
                        "rendered-documents")
def run_base(self, context):
    """Initialize Deckhand timeouts and client for this operator run.

    Reads the request timeouts from shipyard.conf, resolves the Deckhand
    endpoint, and creates the Deckhand client over the internal endpoint.

    :param context: Airflow task context dict
    :raises AirflowException: if the Deckhand client cannot be created
    """
    # Read and parse shipyard.conf
    config = configparser.ConfigParser()
    config.read(self.shipyard_conf)

    # Initialize variables
    self.deckhand_client_read_timeout = int(config.get(
        'requests_config', 'deckhand_client_read_timeout'))
    self.validation_read_timeout = int(config.get(
        'requests_config', 'validation_read_timeout'))

    # Logs uuid of Shipyard action
    LOG.info("Executing Shipyard Action %s", self.action_info['id'])

    # Retrieve Endpoint Information
    self.deckhand_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)
    LOG.info("Deckhand endpoint is %s", self.deckhand_svc_endpoint)

    # Set up DeckHand Client
    LOG.info("Setting up DeckHand Client...")

    # NOTE: The communication between the Airflow workers
    # and Deckhand happens via the 'internal' endpoint.
    self.deckhandclient = deckhand_client.Client(
        session=self.svc_session, endpoint_type='internal')

    if not self.deckhandclient:
        raise AirflowException('Failed to set up deckhand client!')
def promenade_base(self, context):
    """Collect workflow info and resolve the Promenade endpoint.

    Pulls the workflow information dictionary from xcom and, for the
    'redeploy_server' dag, captures the target server name, failing the
    task when the parameter is missing.

    :param context: Airflow task context dict (provides 'task_instance')
    :raises AirflowException: when the server to redeploy is unknown
    """
    # The task instance gives us access to xcom
    task_instance = context['task_instance']

    # The workflow_info dictionary describes the current workflow:
    # action_id, name and other related parameters
    self.workflow_info = task_instance.xcom_pull(task_ids='action_xcom',
                                                 key='action',
                                                 dag_id=self.main_dag_name)

    # Log the uuid of the Shipyard action being executed
    logging.info("Executing Shipyard Action %s", self.workflow_info['id'])

    # Only the 'redeploy_server' dag carries a target server; capture it
    # and fail fast when the parameter is absent.
    if self.workflow_info['dag_id'] == 'redeploy_server':
        self.redeploy_server = self.workflow_info['parameters'].get(
            'server-name')
        if not self.redeploy_server:
            raise AirflowException('%s was unable to retrieve the '
                                   'server to be redeployed.'
                                   % self.__class__.__name__)
        logging.info("Server to be redeployed is %s", self.redeploy_server)

    # Resolve and record the Promenade service endpoint
    self.promenade_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.promenade_svc_type)
    logging.info("Promenade endpoint is %s", self.promenade_svc_endpoint)
def promenade_base(self, context):
    """Set up xcom values and the Promenade endpoint for this operator.

    Pulls action info and deployment configuration from xcom and, for
    the 'redeploy_server' dag, records the server to be redeployed.

    :param context: Airflow task context dict (provides 'task_instance')
    :raises AirflowException: when the server to redeploy is unknown
    """
    # Define task_instance
    task_instance = context['task_instance']

    # Set up and retrieve values from xcom
    self.xcom_puller = XcomPuller(self.main_dag_name, task_instance)
    self.action_info = self.xcom_puller.get_action_info()
    self.dc = self.xcom_puller.get_deployment_configuration()

    # Logs uuid of Shipyard action
    logging.info("Executing Shipyard Action %s", self.action_info['id'])

    # Retrieve information of the server that we want to redeploy
    # if user executes the 'redeploy_server' dag
    if self.action_info['dag_id'] == 'redeploy_server':
        self.redeploy_server = self.action_info['parameters'].get(
            'server-name')

        if self.redeploy_server:
            logging.info("Server to be redeployed is %s",
                         self.redeploy_server)
        else:
            raise AirflowException('%s was unable to retrieve the '
                                   'server to be redeployed.'
                                   % self.__class__.__name__)

    # Retrieve promenade endpoint
    self.promenade_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.promenade_svc_type)
    logging.info("Promenade endpoint is %s", self.promenade_svc_endpoint)
def deckhand_base(self, context):
    """Set up Deckhand resources shared by Deckhand operator steps.

    Reads timeouts from shipyard.conf, pulls action info from xcom,
    creates the Deckhand client, and (for all tasks except
    'deckhand_get_design_version') retrieves the committed revision id.

    :param context: Airflow task context dict (provides 'task_instance')
    :raises AirflowException: if the client cannot be created or the
        revision id cannot be retrieved
    """
    # Read and parse shipyard.conf
    config = configparser.ConfigParser()
    config.read(self.shipyard_conf)

    # Initialize variables
    self.deckhand_client_read_timeout = int(config.get(
        'requests_config', 'deckhand_client_read_timeout'))
    self.validation_read_timeout = int(config.get(
        'requests_config', 'validation_read_timeout'))

    # Define task_instance
    task_instance = context['task_instance']

    # Set up and retrieve values from xcom
    self.xcom_puller = XcomPuller(self.main_dag_name, task_instance)
    self.action_info = self.xcom_puller.get_action_info()

    # Logs uuid of Shipyard action
    logging.info("Executing Shipyard Action %s", self.action_info['id'])

    # Retrieve Endpoint Information
    self.deckhand_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)
    logging.info("Deckhand endpoint is %s", self.deckhand_svc_endpoint)

    # Set up DeckHand Client
    logging.info("Setting up DeckHand Client...")

    # NOTE: The communication between the Airflow workers
    # and Deckhand happens via the 'internal' endpoint.
    self.deckhandclient = deckhand_client.Client(
        session=self.svc_session, endpoint_type='internal')

    if not self.deckhandclient:
        raise AirflowException('Failed to set up deckhand client!')

    # Retrieve 'revision_id' from xcom for tasks other than
    # 'deckhand_get_design_version'
    #
    # NOTE: In the case of 'deploy_site', the dag_id will
    # be 'deploy_site.deckhand_get_design_version' for the
    # 'deckhand_get_design_version' task. We need to extract
    # the xcom value from it in order to get the value of the
    # last committed revision ID
    if self.task_id != 'deckhand_get_design_version':
        # Retrieve 'revision_id' from xcom
        self.revision_id = self.xcom_puller.get_design_version()

        if self.revision_id:
            logging.info("Revision ID is %d", self.revision_id)
        else:
            raise AirflowException('Failed to retrieve Revision ID!')
def run_base(self, context):
    """Resolve Promenade/Deckhand endpoints and the design reference.

    For the 'redeploy_server' dag, also captures the target server name.
    Requires self.action_info and self.revision_id to be set by an
    earlier setup step.

    :param context: Airflow task context dict
    :raises AirflowException: when the redeploy target or the design
        reference cannot be determined
    """
    # Logs uuid of Shipyard action
    LOG.info("Executing Shipyard Action %s", self.action_info['id'])

    # Retrieve information of the server that we want to redeploy
    # if user executes the 'redeploy_server' dag
    if self.action_info['dag_id'] == 'redeploy_server':
        self.redeploy_server = self.action_info['parameters'].get(
            'server-name')

        if self.redeploy_server:
            LOG.info("Server to be redeployed is %s", self.redeploy_server)
        else:
            raise AirflowException('%s was unable to retrieve the '
                                   'server to be redeployed.'
                                   % self.__class__.__name__)

    # Retrieve promenade endpoint
    self.promenade_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.promenade_svc_type)
    LOG.info("Promenade endpoint is %s", self.promenade_svc_endpoint)

    # Retrieve Deckhand Endpoint Information
    deckhand_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)
    LOG.info("Deckhand endpoint is %s", deckhand_svc_endpoint)

    # Form Deckhand Design Reference Path
    # This URL will be used to retrieve the Site Design YAMLs
    deckhand_path = "deckhand+" + deckhand_svc_endpoint
    self.deckhand_design_ref = os.path.join(deckhand_path,
                                            "revisions",
                                            str(self.revision_id),
                                            "rendered-documents")

    if self.deckhand_design_ref:
        LOG.info("Design YAMLs will be retrieved from %s",
                 self.deckhand_design_ref)
    else:
        raise AirflowException("Unable to Retrieve Deckhand Revision "
                               "%d!" % self.revision_id)
def execute(self, context):
    """Dispatch the requested Deckhand action for the current workflow.

    Either retrieves the design version ('deckhand_get_design_version')
    or validates the site design ('deckhand_validate_site_design').

    :param context: Airflow task context dict (mutated: 'svc_type',
        'svc_endpoint' and 'revision_id' are stored into it)
    :returns: the design version for 'deckhand_get_design_version',
        otherwise None
    :raises AirflowException: if the revision ID cannot be retrieved
    """
    # Initialize Variables
    context['svc_type'] = 'deckhand'
    deckhand_design_version = None

    # Define task_instance
    task_instance = context['task_instance']

    # Extract information related to current workflow
    # The workflow_info variable will be a dictionary
    # that contains information about the workflow such
    # as action_id, name and other related parameters
    workflow_info = task_instance.xcom_pull(task_ids='action_xcom',
                                            key='action',
                                            dag_id=self.main_dag_name)

    # Logs uuid of action performed by the Operator
    logging.info("DeckHand Operator for action %s", workflow_info['id'])

    # Retrieve Endpoint Information
    context['svc_endpoint'] = ucp_service_endpoint(self, context)
    logging.info("Deckhand endpoint is %s", context['svc_endpoint'])

    # Deckhand API Call
    # Retrieve Design Version from DeckHand
    if self.action == 'deckhand_get_design_version':
        # Retrieve DeckHand Design Version
        deckhand_design_version = self.deckhand_get_design(context)

        if deckhand_design_version:
            return deckhand_design_version
        else:
            raise AirflowException('Failed to retrieve revision ID!')

    # Validate Design using DeckHand
    elif self.action == 'deckhand_validate_site_design':
        # Retrieve revision_id from xcom
        # Note that in the case of 'deploy_site', the dag_id will be
        # 'deploy_site.deckhand_get_design_version.deckhand_get_design_version'
        # for the 'deckhand_get_design_version' task. We need to extract
        # the xcom value from it in order to get the value of the last
        # committed revision ID
        #
        # NOTE(review): this relies on '*' binding tighter than '+', so
        # the '.deckhand_get_design_version' suffix is appended twice to
        # main_dag_name, matching the nested sub-dag id described above —
        # confirm this matches the actual sub-dag naming in use.
        context['revision_id'] = task_instance.xcom_pull(
            task_ids='deckhand_get_design_version',
            dag_id=self.main_dag_name + '.deckhand_get_design_version' * 2)

        logging.info("Revision ID is %d", context['revision_id'])
        self.deckhand_validate_site(context)

    # No action to perform
    else:
        logging.info('No Action to Perform')
def run_base(self, context):
    """Prepare the Armada client and Deckhand design reference.

    Sets up an XcomPusher for publishing values, builds the Armada
    client from its service endpoint, and derives the Deckhand design
    reference from the Deckhand endpoint.

    :param context: Airflow task context dict (unused directly here)
    """
    # XcomPusher lets this operator publish values to xcom
    self.xcom_pusher = XcomPusher(self.task_instance)

    # Log the uuid of the action handled by this operator
    LOG.info("Armada Operator for action %s", self.action_info['id'])

    # Look up the Armada service endpoint and build the client from it
    endpoint_for_armada = ucp_service_endpoint(
        self, svc_type=self.armada_svc_type)
    self.armada_client = self._init_armada_client(endpoint_for_armada,
                                                  self.svc_token)

    # Look up the Deckhand endpoint and derive the design reference url
    endpoint_for_deckhand = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)
    self.deckhand_design_ref = self._init_deckhand_design_ref(
        endpoint_for_deckhand)
def execute(self, context):
    """Run an HTTP health check against every UCP component.

    Probes each component's '/health' endpoint; a 204 response with an
    empty body indicates the component is healthy. Transport failures
    are delegated to self.log_health_exception rather than aborting
    the loop.

    :param context: Airflow task context dict (provides 'task_instance')
    """
    # Initialize variable
    ucp_components = [
        'armada',
        'deckhand',
        'kubernetesprovisioner',
        'physicalprovisioner',
        'shipyard'
    ]

    # Define task_instance
    self.task_instance = context['task_instance']

    # Set up and retrieve values from xcom
    self.xcom_puller = XcomPuller(self.main_dag_name, self.task_instance)
    self.action_info = self.xcom_puller.get_action_info()

    # Set up xcom_pusher to push values to xcom
    self.xcom_pusher = XcomPusher(self.task_instance)

    # Loop through various UCP Components
    for component in ucp_components:
        # Retrieve Endpoint Information
        service_endpoint = ucp_service_endpoint(self, svc_type=component)
        LOG.info("%s endpoint is %s", component, service_endpoint)

        # Construct Health Check Endpoint
        healthcheck_endpoint = os.path.join(service_endpoint, 'health')
        LOG.info("%s healthcheck endpoint is %s", component,
                 healthcheck_endpoint)

        try:
            LOG.info("Performing Health Check on %s", component)

            # Set health check timeout to 30 seconds
            req = requests.get(healthcheck_endpoint, timeout=30)

            # An empty response/body returned by a component means
            # that it is healthy
            if req.status_code == 204:
                LOG.info("%s is alive and healthy", component)
        except requests.exceptions.RequestException as e:
            self.log_health_exception(component, e)
def execute(self, context):
    """Run an HTTP health check against every UCP component endpoint.

    Each component's '/health' endpoint is expected to answer 204 with
    an empty body; any transport error or non-204 response aborts the
    task with an AirflowException.

    :param context: Airflow task context dict (unused directly here)
    :raises AirflowException: on request failure or unhealthy response
    """
    # Components whose service endpoints will be probed
    ucp_components = [
        'armada',
        'deckhand',
        'kubernetesprovisioner',
        'physicalprovisioner',
        'shipyard']

    for component in ucp_components:
        # Resolve the component's service endpoint
        service_endpoint = ucp_service_endpoint(self, svc_type=component)
        logging.info("%s endpoint is %s", component, service_endpoint)

        # The health check URL is the service endpoint plus '/health'
        healthcheck_endpoint = os.path.join(service_endpoint, 'health')
        logging.info("%s healthcheck endpoint is %s", component,
                     healthcheck_endpoint)

        try:
            logging.info("Performing Health Check on %s", component)
            # Cap each health check at 30 seconds
            response = requests.get(healthcheck_endpoint, timeout=30)
        except requests.exceptions.RequestException as e:
            raise AirflowException(e)

        # A healthy component answers 204 with an empty body; anything
        # else is treated as a failed health check
        if response.status_code != 204:
            logging.error(response.text)
            raise AirflowException("Invalid Response!")

        logging.info("%s is alive and healthy", component)
def run_base(self, context):
    """Log the running action and resolve the Promenade endpoint.

    For the 'redeploy_server' dag, also captures the target server name
    from the action parameters, failing the task when it is missing.

    :param context: Airflow task context dict (unused directly here)
    :raises AirflowException: when the server to redeploy is unknown
    """
    # Log the uuid of the Shipyard action being executed
    LOG.info("Executing Shipyard Action %s", self.action_info['id'])

    # Only the 'redeploy_server' dag carries a target server; capture it
    # and fail fast when the parameter is absent.
    if self.action_info['dag_id'] == 'redeploy_server':
        self.redeploy_server = self.action_info['parameters'].get(
            'server-name')
        if not self.redeploy_server:
            raise AirflowException('%s was unable to retrieve the '
                                   'server to be redeployed.'
                                   % self.__class__.__name__)
        LOG.info("Server to be redeployed is %s", self.redeploy_server)

    # Resolve and record the Promenade service endpoint
    self.promenade_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.promenade_svc_type)
    LOG.info("Promenade endpoint is %s", self.promenade_svc_endpoint)
def drydock_query_task(self, drydock_client, context, interval, time_out,
                       task_id):
    """Poll Drydock until the given task completes or times out.

    Queries the task state every `interval` seconds for up to `time_out`
    seconds, transparently rebuilding the Drydock session when the
    keystone token expires mid-poll.

    :param drydock_client: Drydock client used to query task status
    :param context: Airflow task context dict (kept for signature parity
        with the other drydock helpers; not used here)
    :param interval: polling interval in seconds (string or int)
    :param time_out: overall timeout in seconds (string or int)
    :param task_id: Drydock task id to poll
    :raises AirflowException: on forbidden/client errors, on timeout, or
        when the task finishes without a 'success' result
    """
    # Initialize Variables
    keystone_token_expired = False
    new_dd_client = None
    dd_client = drydock_client

    # FIX: initialize polling state up front so a failure on the very
    # first get_task call cannot leave these names unbound (NameError)
    # when they are inspected after the try block.
    task_status = None
    task_result = None

    # Calculate number of times to execute the 'for' loop
    # Convert 'time_out' and 'interval' from string into integer
    # The result from the division will be a floating number which
    # we will round off to nearest whole number
    end_range = round(int(time_out) / int(interval))

    # Query task status
    for i in range(0, end_range + 1):

        if keystone_token_expired:
            logging.info("Established new drydock session")
            dd_client = new_dd_client

        try:
            # Retrieve current task state
            task_state = dd_client.get_task(task_id=task_id)
            task_status = task_state.get('status')
            task_result = task_state.get('result')['status']
            logging.info("Current status of task id %s is %s",
                         task_id, task_status)
            keystone_token_expired = False
        except errors.ClientUnauthorizedError as unauthorized_error:
            # TODO: This is a temporary workaround. Drydock will be
            # updated with the appropriate fix in the drydock api
            # client by having the session detect a 401/403 response
            # and refresh the token appropriately.
            # Logs drydock client unauthorized error
            keystone_token_expired = True
            logging.error(unauthorized_error)

            # Set up new drydock client with new keystone token
            logging.info("Setting up new drydock session...")
            drydock_svc_endpoint = ucp_service_endpoint(
                self, svc_type='physicalprovisioner')
            new_dd_client = self.drydock_session_client(
                drydock_svc_endpoint)
        except errors.ClientForbiddenError as forbidden_error:
            raise AirflowException(forbidden_error)
        except errors.ClientError as client_error:
            raise AirflowException(client_error)
        except Exception:
            # FIX: narrowed from a bare 'except:' so system-exiting
            # exceptions (KeyboardInterrupt, SystemExit) still propagate.
            # There can be instances where there are intermittent network
            # issues that prevents us from retrieving the task state. We
            # will want to retry in such situations.
            logging.info("Unable to retrieve task state. Retrying...")

        # Raise Time Out Exception
        if task_status == 'running' and i == end_range:
            raise AirflowException("Task Execution Timed Out!")

        # Exit 'for' loop if the task is in 'complete' or 'terminated'
        # state
        if task_status in ['complete', 'terminated']:
            logging.info('Task result is %s', task_result)
            break
        else:
            time.sleep(int(interval))

    # Get final task result
    if task_result == 'success':
        logging.info('Task id %s has been successfully completed',
                     self.task_id)
    else:
        raise AirflowException("Failed to execute/complete task!")
def drydock_base(self, context):
    """Set up Drydock session/client and the Deckhand design reference.

    Pulls action info from xcom, builds a DrydockSession and
    DrydockClient against the physical provisioner endpoint, and forms
    the Deckhand design reference URL for the committed revision.

    :param context: Airflow task context dict (provides 'task_instance')
    :raises AirflowException: when the redeploy target, session, client
        or design reference cannot be established
    """
    # Initialize Variables
    drydock_url = None
    dd_session = None

    # Define task_instance
    task_instance = context['task_instance']

    # Set up and retrieve values from xcom
    self.xcom_puller = XcomPuller(self.main_dag_name, task_instance)
    self.action_info = self.xcom_puller.get_action_info()
    self.dc = self.xcom_puller.get_deployment_configuration()

    # Logs uuid of action performed by the Operator
    logging.info("DryDock Operator for action %s", self.action_info['id'])

    # Retrieve information of the server that we want to redeploy if user
    # executes the 'redeploy_server' dag
    # Set node filter to be the server that we want to redeploy
    if self.action_info['dag_id'] == 'redeploy_server':
        self.redeploy_server = (
            self.action_info['parameters']['server-name'])

        if self.redeploy_server:
            logging.info("Server to be redeployed is %s",
                         self.redeploy_server)
            self.node_filter = self.redeploy_server
        else:
            raise AirflowException('Unable to retrieve information of '
                                   'node to be redeployed!')

    # Retrieve Endpoint Information
    self.drydock_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.drydock_svc_type)
    logging.info("Drydock endpoint is %s", self.drydock_svc_endpoint)

    # Parse DryDock Service Endpoint
    drydock_url = urlparse(self.drydock_svc_endpoint)

    # Build a DrydockSession with credentials and target host
    # information.
    # The DrydockSession will care for TCP connection pooling
    # and header management
    logging.info("Build DryDock Session")
    dd_session = session.DrydockSession(drydock_url.hostname,
                                        port=drydock_url.port,
                                        auth_gen=self._auth_gen)

    # Raise Exception if we are not able to set up the session
    if dd_session:
        logging.info("Successfully Set Up DryDock Session")
    else:
        raise AirflowException("Failed to set up Drydock Session!")

    # Use the DrydockSession to build a DrydockClient that can
    # be used to make one or more API calls
    logging.info("Create DryDock Client")
    self.drydock_client = client.DrydockClient(dd_session)

    # Raise Exception if we are not able to build the client
    if self.drydock_client:
        logging.info("Successfully Set Up DryDock client")
    else:
        raise AirflowException("Failed to set up Drydock Client!")

    # Retrieve DeckHand Endpoint Information
    deckhand_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)
    logging.info("Deckhand endpoint is %s", deckhand_svc_endpoint)

    # Retrieve last committed revision id
    committed_revision_id = self.xcom_puller.get_design_version()

    # Form DeckHand Design Reference Path
    # This URL will be used to retrieve the Site Design YAMLs
    deckhand_path = "deckhand+" + deckhand_svc_endpoint
    self.deckhand_design_ref = os.path.join(deckhand_path,
                                            "revisions",
                                            str(committed_revision_id),
                                            "rendered-documents")

    if self.deckhand_design_ref:
        logging.info("Design YAMLs will be retrieved from %s",
                     self.deckhand_design_ref)
    else:
        raise AirflowException("Unable to Retrieve Design Reference!")
def execute(self, context):
    """Dispatch the requested Armada action for the current workflow.

    Builds the Deckhand design reference, then either validates the
    site design or creates an Armada client and runs the status/apply/
    get-releases action named by ``self.action``.

    :param context: Airflow task context dict (mutated: 'query' is set
        from the tiller info)
    :returns: site design validity string for 'validate_site_design',
        otherwise None
    :raises AirflowException: on missing design reference or failed
        site design validation
    """
    # Initialize Variables
    armada_client = None
    design_ref = None

    # Define task_instance
    task_instance = context['task_instance']

    # Extract information related to current workflow
    # The workflow_info variable will be a dictionary
    # that contains information about the workflow such
    # as action_id, name and other related parameters
    workflow_info = task_instance.xcom_pull(task_ids='action_xcom',
                                            key='action',
                                            dag_id=self.main_dag_name)

    # Logs uuid of action performed by the Operator
    logging.info("Armada Operator for action %s", workflow_info['id'])

    # Retrieve Deckhand Design Reference
    design_ref = self.get_deckhand_design_ref(context)

    if design_ref:
        logging.info("Design YAMLs will be retrieved from %s", design_ref)
    else:
        raise AirflowException("Unable to Retrieve Design Reference!")

    # Validate Site Design
    if self.action == 'validate_site_design':
        # Initialize variable
        armada_svc_endpoint = None
        site_design_validity = 'invalid'

        # Retrieve Endpoint Information
        svc_type = 'armada'
        armada_svc_endpoint = ucp_service_endpoint(self,
                                                   svc_type=svc_type)

        site_design_validity = self.armada_validate_site_design(
            armada_svc_endpoint, design_ref)

        if site_design_validity == 'valid':
            logging.info("Site Design has been successfully validated")
        else:
            raise AirflowException("Site Design Validation Failed!")

        return site_design_validity

    # Create Armada Client
    # Retrieve Endpoint Information
    svc_type = 'armada'
    armada_svc_endpoint = ucp_service_endpoint(self, svc_type=svc_type)
    logging.info("Armada endpoint is %s", armada_svc_endpoint)

    # Set up Armada Client
    armada_client = self.armada_session_client(armada_svc_endpoint)

    # Retrieve Tiller Information and assign to context 'query'
    context['query'] = self.get_tiller_info(context)

    # Armada API Call
    # Armada Status
    if self.action == 'armada_status':
        self.get_armada_status(context, armada_client)

    # Armada Apply
    elif self.action == 'armada_apply':
        # TODO (bryan-strassner) externalize the name of the manifest to
        # use; this needs to come from a site configuration document for
        # consumption by shipyard/airflow. For now, "full-site" is the
        # only value that will work.
        target_manifest = 'full-site'
        self.armada_apply(context, armada_client, design_ref,
                          target_manifest)

    # Armada Get Releases
    elif self.action == 'armada_get_releases':
        self.armada_get_releases(context, armada_client)

    else:
        logging.info('No Action to Perform')
def execute(self, context):
    """Dispatch the requested Drydock action for the current workflow.

    Pulls workflow info from xcom, builds the Deckhand design
    reference, and then either validates the site design or creates and
    polls the Drydock task matching ``self.action`` (verify_site,
    prepare_site, prepare_nodes, deploy_nodes, destroy_node).

    :param context: Airflow task context dict (provides 'task_instance')
    :returns: site design validity string for 'validate_site_design',
        otherwise None
    :raises AirflowException: on missing redeploy target, missing design
        reference, or unreadable shipyard.conf
    """
    # Initialize Variable
    redeploy_server = None

    # Placeholder definition
    # TODO: Need to decide how to pass the required value from Shipyard to
    # the 'node_filter' variable. No filter will be used for now.
    self.node_filter = None

    # Define task_instance
    task_instance = context['task_instance']

    # Extract information related to current workflow
    # The workflow_info variable will be a dictionary
    # that contains information about the workflow such
    # as action_id, name and other related parameters
    workflow_info = task_instance.xcom_pull(
        task_ids='action_xcom', key='action',
        dag_id=self.main_dag_name)

    # Logs uuid of action performed by the Operator
    logging.info("DryDock Operator for action %s", workflow_info['id'])

    # Retrieve information of the server that we want to redeploy if user
    # executes the 'redeploy_server' dag
    # Set node filter to be the server that we want to redeploy
    if workflow_info['dag_id'] == 'redeploy_server':
        redeploy_server = workflow_info['parameters'].get('server-name')

        if redeploy_server:
            logging.info("Server to be redeployed is %s", redeploy_server)
            self.node_filter = redeploy_server
        else:
            raise AirflowException('Unable to retrieve information of '
                                   'node to be redeployed!')

    # Retrieve Deckhand Design Reference
    self.design_ref = self.get_deckhand_design_ref(context)

    if self.design_ref:
        logging.info("Drydock YAMLs will be retrieved from %s",
                     self.design_ref)
    else:
        raise AirflowException("Unable to Retrieve Design Reference!")

    # Drydock Validate Site Design
    if self.action == 'validate_site_design':
        # Initialize variable
        site_design_validity = 'invalid'

        # Retrieve Endpoint Information
        svc_type = 'physicalprovisioner'
        drydock_svc_endpoint = ucp_service_endpoint(self,
                                                    svc_type=svc_type)

        site_design_validity = self.drydock_validate_design(
            drydock_svc_endpoint)

        return site_design_validity

    # DrydockClient
    # Retrieve Endpoint Information
    svc_type = 'physicalprovisioner'
    drydock_svc_endpoint = ucp_service_endpoint(self,
                                                svc_type=svc_type)
    logging.info("DryDock endpoint is %s", drydock_svc_endpoint)

    # Set up DryDock Client
    drydock_client = self.drydock_session_client(drydock_svc_endpoint)

    # Read shipyard.conf
    # FIX: the original called config.read twice (an unconditional read
    # followed by a checked one); ConfigParser.read returns the list of
    # files successfully parsed, so one checked call both loads the file
    # and verifies it was readable.
    config = configparser.ConfigParser()

    if not config.read(self.shipyard_conf):
        raise AirflowException("Unable to read content of shipyard.conf")

    # Create Task for verify_site
    if self.action == 'verify_site':
        # Default settings for 'verify_site' execution is to query
        # the task every 10 seconds and to time out after 60 seconds
        query_interval = config.get('drydock',
                                    'verify_site_query_interval')
        task_timeout = config.get('drydock', 'verify_site_task_timeout')

        self.drydock_action(drydock_client, context, self.action,
                            query_interval, task_timeout)

    # Create Task for prepare_site
    elif self.action == 'prepare_site':
        # Default settings for 'prepare_site' execution is to query
        # the task every 10 seconds and to time out after 300 seconds
        query_interval = config.get('drydock',
                                    'prepare_site_query_interval')
        task_timeout = config.get('drydock', 'prepare_site_task_timeout')

        self.drydock_action(drydock_client, context, self.action,
                            query_interval, task_timeout)

    # Create Task for prepare_node
    elif self.action == 'prepare_nodes':
        # Default settings for 'prepare_node' execution is to query
        # the task every 30 seconds and to time out after 1800 seconds
        query_interval = config.get('drydock',
                                    'prepare_node_query_interval')
        task_timeout = config.get('drydock', 'prepare_node_task_timeout')

        self.drydock_action(drydock_client, context, self.action,
                            query_interval, task_timeout)

    # Create Task for deploy_node
    elif self.action == 'deploy_nodes':
        # Default settings for 'deploy_node' execution is to query
        # the task every 30 seconds and to time out after 3600 seconds
        query_interval = config.get('drydock',
                                    'deploy_node_query_interval')
        task_timeout = config.get('drydock', 'deploy_node_task_timeout')

        self.drydock_action(drydock_client, context, self.action,
                            query_interval, task_timeout)

        # Wait for 120 seconds (default value) before checking the cluster
        # join process as it takes time for process to be triggered across
        # all nodes
        cluster_join_check_backoff_time = config.get(
            'drydock', 'cluster_join_check_backoff_time')
        logging.info("All nodes deployed in MAAS")
        logging.info("Wait for %d seconds before checking node state...",
                     int(cluster_join_check_backoff_time))
        time.sleep(int(cluster_join_check_backoff_time))

        # Check that cluster join process is completed before declaring
        # deploy_node as 'completed'. Set time out to 30 minutes and set
        # polling interval to 30 seconds.
        check_node_status(1800, 30)

    # Create Task for destroy_node
    # NOTE: This is a PlaceHolder function. The 'destroy_node'
    # functionalities in DryDock is being worked on and is not
    # ready at the moment.
    elif self.action == 'destroy_node':
        # Default settings for 'destroy_node' execution is to query
        # the task every 30 seconds and to time out after 900 seconds
        query_interval = config.get('drydock',
                                    'destroy_node_query_interval')
        task_timeout = config.get('drydock', 'destroy_node_task_timeout')

        logging.info("Destroying node %s from cluster...", redeploy_server)
        time.sleep(15)
        logging.info("Successfully deleted node %s", redeploy_server)

        # TODO: Uncomment when the function to destroy/delete node is
        # ready for consumption in Drydock
        # self.drydock_action(drydock_client, context, self.action,
        #                     query_interval, task_timeout)

    # Do not perform any action
    else:
        logging.info('No Action to Perform')
def run_base(self, context):
    """Set up Drydock session/client and the Deckhand design reference.

    Skips all processing when Drydock health checks failed with the
    continue-on-fail option enabled. Otherwise builds the Drydock
    session and client and forms the Deckhand design reference from
    self.revision_id (set by an earlier setup step).

    :param context: Airflow task context dict (unused directly here)
    :raises AirflowException: when the redeploy target, session, client
        or design reference cannot be established
    """
    # Logs uuid of action performed by the Operator
    LOG.info("DryDock Operator for action %s", self.action_info['id'])

    # Skip workflow if health checks on Drydock failed and continue-on-fail
    # option is turned on
    if self.xcom_puller.get_check_drydock_continue_on_fail():
        LOG.info(
            "Skipping %s as health checks on Drydock have "
            "failed and continue-on-fail option has been "
            "turned on", self.__class__.__name__)

        # Set continue processing to False
        self.continue_processing = False

        return

    # Retrieve information of the server that we want to redeploy if user
    # executes the 'redeploy_server' dag
    # Set node filter to be the server that we want to redeploy
    if self.action_info['dag_id'] == 'redeploy_server':
        self.redeploy_server = (
            self.action_info['parameters']['server-name'])

        if self.redeploy_server:
            LOG.info("Server to be redeployed is %s",
                     self.redeploy_server)
            self.node_filter = self.redeploy_server
        else:
            raise AirflowException('%s was unable to retrieve the '
                                   'server to be redeployed.'
                                   % self.__class__.__name__)

    # Retrieve Endpoint Information
    self.drydock_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.drydock_svc_type)
    LOG.info("Drydock endpoint is %s", self.drydock_svc_endpoint)

    # Parse DryDock Service Endpoint
    drydock_url = urlparse(self.drydock_svc_endpoint)

    # Build a DrydockSession with credentials and target host
    # information.
    # The DrydockSession will care for TCP connection pooling
    # and header management
    LOG.info("Build DryDock Session")
    dd_session = session.DrydockSession(drydock_url.hostname,
                                        port=drydock_url.port,
                                        auth_gen=self._auth_gen)

    # Raise Exception if we are not able to set up the session
    if dd_session:
        LOG.info("Successfully Set Up DryDock Session")
    else:
        raise AirflowException("Failed to set up Drydock Session!")

    # Use the DrydockSession to build a DrydockClient that can
    # be used to make one or more API calls
    LOG.info("Create DryDock Client")
    self.drydock_client = client.DrydockClient(dd_session)

    # Raise Exception if we are not able to build the client
    if self.drydock_client:
        LOG.info("Successfully Set Up DryDock client")
    else:
        raise AirflowException("Failed to set up Drydock Client!")

    # Retrieve DeckHand Endpoint Information
    deckhand_svc_endpoint = ucp_service_endpoint(
        self, svc_type=self.deckhand_svc_type)
    LOG.info("Deckhand endpoint is %s", deckhand_svc_endpoint)

    # Form DeckHand Design Reference Path
    # This URL will be used to retrieve the Site Design YAMLs
    deckhand_path = "deckhand+" + deckhand_svc_endpoint
    self.deckhand_design_ref = os.path.join(deckhand_path,
                                            "revisions",
                                            str(self.revision_id),
                                            "rendered-documents")

    if self.deckhand_design_ref:
        LOG.info("Design YAMLs will be retrieved from %s",
                 self.deckhand_design_ref)
    else:
        raise AirflowException("Unable to Retrieve Design Reference!")