def test_run_lifecycle(self):
        # Route test logging to stdout so it is captured by the test runner - see
        # https://stackoverflow.com/questions/7472863/pydev-unittesting-how-to-capture-text-logged-to-a-logging-logger-in-captured-o
        stream_handler.stream = sys.stdout

        request_id = uuid.uuid4().hex
        self.mock_ansible_client.run_lifecycle_playbook.return_value = LifecycleExecution(
            request_id, STATUS_COMPLETE, None, {'prop1': 'output__value1'})

        request = {
            'lifecycle_name': 'Install',
            'driver_files': DirectoryTree(self.tmp_workspace),
            'system_properties': PropValueMap({}),
            'resource_properties': PropValueMap({}),
            'deployment_location': {'properties': {'testPropA': 'A'}},
            'request_id': request_id
        }
        request_handler = AnsibleRequestHandler(self.mock_messaging_service,
                                                self.mock_ansible_client)
        request_handler.handle_request(request)

        # The handler should forward exactly the execution produced by the client
        self.check_response_only(
            LifecycleExecution(request_id, STATUS_COMPLETE, None,
                               {'prop1': 'output__value1'}))
 def get_result(self):
     # Map the recorded playbook outcome to a lifecycle response: a failed
     # run carries its failure details, a successful one carries none.
     if self.playbook_failed:
         status, failure = STATUS_FAILED, self.failure_details
     else:
         status, failure = STATUS_COMPLETE, None
     return LifecycleExecution(self.request_id, status, failure,
                               self.properties)
Example #3
0
    def handle_request(self, request):
        """Validate a lifecycle request and execute its Ansible playbook.

        Mandatory fields (request_id, lifecycle_name, driver_files) are
        checked first; if one is missing a STATUS_FAILED LifecycleExecution
        is sent and processing stops. Otherwise the playbook is run and the
        result (if any) is forwarded to the response queue.
        """
        try:
            if request is None:
                logger.warning('Null lifecycle request from request queue')
                return

            if request.get('logging_context', None) is not None:
                logging_context.set_from_dict(request['logging_context'])

            # Fix: return after reporting each missing field. Previously the
            # code fell through, which could KeyError on request['request_id']
            # and/or run the playbook for an invalid request.
            if 'request_id' not in request:
                self.messaging_service.send_lifecycle_execution(
                    LifecycleExecution(
                        None, STATUS_FAILED,
                        FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                       "Request must have a request_id"),
                        {}))
                return
            if 'lifecycle_name' not in request:
                self.messaging_service.send_lifecycle_execution(
                    LifecycleExecution(
                        request['request_id'], STATUS_FAILED,
                        FailureDetails(
                            FAILURE_CODE_INTERNAL_ERROR,
                            "Request must have a lifecycle_name"), {}))
                return
            if 'driver_files' not in request:
                self.messaging_service.send_lifecycle_execution(
                    LifecycleExecution(
                        request['request_id'], STATUS_FAILED,
                        FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                       "Request must have a driver_files"),
                        {}))
                return

            # run the playbook and send the response to the response queue
            logger.debug(
                'Ansible worker running request {0}'.format(request))
            result = self.ansible_client.run_lifecycle_playbook(request)
            if result is not None:
                logger.debug(
                    'Ansible worker finished for request {0}: {1}'.format(
                        request, result))
                self.messaging_service.send_lifecycle_execution(result)
            else:
                logger.warning(
                    "Empty response from Ansible worker for request {0}".
                    format(request))
        except Exception as e:
            logger.error('Unexpected exception {0}'.format(e))
            traceback.print_exc(file=sys.stderr)
            # don't want the worker to die without knowing the cause, so catch all exceptions
            if request is not None:
                # use .get so a request missing request_id cannot raise again here
                self.messaging_service.send_lifecycle_execution(
                    LifecycleExecution(
                        request.get('request_id', None), STATUS_FAILED,
                        FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                       "Unexpected exception: {0}".format(e)),
                        {}))
        finally:
            # clean up zombie processes (Ansible can leave these behind);
            # calling active_children() joins any finished child processes
            for p in active_children():
                logger.debug("removed zombie process {0}".format(p.name))
 def __build_execution_response(self, stack, request_id):
     """Build a LifecycleExecution from a Heat stack's current state.

     The status is derived from the stack_status field according to the
     request type encoded in the request_id; a failed status carries the
     stack's status reason, and only create requests return stack outputs.
     """
     request_type, stack_id, operation_id = self.__split_request_id(
         request_id)
     stack_status = stack.get('stack_status', None)
     failure_details = None
     if request_type == CREATE_REQUEST_PREFIX:
         status = self.__determine_create_status(request_id, stack_id,
                                                 stack_status)
     else:
         status = self.__determine_delete_status(request_id, stack_id,
                                                 stack_status)
     if status == STATUS_FAILED:
         # stack_status_reason explains why the stack operation failed
         # (removed a dead duplicate assignment of this value)
         description = stack.get('stack_status_reason', None)
         failure_details = FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR,
                                          description)
     outputs = None
     if request_type == CREATE_REQUEST_PREFIX:
         outputs_from_stack = stack.get('outputs', [])
         outputs = self.__translate_outputs_to_values_dict(
             outputs_from_stack)
     return LifecycleExecution(request_id,
                               status,
                               failure_details=failure_details,
                               outputs=outputs)
    def test_run_lifecycle_keep_files(self):
        # Attach a stdout handler so logging from the code under test is
        # visible while the test runs; detached again in the finally block.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        try:
            request_id = uuid.uuid4().hex

            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in [
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                ]
            })
            system_properties = PropValueMap({})

            # Work on a disposable copy of the playbook directory so the test
            # can verify it is kept afterwards.
            base_dir = os.getcwd()
            src = base_dir + '/tests/resources/ansible'
            dst = base_dir + '/tests/resources/ansible-copy'
            shutil.rmtree(dst, ignore_errors=True)
            shutil.copytree(src, dst)

            resp = self.ansible_client.run_lifecycle_playbook({
                'lifecycle_name': 'install',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "type",
                    'properties': PropValueMap({})
                },
                'keep_files': True,
                'request_id': request_id
            })

            self.assertLifecycleExecutionEqual(
                resp,
                LifecycleExecution(request_id, STATUS_COMPLETE, None,
                                   {'msg': "hello there!"}))
            # keep_files=True must leave the copied directory in place
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
    def test_run_lifecycle_missing_lifecycle_name(self):
        # Route test logging to stdout so it is captured by the test runner - see
        # https://stackoverflow.com/questions/7472863/pydev-unittesting-how-to-capture-text-logged-to-a-logging-logger-in-captured-o
        stream_handler.stream = sys.stdout

        request_id = uuid.uuid4().hex

        # Deliberately omit 'lifecycle_name' from the request
        request = {
            'request_id': request_id,
            'driver_files': DirectoryTree(self.tmp_workspace),
            'system_properties': PropValueMap({}),
            'resource_properties': PropValueMap({}),
            'deployment_location': PropValueMap({})
        }
        handler = AnsibleRequestHandler(self.mock_messaging_service,
                                        self.mock_ansible_client)
        handler.handle_request(request)

        expected_failure = FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                          "Request must have a lifecycle_name")
        self.check_response_only(
            LifecycleExecution(request_id, STATUS_FAILED, expected_failure,
                               {}))
Example #7
0
    def handle_request(self, request):
        """Convert a raw queue lifecycle request into driver types and dispatch it.

        Validates that every mandatory field is present and non-None
        (replying with a STATUS_FAILED execution on the first missing one),
        wraps the raw values in their driver types, then hands the request
        to the lifecycle request handler. Unexpected errors are reported
        back on the responses topic instead of crashing the consumer.
        """
        try:
            partition = request.partition
            offset = request.offset
            request_as_dict = request.as_new_dict()
            request_id = request_as_dict.get('request_id', None)

            # Data-driven validation replaces seven identical copy-pasted
            # blocks; messages and behavior are unchanged.
            required_fields = ('lifecycle_name', 'driver_files',
                               'system_properties', 'resource_properties',
                               'request_properties', 'associated_topology',
                               'deployment_location')
            for field in required_fields:
                if request_as_dict.get(field) is None:
                    msg = 'Lifecycle request for partition {0} offset {1} is missing {2}.'.format(partition, offset, field)
                    logger.warning(msg)
                    self.messaging_service.send_lifecycle_execution(LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, msg), {}))
                    return

            # Wrap the raw request values in their driver-side types
            file_name = '{0}'.format(str(uuid.uuid4()))
            request_as_dict['driver_files'] = self.driver_files_manager.build_tree(file_name, request_as_dict['driver_files'])
            request_as_dict['resource_properties'] = PropValueMap(request_as_dict['resource_properties'])
            request_as_dict['system_properties'] = PropValueMap(request_as_dict['system_properties'])
            request_as_dict['request_properties'] = PropValueMap(request_as_dict['request_properties'])
            request_as_dict['associated_topology'] = AssociatedTopology.from_dict(request_as_dict['associated_topology'])

            self.lifecycle_request_handler.handle_request(request_as_dict)
        except Exception as e:
            try:
                self.messaging_service.send_lifecycle_execution(LifecycleExecution(request.request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, str(e)), {}))
            except Exception as send_error:
                # just log this and carry on (renamed from 'e' so the
                # original error is not shadowed)
                logger.exception('Caught exception sending lifecycle response for driver request {0} for topic {1} : {2}'.format(request.request_id, self.request_queue_config.topic.name, str(send_error)))
Example #8
0
 def job_handler(self, job_definition):
     """Poll the status of a lifecycle request and forward terminal results.

     Returns True when the job is finished (or should be discarded) and
     False when it should be re-queued for another status poll.
     """
     if 'request_id' not in job_definition or job_definition[
             'request_id'] is None:
         logger.warning(
             'Job with {0} job type is missing request_id. This job has been discarded'
             .format(LIFECYCLE_EXECUTION_MONITOR_JOB_TYPE))
         return True
     if 'deployment_location' not in job_definition or job_definition[
             'deployment_location'] is None:
         logger.warning(
             'Job with {0} job type is missing deployment_location. This job has been discarded'
             .format(LIFECYCLE_EXECUTION_MONITOR_JOB_TYPE))
         return True
     request_id = job_definition['request_id']
     deployment_location = job_definition['deployment_location']
     try:
         lifecycle_execution_task = self.handler.get_lifecycle_execution(
             request_id, deployment_location)
     except RequestNotFoundError:
         # Request gone: stop monitoring, nothing to report
         logger.debug(
             'Request with ID {0} not found, the request will no longer be monitored'
             .format(request_id))
         return True
     except TemporaryResourceDriverError as e:
         logger.exception(
             'Temporary error occurred checking status of request with ID {0}. The job will be re-queued: {1}'
             .format(request_id, str(e)))
         return False
     except Exception as e:
         logger.exception(
             'Unexpected error occurred checking status of request with ID {0}. A failure response will be posted and the job will NOT be re-queued: {1}'
             .format(request_id, str(e)))
         lifecycle_execution_task = LifecycleExecution(
             request_id, STATUS_FAILED,
             FailureDetails(FAILURE_CODE_INTERNAL_ERROR, str(e)))
         self.lifecycle_messaging_service.send_lifecycle_execution(
             lifecycle_execution_task)
         return True
     status = lifecycle_execution_task.status
     if status in [STATUS_COMPLETE, STATUS_FAILED]:
         self.lifecycle_messaging_service.send_lifecycle_execution(
             lifecycle_execution_task)
         if hasattr(self.handler, 'post_lifecycle_response'):
             try:
                 # Fix: this was an f-string combined with .format(), which
                 # evaluated {0} as the literal integer 0 and so always
                 # logged "ID: 0" instead of the request ID.
                 logger.debug(
                     'Calling post_lifecycle_response for request with ID: {0}'
                     .format(request_id))
                 self.handler.post_lifecycle_response(
                     request_id, deployment_location)
             except Exception as e:
                 logger.exception(
                     'Unexpected error occurred on post_lifecycle_response for request with ID {0}. This error has no impact on the response: {1}'
                     .format(request_id, str(e)))
         return True
     return False
    def ansible_missing_associated_topology_id_in_fact(self):
        # Attach a stdout handler so logging from the code under test is
        # visible while the test runs; detached again in the finally block.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        try:
            request_id = uuid.uuid4().hex

            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in [
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                ]
            })
            system_properties = PropValueMap({})

            resources_dir = str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_missing_associated_topology_id_in_fact'
            dst = self.__copy_directory_tree(resources_dir)

            resp = self.ansible_client.run_lifecycle_playbook({
                'lifecycle_name': 'create',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'keep_files': True,
                'request_id': request_id
            })

            # A fact without an id contributes nothing: empty outputs, no topology
            self.assertLifecycleExecutionEqual(resp, LifecycleExecution(request_id, STATUS_COMPLETE, None, {}, None))
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
    def test_run_lifecycle_with_missing_inventory(self):
        # Attach a stdout handler so logging from the code under test is
        # visible while the test runs; detached again in the finally block.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        try:
            request_id = uuid.uuid4().hex

            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in [
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                ]
            })
            system_properties = PropValueMap({})

            dst = self.__copy_directory_tree(os.getcwd() + '/tests/resources/ansible-with-missing-inventory')

            request = {
                'lifecycle_name': 'install',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'request_id': request_id
            }
            resp = self.ansible_client.run_lifecycle_playbook(request)

            self.assertLifecycleExecutionEqual(resp, LifecycleExecution(request_id, STATUS_COMPLETE, None, {}))
            # without keep_files the copied directory must be removed
            self.assertFalse(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
 def get_lifecycle_execution(self, request_id, deployment_location):
     """Look up the Heat stack behind request_id and report its status."""
     openstack_location = self.location_translator.from_deployment_location(
         deployment_location)
     heat_driver = openstack_location.heat_driver
     request_type, stack_id, operation_id = self.__split_request_id(
         request_id)
     try:
         stack = heat_driver.get_stack(stack_id)
     except StackNotFoundError as e:
         logger.debug('Stack not found: %s', stack_id)
         # A missing stack is success for a delete request; for anything
         # else it means the infrastructure has disappeared.
         if request_type != DELETE_REQUEST_PREFIX:
             raise InfrastructureNotFoundError(str(e)) from e
         logger.debug(
             'Stack not found on delete request, returning task as successful: %s',
             stack_id)
         return LifecycleExecution(request_id, STATUS_COMPLETE)
     logger.debug('Retrieved stack: %s', stack)
     return self.__build_execution_response(stack, request_id)
    def test_run_lifecycle_with_malformed_associated_topology_in_playbook(self):
        # Attach a stdout handler so logging from the code under test is
        # visible while the test runs; detached again in the finally block.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        try:
            request_id = uuid.uuid4().hex
            infrastructure_id_1 = uuid.uuid4().hex
            infrastructure_id_2 = uuid.uuid4().hex
            infrastructure_osp_type = 'Openstack'
            infrastructure_k8s_type = 'Kubernetes'

            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in [
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                ]
            })
            system_properties = PropValueMap({})

            associated_topology = AssociatedTopology.from_dict({
                'apache1': {
                    'id': infrastructure_id_1,
                    'type': infrastructure_osp_type
                },
                'apache2': {
                    'id': infrastructure_id_2,
                    'type': infrastructure_k8s_type
                }
            })

            resources_dir = str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_with_malformed_associated_topology_in_playbook'
            dst = self.__copy_directory_tree(resources_dir)

            resp = self.ansible_client.run_lifecycle_playbook({
                'lifecycle_name': 'adopt',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'associated_topology': associated_topology,
                'keep_files': True,
                'request_id': request_id
            })

            # The malformed playbook references an undefined variable, so the
            # run must fail with Ansible's undefined-variable error text.
            self.assertLifecycleExecutionMatches(resp, LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, "task debug failed: {'msg': \"The task includes an option with an undefined variable. The error was: 'dict object' has no attribute 'wrong'"), {}))
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
    def test_run_lifecycle_return_associated_topology(self):
        # Attach a stdout handler so logging from the code under test is
        # visible while the test runs; detached again in the finally block.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        try:
            request_id = uuid.uuid4().hex

            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in [
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                ]
            })
            system_properties = PropValueMap({})

            resources_dir = str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_returning_associated_topology_and_outputs'
            dst = self.__copy_directory_tree(resources_dir)

            resp = self.ansible_client.run_lifecycle_playbook({
                'lifecycle_name': 'create',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'keep_files': True,
                'request_id': request_id
            })

            expected_topology = AssociatedTopology.from_dict({
                'apache1': {
                    'id': '12345678',
                    'type': 'Openstack'
                },
                'apache2': {
                    'id': '910111213',
                    'type': 'Openstack'
                }
            })
            expected_outputs = {'msg': "Associated topology returned", 'public_ip': "10.21.28.94", 'internal_ip': "10.10.10.42"}

            self.assertLifecycleExecutionEqual(resp, LifecycleExecution(request_id, STATUS_COMPLETE, None, expected_outputs, expected_topology))
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
    def run_lifecycle_playbook(self, request):
        """Run the Ansible playbook for the requested lifecycle transition.

        Resolves the playbook and inventory from the request's driver files,
        renders the config templates with the request properties, runs the
        playbook (retrying while the host is unreachable) and returns a
        LifecycleExecution describing the outcome. Driver files are removed
        afterwards unless the request sets keep_files.
        """
        driver_files = request['driver_files']
        key_property_processor = None

        try:
            request_id = request['request_id']
            lifecycle = request['lifecycle_name']
            properties = request['resource_properties']
            system_properties = request['system_properties']
            deployment_location = request['deployment_location']
            if not isinstance(deployment_location, dict):
                return LifecycleExecution(
                    request_id, STATUS_FAILED,
                    FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                   "Deployment Location must be an object"),
                    {})
            dl_properties = PropValueMap(
                deployment_location.get('properties', {}))

            config_path = driver_files.get_directory_tree('config')
            scripts_path = driver_files.get_directory_tree('scripts')

            key_property_processor = KeyPropertyProcessor(
                properties, system_properties, dl_properties)

            playbook_path = get_lifecycle_playbook_path(
                scripts_path, lifecycle)
            if playbook_path is not None:
                if not os.path.exists(playbook_path):
                    return LifecycleExecution(
                        request_id, STATUS_FAILED,
                        FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                       "Playbook path does not exist"), {})

                if deployment_location.get('type') == 'Kubernetes':
                    dl_properties['kubeconfig_path'] = self.create_kube_config(
                        deployment_location)
                    connection_type = "k8s"
                    inventory_path = config_path.get_file_path(INVENTORY_K8S)
                else:
                    connection_type = "ssh"
                    inventory_path = config_path.get_file_path(INVENTORY)

                # process key properties by writing them out to a temporary file and adding an
                # entry to the property dictionary that maps the "[key_name].path" to the key file path
                key_property_processor.process_key_properties()

                logger.debug('config_path = ' + config_path.get_path())
                logger.debug('driver_files = ' + scripts_path.get_path())
                logger.debug("playbook_path=" + playbook_path)
                logger.debug("inventory_path=" + inventory_path)

                all_properties = {
                    'properties': properties,
                    'system_properties': system_properties,
                    'dl_properties': dl_properties
                }

                process_templates(config_path, all_properties)

                # always retry on unreachable; run the playbook at least once
                # even if the configured retry count is zero or negative
                # (previously that left 'ret' unbound and raised
                # UnboundLocalError at the return below)
                num_retries = max(
                    1, self.ansible_properties.max_unreachable_retries)

                for i in range(0, num_retries):
                    if i > 0:
                        logger.debug(
                            'Playbook {0}, unreachable retry attempt {1}/{2}'.
                            format(playbook_path, i + 1, num_retries))
                    start_time = datetime.now()
                    ret = self.run_playbook(request_id, connection_type,
                                            inventory_path, playbook_path,
                                            lifecycle, all_properties)
                    if not ret.host_unreachable:
                        break
                    end_time = datetime.now()
                    if self.ansible_properties.unreachable_sleep_seconds > 0:
                        # Factor in that the playbook may have taken some time to determine is was unreachable
                        # by using the unreachable_sleep_seconds value as a minimum amount of time for the delay
                        delta = end_time - start_time
                        retry_seconds = max(
                            0,
                            self.ansible_properties.unreachable_sleep_seconds -
                            int(delta.total_seconds()))
                        time.sleep(retry_seconds)

                return ret.get_result()
            else:
                msg = "No playbook to run at {0} for lifecycle {1} for request {2}".format(
                    playbook_path, lifecycle, request_id)
                logger.debug(msg)
                return LifecycleExecution(
                    request_id, STATUS_FAILED,
                    FailureDetails(FAILURE_CODE_INTERNAL_ERROR, msg), {})
        except InvalidRequestException as ire:
            return LifecycleExecution(
                request_id, STATUS_FAILED,
                FailureDetails(FAILURE_CODE_INTERNAL_ERROR, ire.msg), {})
        except Exception as e:
            logger.exception("Unexpected exception running playbook")
            return LifecycleExecution(
                request_id, STATUS_FAILED,
                FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                               "Unexpected exception: {0}".format(e)), {})
        finally:
            # always clean up temporary key files and (unless keep_files is
            # set) the extracted driver files, whatever the outcome above
            if key_property_processor is not None:
                key_property_processor.clear_key_files()

            keep_files = request.get('keep_files', False)
            if not keep_files and driver_files is not None:
                try:
                    logger.debug(
                        'Attempting to remove lifecycle scripts at {0}'.format(
                            driver_files.root_path))
                    driver_files.remove_all()
                except Exception as e:
                    logger.exception(
                        'Encountered an error whilst trying to clear out lifecycle scripts directory {0}: {1}'
                        .format(driver_files.root_path, str(e)))
    def run_lifecycle_playbook(self, request):
        """Execute the Ansible playbook for the lifecycle transition in *request*.

        The request dict must contain 'request_id', 'lifecycle_name' and
        'driver_files'; 'resource_properties', 'system_properties',
        'request_properties' and 'associated_topology' are optional.

        Returns:
            LifecycleExecution: the outcome of the playbook run. All error
            paths (missing playbook, InvalidRequestException, unexpected
            exceptions) are converted into a STATUS_FAILED LifecycleExecution
            rather than raised to the caller.

        Side effects: renders config templates in place, writes temporary key
        files (always cleared in the finally block) and, unless 'keep_files'
        is truthy in the request, removes the driver files directory.
        """
        driver_files = request['driver_files']
        key_property_processor = None
        location = None

        try:
            request_id = request['request_id']
            lifecycle = request['lifecycle_name']
            resource_properties = request.get('resource_properties', {})
            system_properties = request.get('system_properties', {})
            request_properties = request.get('request_properties', {})
            associated_topology = request.get('associated_topology', None)

            # May raise InvalidRequestException, handled below
            location = DeploymentLocation.from_request(request)

            config_path = driver_files.get_directory_tree('config')
            scripts_path = driver_files.get_directory_tree('scripts')

            key_property_processor = KeyPropertyProcessor(
                resource_properties, system_properties, location.properties())

            playbook_path = get_lifecycle_playbook_path(
                scripts_path, lifecycle)
            if playbook_path is not None:
                if not os.path.exists(playbook_path):
                    return LifecycleExecution(
                        request_id, STATUS_FAILED,
                        FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                                       "Playbook path does not exist"), {})

                inventory = Inventory(driver_files,
                                      location.infrastructure_type)

                # process key properties by writing them out to a temporary file and adding an
                # entry to the property dictionary that maps the "[key_name].path" to the key file path
                key_property_processor.process_key_properties()

                logger.debug(
                    f'Handling request {request_id} with config_path: {config_path.get_path()} driver files path: {scripts_path.get_path()} resource properties: {resource_properties} system properties {system_properties} request properties {request_properties}'
                )

                all_properties = self.render_context_service.build(
                    system_properties, resource_properties, request_properties,
                    location.deployment_location(), associated_topology)

                process_templates(config_path, self.templating, all_properties)

                # always retry on unreachable; clamp to at least one attempt so
                # that a max_unreachable_retries of 0 (or negative) cannot leave
                # `ret` unassigned and raise UnboundLocalError below
                num_retries = max(
                    1, self.ansible_properties.max_unreachable_retries)

                for i in range(0, num_retries):
                    if i > 0:
                        logger.debug(
                            'Playbook {0}, unreachable retry attempt {1}/{2}'.
                            format(playbook_path, i + 1, num_retries))
                    start_time = datetime.now()
                    ret = self.run_playbook(request_id,
                                            location.connection_type,
                                            inventory.get_inventory_path(),
                                            playbook_path, lifecycle,
                                            all_properties)
                    if not ret.host_unreachable:
                        break
                    end_time = datetime.now()
                    if self.ansible_properties.unreachable_sleep_seconds > 0:
                        # Factor in that the playbook may have taken some time to determine it was unreachable
                        # by using the unreachable_sleep_seconds value as a minimum amount of time for the delay
                        delta = end_time - start_time
                        retry_seconds = max(
                            0,
                            self.ansible_properties.unreachable_sleep_seconds -
                            int(delta.total_seconds()))
                        time.sleep(retry_seconds)

                return ret.get_result()
            else:
                msg = "No playbook to run at {0} for lifecycle {1} for request {2}".format(
                    playbook_path, lifecycle, request_id)
                logger.debug(msg)
                return LifecycleExecution(
                    request_id, STATUS_FAILED,
                    FailureDetails(FAILURE_CODE_INTERNAL_ERROR, msg), {})
        except InvalidRequestException as ire:
            return LifecycleExecution(
                request_id, STATUS_FAILED,
                FailureDetails(FAILURE_CODE_INTERNAL_ERROR, ire.msg), {})
        except Exception as e:
            logger.exception("Unexpected exception running playbook")
            return LifecycleExecution(
                request_id, STATUS_FAILED,
                FailureDetails(FAILURE_CODE_INTERNAL_ERROR,
                               "Unexpected exception: {0}".format(e)), {})
        finally:
            # Cleanup always runs, even on the failure return paths above
            if location is not None:
                location.cleanup()

            if key_property_processor is not None:
                key_property_processor.clear_key_files()

            keep_files = request.get('keep_files', False)
            if not keep_files and driver_files is not None:
                try:
                    logger.debug(
                        'Attempting to remove lifecycle scripts at {0}'.format(
                            driver_files.root_path))
                    driver_files.remove_all()
                except Exception as e:
                    logger.exception(
                        'Encountered an error whilst trying to clear out lifecycle scripts directory {0}: {1}'
                        .format(driver_files.root_path, str(e)))
    def test_run_lifecycle_with_input_associated_topology(self):
        """Run the 'adopt' lifecycle with an input associated topology and
        verify the execution completes and driver files are kept."""
        # configure so that we can see logging from the code under test
        test_log_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(test_log_handler)
        try:
            request_id = uuid.uuid4().hex
            infrastructure_id_1 = uuid.uuid4().hex
            infrastructure_id_2 = uuid.uuid4().hex
            infrastructure_osp_type = 'Openstack'
            infrastructure_k8s_type = 'Kubernetes'

            # every resource property in this test is a plain string value
            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in (
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                )
            })
            system_properties = PropValueMap({})

            associated_topology = AssociatedTopology.from_dict({
                'apache1': {
                    'id': infrastructure_id_1,
                    'type': infrastructure_osp_type
                },
                'apache2': {
                    'id': infrastructure_id_2,
                    'type': infrastructure_k8s_type
                }
            })

            source_dir = str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_input_associated_topology'
            dst = self.__copy_directory_tree(source_dir)

            request = {
                'lifecycle_name': 'adopt',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'associated_topology': associated_topology,
                'keep_files': True,
                'request_id': request_id
            }
            resp = self.ansible_client.run_lifecycle_playbook(request)

            expected = LifecycleExecution(request_id, STATUS_COMPLETE, None, {'msg': "hello there!"})
            self.assertLifecycleExecutionEqual(resp, expected)
            # keep_files was set, so the copied scripts directory must survive
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(test_log_handler)
    def test_run_lifecycle_with_outputs_of_different_types(self):
        """Run the 'install' lifecycle and verify outputs of several types
        (string, int, float, bool, timestamp, map, list, custom) are returned."""
        # surface logging from the code under test on stdout
        test_log_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(test_log_handler)
        try:
            request_id = uuid.uuid4().hex

            # every resource property in this test is a plain string value
            properties = PropValueMap({
                name: {'value': value, 'type': 'string'}
                for name, value in (
                    ('hello_world_private_ip', '10.220.217.113'),
                    ('ansible_ssh_user', 'accanto'),
                    ('ansible_ssh_pass', 'accanto'),
                    ('ansible_become_pass', 'accanto'),
                )
            })
            system_properties = PropValueMap({})

            source_dir = str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_outputs'
            dst = self.__copy_directory_tree(source_dir)

            request = {
                'lifecycle_name': 'install',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'request_id': request_id
            }
            resp = self.ansible_client.run_lifecycle_playbook(request)

            expected_outputs = {
                'string_prop': 'Hello',
                'int_prop': 1,
                'float_prop': 1.2,
                'bool_prop': True,
                'timestamp_prop': '2020-11-23T11:49:33.308703Z',
                'map_prop': {
                    'A': 'ValueA',
                    'B': 123
                },
                'list_prop': ['A', 'B'],
                'custom_type_prop': {
                    'name': 'Testing',
                    'age': 42
                }
            }
            expected = LifecycleExecution(request_id, STATUS_COMPLETE, None, expected_outputs)

            self.assertLifecycleExecutionEqual(resp, expected)
            # keep_files was not set, so the copied scripts directory is removed
            self.assertFalse(os.path.exists(dst))
        finally:
            logger.removeHandler(test_log_handler)
    def test_run_lifecycle_with_kubernetes_inventory(self):
        """Run the 'install' lifecycle against a Kubernetes deployment
        location, passing input properties of many different types."""
        # configure so that we can see logging from the code under test
        test_log_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(test_log_handler)
        try:
            request_id = uuid.uuid4().hex

            # (name, value, type) entries; order matters for PropValueMap
            prop_entries = [
                ('hello_world_private_ip', '10.220.217.113', 'string'),
                ('ansible_ssh_user', 'accanto', 'string'),
                ('ansible_ssh_pass', 'accanto', 'string'),
                ('ansible_become_pass', 'accanto', 'string'),
                ('bool_prop', True, 'boolean'),
                ('int_prop', 123, 'integer'),
                ('float_prop', 1.2, 'float'),
                ('timestamp_prop', '2020-11-23T11:49:33.308703Z', 'timestamp'),
                ('map_prop', {'A': 1, 'B': 'A string'}, 'map'),
                ('list_prop', ['a', 'b', 'c'], 'list'),
                ('custom_type_prop', {'name': 'Testing', 'age': 42}, 'MyCustomType'),
            ]
            properties = PropValueMap({
                name: {'value': value, 'type': prop_type}
                for name, value, prop_type in prop_entries
            })
            system_properties = PropValueMap({})

            source_dir = str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible'
            dst = self.__copy_directory_tree(source_dir)

            request = {
                'lifecycle_name': 'install',
                'driver_files': DirectoryTree(dst),
                'system_properties': system_properties,
                'resource_properties': properties,
                'deployment_location': {
                    'name': 'winterfell',
                    'type': "Kubernetes",
                    'properties': PropValueMap({})
                },
                'request_id': request_id
            }
            resp = self.ansible_client.run_lifecycle_playbook(request)

            expected = LifecycleExecution(request_id, STATUS_COMPLETE, None, {'msg': "hello there!"})
            self.assertLifecycleExecutionEqual(resp, expected)
            # keep_files was not set, so the copied scripts directory is removed
            self.assertFalse(os.path.exists(dst))
        finally:
            logger.removeHandler(test_log_handler)