Example #1
0
    def handle_request(self, request):
        """Validate a queued lifecycle request and dispatch it to the handler.

        The raw request is converted to a dict, checked for all mandatory
        fields, converted into the richer driver types (driver files tree,
        PropValueMaps, AssociatedTopology) and then passed to
        self.lifecycle_request_handler. Any missing field, or any exception
        raised while processing, results in a STATUS_FAILED LifecycleExecution
        being published on the messaging service instead of the request being
        processed.
        """
        try:
            partition = request.partition
            offset = request.offset
            request_as_dict = request.as_new_dict()
            request_id = request_as_dict.get('request_id', None)

            # Every one of these fields must be present and non-None before
            # the request can be handled. Validating in a loop replaces seven
            # copy-pasted if-blocks that had identical failure handling; the
            # warning/failure messages produced are byte-identical.
            required_fields = [
                'lifecycle_name',
                'driver_files',
                'system_properties',
                'resource_properties',
                'request_properties',
                'associated_topology',
                'deployment_location'
            ]
            for field_name in required_fields:
                if request_as_dict.get(field_name) is None:
                    msg = 'Lifecycle request for partition {0} offset {1} is missing {2}.'.format(partition, offset, field_name)
                    logger.warning(msg)
                    self.messaging_service.send_lifecycle_execution(LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, msg), {}))
                    return

            # Convert the raw payload values into the types the handler expects
            file_name = '{0}'.format(str(uuid.uuid4()))
            request_as_dict['driver_files'] = self.driver_files_manager.build_tree(file_name, request_as_dict['driver_files'])
            request_as_dict['resource_properties'] = PropValueMap(request_as_dict['resource_properties'])
            request_as_dict['system_properties'] = PropValueMap(request_as_dict['system_properties'])
            request_as_dict['request_properties'] = PropValueMap(request_as_dict['request_properties'])
            request_as_dict['associated_topology'] = AssociatedTopology.from_dict(request_as_dict['associated_topology'])

            self.lifecycle_request_handler.handle_request(request_as_dict)
        except Exception as e:
            try:
                self.messaging_service.send_lifecycle_execution(LifecycleExecution(request.request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, str(e)), {}))
            except Exception as send_error:
                # just log this and carry on (renamed from 'e' so the original
                # exception is no longer shadowed by the send failure)
                logger.exception('Caught exception sending lifecycle response for driver request {0} for topic {1} : {2}'.format(request.request_id, self.request_queue_config.topic.name, str(send_error)))
 def test_execute_lifecycle_removes_files(self):
     """Driver files should be cleaned up after a successful Create."""
     self.mock_heat_driver.create_stack.return_value = '1'
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     handler.execute_lifecycle('Create', self.heat_driver_files, self.system_properties,
                               self.resource_properties, {}, AssociatedTopology(),
                               self.deployment_location)
     self.assertFalse(os.path.exists(self.heat_driver_files.root_path))
Example #3
0
 def __created_associated_topology(self, adopt=False, stack_id=''):
     """Build the expected AssociatedTopology for assertions.

     Args:
         adopt: when True, build the topology for an adopted stack keyed and
             identified by stack_id; otherwise build the default create entry.
         stack_id: the stack id used for the adopt case.
     """
     associated_topology = AssociatedTopology()
     # Idiom fix: 'adopt == True' replaced with a plain truthiness check
     # (PEP 8); callers pass a bool so behavior is unchanged.
     if adopt:
         associated_topology.add_entry(stack_id, stack_id, 'Openstack')
     else:
         associated_topology.add_entry('InfrastructureStack', '1',
                                       'Openstack')
     return associated_topology
Example #4
0
    def v2_runner_on_ok(self, result, *args, **kwargs):
        """
        Called when task execution completes (called for each host the task executes against)
        Note: even when a loop is used (so v2_runner_item_on_ok is called for each successful item) this function is called at the end, when all items have succeeded

        Harvests ansible_facts from the task result: facts whose name starts
        with the configured output property prefix become output properties,
        and a fact named 'associated_topology' is parsed into
        self.associated_topology.
        """
        logger.debug('v2_runner_on_ok: {0}'.format(result))

        props = []
        if 'results' in result._result.keys():
            # Looped task: one result entry per item; collect facts from each
            self.facts = result._result['results']
            props = [
                item['ansible_facts'] for item in self.facts
                if 'ansible_facts' in item
            ]
        else:
            self.facts = result._result
            if 'ansible_facts' in self.facts:
                props = [self.facts['ansible_facts']]

        for prop in props:
            for key, value in prop.items():
                if key.startswith(self.ansible_properties.output_prop_prefix):
                    # Strip the prefix to recover the output property name
                    output_facts = {
                        key[len(self.ansible_properties.output_prop_prefix):]:
                        value
                    }
                    logger.debug('output props = {0}'.format(output_facts))
                    self.properties.update(output_facts)
                elif key == 'associated_topology':
                    try:
                        # Bug fix: the original logged the undefined name
                        # 'associated_topology' here, raising a NameError that
                        # was swallowed by the broad except below and wrongly
                        # marked the playbook as failed. Log the fact value.
                        logger.info('associated_topology = {0}'.format(value))
                        self.associated_topology = AssociatedTopology.from_dict(
                            value)
                    except ValueError as ve:
                        self.failure_reason = f'An error has occurred while parsing the ansible fact \'{key}\'. {ve}'
                        self.failure_details = FailureDetails(
                            FAILURE_CODE_INFRASTRUCTURE_ERROR,
                            self.failure_reason)
                        self.playbook_failed = True
                    except Exception as e:
                        self.failure_reason = f'An internal error has occurred. {e}'
                        self.failure_details = FailureDetails(
                            FAILURE_CODE_INFRASTRUCTURE_ERROR,
                            self.failure_reason)
                        self.playbook_failed = True
        self._log_event_for_ok_task(result)
 def test_create_infrastructure_with_invalid_tosca_template_throws_error(self):
     """A TOSCA validation failure should surface as InvalidDriverFilesError."""
     self.mock_heat_translator.generate_heat_template.side_effect = ToscaValidationError('Validation error')
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     with self.assertRaises(InvalidDriverFilesError) as context:
         handler.execute_lifecycle('Create', self.tosca_driver_files, self.system_properties,
                                   self.resource_properties, {'template-type': 'TOSCA'},
                                   AssociatedTopology(), self.deployment_location)
     self.assertEqual(str(context.exception), 'Validation error')
 def test_create_infrastructure_with_invalid_template_type_throws_error(self):
     """Unsupported template-type values should be rejected with InvalidDriverFilesError."""
     request_properties = {'template-type': 'YAML'}
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     with self.assertRaises(InvalidDriverFilesError) as context:
         handler.execute_lifecycle('Create', self.tosca_driver_files, self.system_properties,
                                   self.resource_properties, request_properties,
                                   AssociatedTopology(), self.deployment_location)
     self.assertEqual(str(context.exception),
                      "Cannot create using template of type 'YAML'. Must be one of: ['TOSCA', 'HEAT']")
 def test_create_infrastructure_uses_stack_name_creator(self, mock_stack_name_creator):
     """The stack name should come from the StackNameCreator, seeded with resource id and name."""
     self.mock_heat_driver.create_stack.return_value = '1'
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     handler.execute_lifecycle('Create', self.heat_driver_files, self.system_properties,
                               self.resource_properties, {}, AssociatedTopology(),
                               self.deployment_location)
     name_creator = mock_stack_name_creator.return_value
     name_creator.create.assert_called_once_with('123', 'TestResource')
     self.mock_heat_driver.create_stack.assert_called_once_with(name_creator.create.return_value,
                                                                self.heat_template,
                                                                {'propA': 'valueA'})
 def test_create_infrastructure(self):
     """Happy-path Create: a stack is created and reported in the response topology."""
     self.mock_heat_driver.create_stack.return_value = '1'
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     result = handler.execute_lifecycle('Create', self.heat_driver_files, self.system_properties,
                                        self.resource_properties, {}, AssociatedTopology(),
                                        self.deployment_location)
     self.assertIsInstance(result, LifecycleExecuteResponse)
     self.assert_request_id(result.request_id, 'Create', '1')
     self.assert_internal_resource(result.associated_topology, '1')
     self.mock_location_translator.from_deployment_location.assert_called_once_with(self.deployment_location)
     self.mock_heat_driver.create_stack.assert_called_once_with(ANY, self.heat_template, {'propA': 'valueA'})
 def test_create_infrastructure_with_not_found_stack_id(self):
     """Supplying a stack_id for a stack that does not exist should raise InfrastructureNotFoundError."""
     self.mock_heat_driver.get_stack.side_effect = StackNotFoundError('Existing stack not found')
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     self.resource_properties['stack_id'] = {'type': 'string', 'value': 'MY_STACK_ID'}
     with self.assertRaises(InfrastructureNotFoundError) as context:
         handler.execute_lifecycle('Create', self.heat_driver_files, self.system_properties,
                                   self.resource_properties, {}, AssociatedTopology(),
                                   self.deployment_location)
     self.assertEqual(str(context.exception), 'Existing stack not found')
Example #10
0
 def execute_lifecycle(self, lifecycle_name, driver_files,
                       system_properties, resource_properties,
                       request_properties, associated_topology,
                       deployment_location):
     """Execute a lifecycle transition, either queued (async) or inline.

     When async requests are enabled the request is placed on the lifecycle
     request queue and a response carrying the generated request_id is
     returned immediately. Otherwise the driver files are unpacked into a
     tree, the property maps wrapped, the topology parsed and the handler
     invoked directly.
     """
     if self.async_requests_enabled:
         request_id = str(uuid.uuid4())
         queued_request = {
             'request_id': request_id,
             'lifecycle_name': lifecycle_name,
             'driver_files': driver_files,
             'system_properties': system_properties,
             'resource_properties': resource_properties,
             'request_properties': request_properties,
             'associated_topology': associated_topology,
             'deployment_location': deployment_location,
             # Capture the current logging context so the async worker can
             # restore it when it picks the request up
             'logging_context': dict(logging_context.get_all())
         }
         self.lifecycle_request_queue.queue_lifecycle_request(queued_request)
         return LifecycleExecuteResponse(request_id)
     tree_name = '{0}'.format(str(uuid.uuid4()))
     driver_files_tree = self.driver_files_manager.build_tree(tree_name, driver_files)
     parsed_topology = AssociatedTopology.from_dict(associated_topology)
     execute_response = self.handler.execute_lifecycle(lifecycle_name,
                                                       driver_files_tree,
                                                       PropValueMap(system_properties),
                                                       PropValueMap(resource_properties),
                                                       PropValueMap(request_properties),
                                                       parsed_topology,
                                                       deployment_location)
     if self.async_enabled is True:
         self.__async_lifecycle_execution_completion(execute_response.request_id,
                                                     deployment_location)
     return execute_response
 def test_create_infrastructure_uses_system_prop(self):
     """System properties should be merged into the inputs offered to the heat template."""
     self.mock_heat_input_utils.filter_used_properties.return_value = {'system_resourceId': '123'}
     self.mock_heat_driver.create_stack.return_value = '1'
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     handler.execute_lifecycle('Create', self.heat_driver_files, self.system_properties,
                               self.resource_properties, {}, AssociatedTopology(),
                               self.deployment_location)
     # Resource properties plus 'system_'-prefixed system properties are
     # offered to the input filter
     expected_candidates = PropValueMap({
         'propA': {'type': 'string', 'value': 'valueA'},
         'propB': {'type': 'string', 'value': 'valueB'},
         'system_resourceId': {'type': 'string', 'value': '123'},
         'system_resourceName': {'type': 'string', 'value': 'TestResource'}
     })
     self.mock_heat_input_utils.filter_used_properties.assert_called_once_with(self.heat_template,
                                                                               expected_candidates)
     self.mock_heat_driver.create_stack.assert_called_once_with(ANY, self.heat_template,
                                                                {'system_resourceId': '123'})
 def test_create_infrastructure_with_stack_id_input(self):
     """A stack_id resource property means an existing stack is adopted instead of created."""
     handler = ResourceDriverHandler(self.mock_location_translator,
                                     resource_driver_config=self.resource_driver_config,
                                     heat_translator_service=self.mock_heat_translator,
                                     tosca_discovery_service=self.mock_tosca_discover_service)
     self.resource_properties['stack_id'] = {'type': 'string', 'value': 'MY_STACK_ID'}
     result = handler.execute_lifecycle('Create', self.heat_driver_files, self.system_properties,
                                        self.resource_properties, {}, AssociatedTopology(),
                                        self.deployment_location)
     self.assertIsInstance(result, LifecycleExecuteResponse)
     self.assert_request_id(result.request_id, 'Create', 'MY_STACK_ID')
     self.assert_internal_resource(result.associated_topology, 'MY_STACK_ID')
     # No template should be generated and no new stack created when adopting
     self.mock_heat_translator.generate_heat_template.assert_not_called()
     self.mock_heat_driver.create_stack.assert_not_called()
     self.mock_location_translator.from_deployment_location.assert_called_once_with(self.deployment_location)
     self.mock_heat_driver.get_stack.assert_called_once_with('MY_STACK_ID')
 def __created_associated_topology(self):
     """Expected topology for a default Create: stack '1' of type 'Openstack'."""
     expected = AssociatedTopology()
     expected.add_entry('InfrastructureStack', '1', 'Openstack')
     return expected
 def __build_associated_topology_response(self, stack_id):
     """Build the AssociatedTopology expected in a response for the given stack id."""
     response_topology = AssociatedTopology()
     response_topology.add_entry(STACK_NAME, stack_id, STACK_RESOURCE_TYPE)
     return response_topology
    def test_run_lifecycle_with_input_associated_topology(self):
        """Run the 'adopt' playbook, passing an associated topology in on the request.

        Uses the 'ansible_input_associated_topology' resource playbook and
        expects a COMPLETE execution whose output matches the playbook's msg.
        """
        # configure so that we can see logging from the code under test
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        try:
            request_id = uuid.uuid4().hex
            infrastructure_id_1 = uuid.uuid4().hex
            infrastructure_id_2 = uuid.uuid4().hex
            infrastructure_osp_type = 'Openstack'
            infrastructure_k8s_type = 'Kubernetes'

            # Resource properties made available to the playbook
            properties = PropValueMap({
                'hello_world_private_ip': {
                    'value': '10.220.217.113',
                    'type': 'string'
                },
                'ansible_ssh_user': {
                    'value': 'accanto',
                    'type': 'string'
                },
                'ansible_ssh_pass': {
                    'value': 'accanto',
                    'type': 'string'
                },
                'ansible_become_pass': {
                    'value': 'accanto',
                    'type': 'string'
                }
            })
            system_properties = PropValueMap({
            })

            # Topology supplied as input to the 'adopt' transition: two
            # pre-existing infrastructure entries of different types
            associated_topology = AssociatedTopology.from_dict({
                'apache1': {
                    'id': infrastructure_id_1,
                    'type': infrastructure_osp_type
                },
                'apache2': {
                    'id': infrastructure_id_2,
                    'type': infrastructure_k8s_type
                }

            })

            # Work on a copy of the resource playbook directory so the run
            # cannot modify the checked-in test resources
            dst = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_input_associated_topology')

            resp = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'adopt',
            'driver_files': DirectoryTree(dst),
            'system_properties': system_properties,
            'resource_properties': properties,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({
                })
            },
            'associated_topology': associated_topology,
            'keep_files': True,
            'request_id': request_id
            })

            self.assertLifecycleExecutionEqual(resp, LifecycleExecution(request_id, STATUS_COMPLETE, None, {'msg': "hello there!"}))
            # keep_files=True means the copied directory must still exist
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
    def test_run_lifecycle_return_associated_topology(self):
        """Run the 'create' playbook and verify it returns an associated topology.

        Uses the 'ansible_returning_associated_topology_and_outputs' resource
        playbook, which sets an associated_topology fact plus output
        properties; the execution must report all of them.
        """
        # configure so that we can see logging from the code under test
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        try:
            request_id = uuid.uuid4().hex

            # Resource properties made available to the playbook
            properties = PropValueMap({
                'hello_world_private_ip': {
                    'value': '10.220.217.113',
                    'type': 'string'
                },
                'ansible_ssh_user': {
                    'value': 'accanto',
                    'type': 'string'
                },
                'ansible_ssh_pass': {
                    'value': 'accanto',
                    'type': 'string'
                },
                'ansible_become_pass': {
                    'value': 'accanto',
                    'type': 'string'
                }
            })
            system_properties = PropValueMap({
            })

            # Work on a copy of the resource playbook directory so the run
            # cannot modify the checked-in test resources
            dst = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_returning_associated_topology_and_outputs')

            resp = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'create',
            'driver_files': DirectoryTree(dst),
            'system_properties': system_properties,
            'resource_properties': properties,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({
                })
            },
            'keep_files': True,
            'request_id': request_id
            })

            # The ids/types here must match the facts set inside the playbook
            associated_topology = AssociatedTopology.from_dict({
                'apache1': {
                    'id': '12345678',
                    'type': 'Openstack'
                },
                'apache2': {
                    'id': '910111213',
                    'type': 'Openstack'
                }
            })

            self.assertLifecycleExecutionEqual(resp, LifecycleExecution(request_id, STATUS_COMPLETE, None, {'msg': "Associated topology returned", 'public_ip': "10.21.28.94", 'internal_ip': "10.10.10.42"}, associated_topology))
            # keep_files=True means the copied directory must still exist
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)
    def test_run_lifecycle_with_malformed_associated_topology_in_playbook(self):
        """Run an 'adopt' playbook that references the topology incorrectly.

        Uses the 'ansible_with_malformed_associated_topology_in_playbook'
        resource playbook; the undefined-variable error raised inside the
        playbook must surface as a FAILED execution with an
        infrastructure-error failure code.
        """
        # configure so that we can see logging from the code under test
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        try:
            request_id = uuid.uuid4().hex
            infrastructure_id_1 = uuid.uuid4().hex
            infrastructure_id_2 = uuid.uuid4().hex
            infrastructure_osp_type = 'Openstack'
            infrastructure_k8s_type = 'Kubernetes'

            # Resource properties made available to the playbook
            properties = PropValueMap({
                'hello_world_private_ip': {
                    'value': '10.220.217.113',
                    'type': 'string'
                },
                'ansible_ssh_user': {
                    'value': 'accanto',
                    'type': 'string'
                },
                'ansible_ssh_pass': {
                    'value': 'accanto',
                    'type': 'string'
                },
                'ansible_become_pass': {
                    'value': 'accanto',
                    'type': 'string'
                }
            })
            system_properties = PropValueMap({
            })

            # Valid topology passed in on the request; the failure under test
            # comes from the playbook's own malformed reference to it
            associated_topology = AssociatedTopology.from_dict({
                'apache1': {
                    'id': infrastructure_id_1,
                    'type': infrastructure_osp_type
                },
                'apache2': {
                    'id': infrastructure_id_2,
                    'type': infrastructure_k8s_type
                }

            })

            # Work on a copy of the resource playbook directory so the run
            # cannot modify the checked-in test resources
            dst = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_with_malformed_associated_topology_in_playbook')

            resp = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'adopt',
            'driver_files': DirectoryTree(dst),
            'system_properties': system_properties,
            'resource_properties': properties,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({
                })
            },
            'associated_topology': associated_topology,
            'keep_files': True,
            'request_id': request_id
            })

            self.assertLifecycleExecutionMatches(resp, LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, "task debug failed: {'msg': \"The task includes an option with an undefined variable. The error was: 'dict object' has no attribute 'wrong'"), {}))
            # keep_files=True means the copied directory must still exist
            self.assertTrue(os.path.exists(dst))
        finally:
            logger.removeHandler(stream_handler)