def test_run_lifecycle(self):
    # point the shared handler at stdout so logging from the code under test
    # shows up in the captured test output - see
    # https://stackoverflow.com/questions/7472863/pydev-unittesting-how-to-capture-text-logged-to-a-logging-logger-in-captured-o
    stream_handler.stream = sys.stdout
    req_id = uuid.uuid4().hex
    request_handler = AnsibleRequestHandler(self.mock_messaging_service, self.mock_ansible_client)
    self.mock_ansible_client.run_lifecycle_playbook.return_value = LifecycleExecution(req_id, STATUS_COMPLETE, None, {'prop1': 'output__value1'})
    install_request = {
        'lifecycle_name': 'Install',
        'driver_files': DirectoryTree(self.tmp_workspace),
        'system_properties': PropValueMap({}),
        'resource_properties': PropValueMap({}),
        'deployment_location': {
            'properties': {
                'testPropA': 'A'
            }
        },
        'request_id': req_id
    }
    request_handler.handle_request(install_request)
    # the execution produced by the client should be passed straight through
    self.check_response_only(LifecycleExecution(req_id, STATUS_COMPLETE, None, {'prop1': 'output__value1'}))
def test_run_lifecycle_missing_lifecycle_name(self):
    # point the shared handler at stdout so logging from the code under test
    # shows up in the captured test output - see
    # https://stackoverflow.com/questions/7472863/pydev-unittesting-how-to-capture-text-logged-to-a-logging-logger-in-captured-o
    stream_handler.stream = sys.stdout
    req_id = uuid.uuid4().hex
    request_handler = AnsibleRequestHandler(self.mock_messaging_service, self.mock_ansible_client)
    request_without_lifecycle_name = {
        'request_id': req_id,
        'driver_files': DirectoryTree(self.tmp_workspace),
        'system_properties': PropValueMap({}),
        'resource_properties': PropValueMap({}),
        'deployment_location': PropValueMap({})
    }
    request_handler.handle_request(request_without_lifecycle_name)
    # a request without a lifecycle_name must be rejected as an internal error
    expected_failure = FailureDetails(FAILURE_CODE_INTERNAL_ERROR, "Request must have a lifecycle_name")
    self.check_response_only(LifecycleExecution(req_id, STATUS_FAILED, expected_failure, {}))
def test_run_lifecycle_keep_files(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        # work on a disposable copy of the ansible test resources
        base_dir = os.getcwd()
        source_dir = base_dir + '/tests/resources/ansible'
        target_dir = base_dir + '/tests/resources/ansible-copy'
        shutil.rmtree(target_dir, ignore_errors=True)
        shutil.copytree(source_dir, target_dir)
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'install',
            'driver_files': DirectoryTree(target_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "type",
                'properties': PropValueMap({})
            },
            'keep_files': True,
            'request_id': req_id
        })
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, {'msg': "hello there!"}))
        # keep_files=True means the copied scripts directory must survive the run
        self.assertTrue(os.path.exists(target_dir))
    finally:
        logger.removeHandler(stream_handler)
def test_merge(self):
    merger = PropertiesMerger()
    resource_props = PropValueMap({
        'propA': {'type': 'string', 'value': 'propA'},
        'propB': {'type': 'string', 'value': 'propB'}
    })
    sys_props = PropValueMap({'propA': {'type': 'string', 'value': 'sysPropA'}})
    result = merger.merge(resource_props, sys_props)
    # system properties come through under a 'system_' prefix so they never
    # clash with resource properties of the same name
    expected = PropValueMap({
        'propA': {'type': 'string', 'value': 'propA'},
        'propB': {'type': 'string', 'value': 'propB'},
        'system_propA': {'type': 'string', 'value': 'sysPropA'}
    })
    self.assertEqual(result, expected)
def handle_request(self, request):
    """Validate and dispatch a lifecycle request pulled from the request queue.

    The raw queue record is converted to a dict, checked for its mandatory
    fields, enriched (driver files tree, typed property maps, associated
    topology) and handed on to the lifecycle request handler. Any missing
    field or processing error is reported back to the messaging service as a
    failed LifecycleExecution rather than raised to the caller.

    :param request: queue record exposing partition, offset, request_id and
                    an as_new_dict() view of the request payload
    """
    try:
        partition = request.partition
        offset = request.offset
        request_as_dict = request.as_new_dict()
        request_id = request_as_dict.get('request_id', None)
        # Every field below must be present and non-None. The checks were
        # previously seven copy-pasted branches; a fixed-order loop keeps the
        # same behaviour (first missing field is the one reported) without
        # the duplication.
        for required_field in ('lifecycle_name', 'driver_files', 'system_properties', 'resource_properties',
                               'request_properties', 'associated_topology', 'deployment_location'):
            if request_as_dict.get(required_field) is None:
                msg = 'Lifecycle request for partition {0} offset {1} is missing {2}.'.format(partition, offset, required_field)
                logger.warning(msg)
                self.messaging_service.send_lifecycle_execution(LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, msg), {}))
                return
        # Materialise the driver files under a unique name and wrap the raw
        # payloads in their typed forms before delegating.
        file_name = '{0}'.format(str(uuid.uuid4()))
        request_as_dict['driver_files'] = self.driver_files_manager.build_tree(file_name, request_as_dict['driver_files'])
        request_as_dict['resource_properties'] = PropValueMap(request_as_dict['resource_properties'])
        request_as_dict['system_properties'] = PropValueMap(request_as_dict['system_properties'])
        request_as_dict['request_properties'] = PropValueMap(request_as_dict['request_properties'])
        request_as_dict['associated_topology'] = AssociatedTopology.from_dict(request_as_dict['associated_topology'])
        self.lifecycle_request_handler.handle_request(request_as_dict)
    except Exception as e:
        try:
            # best effort: report the failure back as a failed execution
            self.messaging_service.send_lifecycle_execution(LifecycleExecution(request.request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, str(e)), {}))
        except Exception as send_error:
            # just log this and carry on (send_error no longer shadows the
            # original exception variable)
            logger.exception('Caught exception sending lifecycle response for driver request {0} for topic {1} : {2}'.format(request.request_id, self.request_queue_config.topic.name, str(send_error)))
def ansible_missing_associated_topology_id_in_fact(self):
    # NOTE(review): this method has no 'test_' prefix, so standard unittest
    # discovery will not execute it - confirm whether that is intentional.
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        staging_dir = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_missing_associated_topology_id_in_fact')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'create',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'keep_files': True,
            'request_id': req_id
        })
        # a fact missing its topology id yields a complete execution with no
        # outputs and no associated topology
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, {}, None))
        self.assertTrue(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)
def test_run_lifecycle_with_missing_inventory(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        staging_dir = self.__copy_directory_tree(os.getcwd() + '/tests/resources/ansible-with-missing-inventory')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'install',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'request_id': req_id
        })
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, {}))
        # without keep_files the staging directory must have been cleaned up
        self.assertFalse(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)
def test_filter_used_properties_reference_public(self):
    util = HeatInputUtil()
    heat_yml = '''
    parameters:
      propA:
        type: string
      propB_public:
        type: string
    '''
    orig_props = PropValueMap({
        'propA': {'type': 'string', 'value': 'testA'},
        'propB': {'type': 'key', 'keyName': 'keyB', 'privateKey': 'thisIsPrivate', 'publicKey': 'thisIsPublic'},
        'propC': {'type': 'string', 'value': 'testC'}
    })
    # only parameters named in the template survive the filter; a '_public'
    # suffix resolves to the key property's public half
    filtered = util.filter_used_properties(heat_yml, orig_props)
    self.assertEqual(filtered, {'propA': 'testA', 'propB_public': 'thisIsPublic'})
def test_filter_used_properties_prop_value_map(self):
    util = HeatInputUtil()
    heat_yml = '''
    parameters:
      propA:
        type: string
      propB:
        type: string
    '''
    orig_props = PropValueMap({
        'propA': {'type': 'string', 'value': 'testA'},
        'propB': {'type': 'string', 'value': 'testB'},
        'propC': {'type': 'string', 'value': 'testC'}
    })
    # propC is not a template parameter so it must be dropped
    filtered = util.filter_used_properties(heat_yml, orig_props)
    self.assertEqual(filtered, {'propA': 'testA', 'propB': 'testB'})
def execute_lifecycle(self, lifecycle_name, driver_files, system_properties, resource_properties, request_properties, associated_topology, deployment_location):
    """Execute (or queue) a lifecycle transition for a resource.

    When async request handling is enabled the request is placed on the
    lifecycle request queue and a response carrying a generated request id is
    returned immediately. Otherwise the request is handled inline by the
    driver handler, optionally kicking off async completion monitoring.
    """
    if self.async_requests_enabled:
        queued_request_id = str(uuid.uuid4())
        self.lifecycle_request_queue.queue_lifecycle_request({
            'request_id': queued_request_id,
            'lifecycle_name': lifecycle_name,
            'driver_files': driver_files,
            'system_properties': system_properties,
            'resource_properties': resource_properties,
            'request_properties': request_properties,
            'associated_topology': associated_topology,
            'deployment_location': deployment_location,
            'logging_context': dict(logging_context.get_all())
        })
        return LifecycleExecuteResponse(queued_request_id)
    # synchronous path: materialise the driver files and typed inputs, then
    # delegate to the handler implementation
    tree_name = str(uuid.uuid4())
    driver_files_tree = self.driver_files_manager.build_tree(tree_name, driver_files)
    parsed_topology = AssociatedTopology.from_dict(associated_topology)
    execute_response = self.handler.execute_lifecycle(
        lifecycle_name,
        driver_files_tree,
        PropValueMap(system_properties),
        PropValueMap(resource_properties),
        PropValueMap(request_properties),
        parsed_topology,
        deployment_location)
    if self.async_enabled is True:
        self.__async_lifecycle_execution_completion(execute_response.request_id, deployment_location)
    return execute_response
def test_build(self, name_manager):
    service = ExtendedResourceTemplateContext()
    # stub every name-conversion helper with a recognisable marker value
    stubbed_names = {
        'safe_label_name_for_resource': 'resource-label',
        'safe_subdomain_name_for_resource': 'resource-subdomain',
        'safe_label_name_from_resource_id': 'resource-id-label',
        'safe_subdomain_name_from_resource_id': 'resource-id-subdomain',
        'safe_label_name_from_resource_name': 'resource-name-label',
        'safe_subdomain_name_from_resource_name': 'resource-name-subdomain'
    }
    for helper_name, stub_value in stubbed_names.items():
        getattr(name_manager.return_value, helper_name).return_value = stub_value
    resource_props = PropValueMap({'propA': {'type': 'string', 'value': 'A'}, 'propB': {'type': 'string', 'value': 'B'}})
    sys_props = PropValueMap({'resourceId': {'type': 'string', 'value': '123-456-789'}, 'resourceName': {'type': 'string', 'value': 'Testing'}})
    deployment_location = {
        'name': 'Test',
        'type': 'Kubernetes',
        'properties': {
            'dlPropA': 'A DL Prop'
        }
    }
    result = service.build(sys_props, resource_props, deployment_location)
    self.maxDiff = None
    self.assertEqual(result, {
        'propA': 'A',
        'propB': 'B',
        'systemProperties': {
            'resourceLabel': 'resource-label',
            'resourceSd': 'resource-subdomain',
            'resourceSubdomain': 'resource-subdomain',
            'resourceId': '123-456-789',
            'resourceIdSd': 'resource-id-subdomain',
            'resourceIdSubdomain': 'resource-id-subdomain',
            'resourceIdLabel': 'resource-id-label',
            'resourceName': 'Testing',
            'resourceNameSd': 'resource-name-subdomain',
            'resourceNameSubdomain': 'resource-name-subdomain',
            'resourceNameLabel': 'resource-name-label'
        },
        'deploymentLocationInst': {
            'name': 'Test',
            'type': 'Kubernetes',
            'properties': {
                'dlPropA': 'A DL Prop'
            }
        }
    })
def test_merge_keys(self):
    merger = PropertiesMerger()
    resource_props = PropValueMap({
        'propA': {'type': 'key', 'privateKey': 'private', 'publicKey': 'public', 'keyName': 'SomeKey'},
        'propB': {'type': 'string', 'value': 'propB'}
    })
    sys_props = PropValueMap({'propA': {'type': 'string', 'value': 'sysPropA'}})
    result = merger.merge(resource_props, sys_props)
    # key-typed properties pass through intact; the colliding system property
    # lands under the 'system_' prefix
    expected = PropValueMap({
        'propA': {'type': 'key', 'privateKey': 'private', 'publicKey': 'public', 'keyName': 'SomeKey'},
        'propB': {'type': 'string', 'value': 'propB'},
        'system_propA': {'type': 'string', 'value': 'sysPropA'}
    })
    self.assertEqual(result, expected)
def test_filter_used_properties_allows_private_key_suffix_on_non_key_property(self):
    util = HeatInputUtil()
    heat_yml = '''
    parameters:
      propA_private:
        type: string
    '''
    orig_props = PropValueMap({'propA_private': {'type': 'string', 'value': 'testA'}})
    # a '_private' suffix on a plain string property is passed through as-is,
    # not treated as a reference to a key property
    filtered = util.filter_used_properties(heat_yml, orig_props)
    self.assertEqual(filtered, {'propA_private': 'testA'})
def test_filter_used_properties_supports_no_public_key(self):
    util = HeatInputUtil()
    heat_yml = '''
    parameters:
      propA_public:
        type: string
    '''
    orig_props = PropValueMap({
        'propA': {'type': 'key', 'keyName': 'keyA', 'privateKey': 'thisIsPrivate'}
    })
    # The property has no public key, so nothing is added to the used properties.
    # Let Heat determine if this parameter is required (and ultimately throw an
    # error if it is).
    filtered = util.filter_used_properties(heat_yml, orig_props)
    self.assertEqual(filtered, {})
def test_create_infrastructure_uses_system_prop(self):
    self.mock_heat_input_utils.filter_used_properties.return_value = {'system_resourceId': '123'}
    self.mock_heat_driver.create_stack.return_value = '1'
    driver = ResourceDriverHandler(self.mock_location_translator,
                                   resource_driver_config=self.resource_driver_config,
                                   heat_translator_service=self.mock_heat_translator,
                                   tosca_discovery_service=self.mock_tosca_discover_service)
    driver.execute_lifecycle('Create', self.heat_driver_files, self.system_properties, self.resource_properties, {}, AssociatedTopology(), self.deployment_location)
    # the filter must have been fed the resource properties merged with the
    # 'system_' prefixed system properties
    expected_props = PropValueMap({
        'propA': {'type': 'string', 'value': 'valueA'},
        'propB': {'type': 'string', 'value': 'valueB'},
        'system_resourceId': {'type': 'string', 'value': '123'},
        'system_resourceName': {'type': 'string', 'value': 'TestResource'}
    })
    self.mock_heat_input_utils.filter_used_properties.assert_called_once_with(self.heat_template, expected_props)
    self.mock_heat_driver.create_stack.assert_called_once_with(ANY, self.heat_template, {'system_resourceId': '123'})
def __propvaluemap(self, orig_props):
    # wrap each plain value as a string-typed property entry
    return PropValueMap({key: {'type': 'string', 'value': value} for key, value in orig_props.items()})
def merge(self, properties, system_properties):
    """Combine resource and system properties into one PropValueMap.

    System properties are stored under a 'system_' prefixed key so they can
    never collide with resource property names.
    """
    combined = dict(properties.items_with_types())
    for name, typed_value in system_properties.items_with_types():
        combined['system_{0}'.format(name)] = typed_value
    return PropValueMap(combined)
def properties(self):
    # expose the deployment location's raw properties as a PropValueMap
    dl_props = self.__deployment_location['properties']
    return PropValueMap(dl_props)
def run_lifecycle_playbook(self, request):
    """Run the Ansible playbook for the requested lifecycle transition.

    Locates the playbook for request['lifecycle_name'] under the driver
    files' 'scripts' directory, selects a k8s or ssh inventory based on the
    deployment location type, renders config templates, then runs the
    playbook with retries on unreachable hosts. Always returns a
    LifecycleExecution (failures are reported in the execution, not raised).
    The driver files directory is removed afterwards unless
    request['keep_files'] is truthy.
    """
    driver_files = request['driver_files']
    key_property_processor = None
    try:
        # NOTE(review): if 'request_id' is absent this raises KeyError before
        # request_id is bound, and the generic handler below would then hit an
        # UnboundLocalError referencing it - confirm callers always supply it.
        request_id = request['request_id']
        lifecycle = request['lifecycle_name']
        properties = request['resource_properties']
        system_properties = request['system_properties']
        deployment_location = request['deployment_location']
        if not isinstance(deployment_location, dict):
            return LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, "Deployment Location must be an object"), {})
        dl_properties = PropValueMap(deployment_location.get('properties', {}))
        config_path = driver_files.get_directory_tree('config')
        scripts_path = driver_files.get_directory_tree('scripts')
        key_property_processor = KeyPropertyProcessor(properties, system_properties, dl_properties)
        playbook_path = get_lifecycle_playbook_path(scripts_path, lifecycle)
        if playbook_path is not None:
            if not os.path.exists(playbook_path):
                return LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, "Playbook path does not exist"), {})
            # Kubernetes locations get a generated kubeconfig and the k8s
            # inventory; anything else is driven over ssh.
            if deployment_location.get('type') == 'Kubernetes':
                dl_properties['kubeconfig_path'] = self.create_kube_config(deployment_location)
                connection_type = "k8s"
                inventory_path = config_path.get_file_path(INVENTORY_K8S)
            else:
                connection_type = "ssh"
                inventory_path = config_path.get_file_path(INVENTORY)
            # process key properties by writing them out to a temporary file and adding an
            # entry to the property dictionary that maps the "[key_name].path" to the key file path
            key_property_processor.process_key_properties()
            logger.debug('config_path = ' + config_path.get_path())
            logger.debug('driver_files = ' + scripts_path.get_path())
            logger.debug("playbook_path=" + playbook_path)
            logger.debug("inventory_path=" + inventory_path)
            # properties made available to template rendering and the playbook run
            all_properties = {
                'properties': properties,
                'system_properties': system_properties,
                'dl_properties': dl_properties
            }
            process_templates(config_path, all_properties)
            # always retry on unreachable
            # NOTE(review): if max_unreachable_retries is 0 the loop body never
            # runs and 'ret' below is unbound - presumably configured >= 1.
            num_retries = self.ansible_properties.max_unreachable_retries
            for i in range(0, num_retries):
                if i > 0:
                    logger.debug('Playbook {0}, unreachable retry attempt {1}/{2}'.format(playbook_path, i + 1, num_retries))
                start_time = datetime.now()
                ret = self.run_playbook(request_id, connection_type, inventory_path, playbook_path, lifecycle, all_properties)
                if not ret.host_unreachable:
                    break
                end_time = datetime.now()
                if self.ansible_properties.unreachable_sleep_seconds > 0:
                    # Factor in that the playbook may have taken some time to determine is was unreachable
                    # by using the unreachable_sleep_seconds value as a minimum amount of time for the delay
                    delta = end_time - start_time
                    retry_seconds = max(0, self.ansible_properties.unreachable_sleep_seconds - int(delta.total_seconds()))
                    time.sleep(retry_seconds)
            return ret.get_result()
        else:
            msg = "No playbook to run at {0} for lifecycle {1} for request {2}".format(playbook_path, lifecycle, request_id)
            logger.debug(msg)
            return LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, msg), {})
    except InvalidRequestException as ire:
        return LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, ire.msg), {})
    except Exception as e:
        # any unexpected error is converted into a failed execution response
        logger.exception("Unexpected exception running playbook")
        return LifecycleExecution(request_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, "Unexpected exception: {0}".format(e)), {})
    finally:
        # clean up temporary key files regardless of outcome
        if key_property_processor is not None:
            key_property_processor.clear_key_files()
        # remove the unpacked driver files unless the request asked to keep them
        keep_files = request.get('keep_files', False)
        if not keep_files and driver_files is not None:
            try:
                logger.debug('Attempting to remove lifecycle scripts at {0}'.format(driver_files.root_path))
                driver_files.remove_all()
            except Exception as e:
                logger.exception('Encountered an error whilst trying to clear out lifecycle scripts directory {0}: {1}'.format(driver_files.root_path, str(e)))
def __system_properties(self):
    # fixed system properties used across the tests
    return PropValueMap({
        'resourceId': '123',
        'resourceName': 'TestResource'
    })
def test_run_lifecycle_with_input_associated_topology(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        osp_infrastructure_id = uuid.uuid4().hex
        k8s_infrastructure_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        input_topology = AssociatedTopology.from_dict({
            'apache1': {
                'id': osp_infrastructure_id,
                'type': 'Openstack'
            },
            'apache2': {
                'id': k8s_infrastructure_id,
                'type': 'Kubernetes'
            }
        })
        staging_dir = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_input_associated_topology')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'adopt',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'associated_topology': input_topology,
            'keep_files': True,
            'request_id': req_id
        })
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, {'msg': "hello there!"}))
        self.assertTrue(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)
def test_run_lifecycle_with_malformed_associated_topology_in_playbook(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        osp_infrastructure_id = uuid.uuid4().hex
        k8s_infrastructure_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        input_topology = AssociatedTopology.from_dict({
            'apache1': {
                'id': osp_infrastructure_id,
                'type': 'Openstack'
            },
            'apache2': {
                'id': k8s_infrastructure_id,
                'type': 'Kubernetes'
            }
        })
        staging_dir = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_with_malformed_associated_topology_in_playbook')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'adopt',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'associated_topology': input_topology,
            'keep_files': True,
            'request_id': req_id
        })
        # a malformed topology reference inside the playbook surfaces as an
        # infrastructure error from the failing debug task
        expected_failure = FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, "task debug failed: {'msg': \"The task includes an option with an undefined variable. The error was: 'dict object' has no attribute 'wrong'")
        self.assertLifecycleExecutionMatches(execution, LifecycleExecution(req_id, STATUS_FAILED, expected_failure, {}))
        self.assertTrue(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)
def test_run_lifecycle_return_associated_topology(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        staging_dir = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_returning_associated_topology_and_outputs')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'create',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'keep_files': True,
            'request_id': req_id
        })
        # the playbook publishes both outputs and an associated topology
        expected_topology = AssociatedTopology.from_dict({
            'apache1': {
                'id': '12345678',
                'type': 'Openstack'
            },
            'apache2': {
                'id': '910111213',
                'type': 'Openstack'
            }
        })
        expected_outputs = {'msg': "Associated topology returned", 'public_ip': "10.21.28.94", 'internal_ip': "10.10.10.42"}
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, expected_outputs, expected_topology))
        self.assertTrue(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)
def test_run_lifecycle_with_outputs_of_different_types(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'}
        })
        staging_dir = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible_outputs')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'install',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'request_id': req_id
        })
        # outputs must retain their native types, not be flattened to strings
        expected_outputs = {
            'string_prop': 'Hello',
            'int_prop': 1,
            'float_prop': 1.2,
            'bool_prop': True,
            'timestamp_prop': '2020-11-23T11:49:33.308703Z',
            'map_prop': {
                'A': 'ValueA',
                'B': 123
            },
            'list_prop': ['A', 'B'],
            'custom_type_prop': {
                'name': 'Testing',
                'age': 42
            }
        }
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, expected_outputs))
        # without keep_files the staging directory must have been cleaned up
        self.assertFalse(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)
def __resource_properties(self):
    # fixed resource properties used across the tests
    return PropValueMap({
        'propA': {'type': 'string', 'value': 'valueA'},
        'propB': {'type': 'string', 'value': 'valueB'}
    })
def __tosca_request_properties(self):
    # request properties instructing the driver to treat templates as TOSCA
    return PropValueMap({'template-type': {'type': 'string', 'value': 'TOSCA'}})
def test_run_lifecycle_with_kubernetes_inventory(self):
    # attach a stdout handler so logging from the code under test is visible
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    try:
        req_id = uuid.uuid4().hex
        # one property of every supported type to prove they all reach the playbook
        resource_props = PropValueMap({
            'hello_world_private_ip': {'value': '10.220.217.113', 'type': 'string'},
            'ansible_ssh_user': {'value': 'accanto', 'type': 'string'},
            'ansible_ssh_pass': {'value': 'accanto', 'type': 'string'},
            'ansible_become_pass': {'value': 'accanto', 'type': 'string'},
            'bool_prop': {'value': True, 'type': 'boolean'},
            'int_prop': {'value': 123, 'type': 'integer'},
            'float_prop': {'value': 1.2, 'type': 'float'},
            'timestamp_prop': {'value': '2020-11-23T11:49:33.308703Z', 'type': 'timestamp'},
            'map_prop': {
                'value': {
                    'A': 1,
                    'B': 'A string'
                },
                'type': 'map'
            },
            'list_prop': {'value': ['a', 'b', 'c'], 'type': 'list'},
            'custom_type_prop': {
                'value': {
                    'name': 'Testing',
                    'age': 42
                },
                'type': 'MyCustomType'
            }
        })
        staging_dir = self.__copy_directory_tree(str(pathlib.Path(__file__).parent.absolute()) + '/../../resources/ansible')
        execution = self.ansible_client.run_lifecycle_playbook({
            'lifecycle_name': 'install',
            'driver_files': DirectoryTree(staging_dir),
            'system_properties': PropValueMap({}),
            'resource_properties': resource_props,
            'deployment_location': {
                'name': 'winterfell',
                'type': "Kubernetes",
                'properties': PropValueMap({})
            },
            'request_id': req_id
        })
        self.assertLifecycleExecutionEqual(execution, LifecycleExecution(req_id, STATUS_COMPLETE, None, {'msg': "hello there!"}))
        # without keep_files the staging directory must have been cleaned up
        self.assertFalse(os.path.exists(staging_dir))
    finally:
        logger.removeHandler(stream_handler)