def test_string():
    ss = settings.StringSetting()

    #
    # Assert that a non-string value raises a validation error
    #
    with pytest.raises(settings.SettingValidationError):
        ss.validate(1)

    #
    # Assert that a string value does not raise a validation error
    #
    ss.validate('abc')

    #
    # Assert value is in values list
    #
    ss.values = ['abc', 'def']
    with pytest.raises(settings.SettingValidationError):
        ss.validate('ghi')
    ss.validate('def')

    #
    # Assert dump returns valid values
    #
    assert ss.dump('abc') == 'abc'
    ss.list = True
    assert ss.dump('abc, def') == ['abc', 'def']
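# A minimal companion sketch (hedged): it assumes IntegerSetting exposes the
# same validate()/dump() interface exercised by test_string() above and by
# test_dump() below. The test name and the direct SettingValidationError
# check on the setting object are illustrative assumptions, not taken from
# the original suite.
def test_integer_sketch():
    its = settings.IntegerSetting()

    # A non-integer value should fail validation (mirrors the 'abc' case
    # checked through ConfigurationValidator in test_partial_validation)
    with pytest.raises(settings.SettingValidationError):
        its.validate('abc')

    # A valid integer string passes validation and dumps to an int,
    # matching the 'integer': '3' -> 3 expectation in test_dump()
    its.validate('3')
    assert its.dump('3') == 3

    # List values are split and converted element-wise, as in test_dump()
    its.list = True
    assert its.dump('4, 5') == [4, 5]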
def test_required():
    s = {
        'integer': settings.IntegerSetting(required=True),
        'string': settings.StringSetting()
    }
    p = ConfigurationValidator(s)

    #
    # Assert that a required value, if not provided, raises a validation
    # error
    #
    with pytest.raises(ValidationError) as err_info:
        p.validate()
    err: ValidationError = err_info.value

    #
    # Should not raise a validation error, because it is not marked as
    # required
    #
    assert 'string' not in err.errors.keys()

    #
    # Should raise a validation error, because it is required
    #
    assert str(err.errors['integer']) == 'Setting is required'

    #
    # Assert that if the value is in fact provided, no errors are raised
    #
    p['integer'] = '5'
    p.validate()
def test_validate_mutually_exclusive():
    s = {
        'integer': settings.IntegerSetting(mutually_exclusive=['string']),
        'string': settings.StringSetting()
    }
    p = ConfigurationValidator(s)

    #
    # Assert no validation errors raised
    #
    p['integer'] = '1'
    p.validate()

    #
    # Assert mutually exclusive validation error raised
    #
    p['string'] = 'abc'
    with pytest.raises(ValidationError) as err_info:
        p.validate()
    err: ValidationError = err_info.value

    #
    # Should not raise a validation error, because the mutually exclusive
    # flag is set on integer only
    #
    assert 'string' not in err.errors.keys()

    #
    # Should raise a validation error, because it has a mutually exclusive
    # setting
    #
    assert str(err.errors['integer']) == 'Mutually exclusive with string'
def test_partial_validation():
    s = {
        'integer': settings.IntegerSetting(required=True),
        'string': settings.StringSetting()
    }
    p = ConfigurationValidator(s)

    #
    # Assert that missing fields do not raise a validation error for
    # partial validation
    #
    p.validate(full=False)

    #
    # Assert that partial validation does validate field values
    #
    p['integer'] = 'abc'
    with pytest.raises(ValidationError) as err_info:
        p.validate(full=False)
    err: ValidationError = err_info.value
    assert 'integer' in err.errors.keys()

    #
    # Assert that partial validation does validate field names
    #
    p['xyz'] = 'abc'
    with pytest.raises(ValidationError) as err_info:
        p.validate(full=False)
    err: ValidationError = err_info.value
    assert 'xyz' in err.errors.keys()
def test_validate_requires():
    s = {
        'integer': settings.IntegerSetting(requires=['string']),
        'string': settings.StringSetting(),
        'bool': settings.BooleanSetting(requires=['sub_requirement']),
        'sub_requirement': settings.StringSetting(),
    }
    p = ConfigurationValidator(s)

    #
    # Assert requires validation error raised
    #
    p['integer'] = '1'
    with pytest.raises(ValidationError) as err_info:
        p.validate()
    err: ValidationError = err_info.value

    #
    # Should not raise a validation error, because the requires flag is
    # set on integer only
    #
    assert 'string' not in err.errors.keys()

    #
    # Should raise a validation error, because it has a requires setting
    #
    assert str(err.errors['integer']) == 'Requires string'

    #
    # Assert no validation errors
    #
    p['string'] = 'integer satisfied'
    p.validate()

    #
    # Should be no validation error if bool is falsy
    #
    p['bool'] = 'False'
    p.validate()

    #
    # Should have a validation error when truthy
    #
    p['bool'] = 'True'
    with pytest.raises(ValidationError) as err_info:
        p.validate()
    err: ValidationError = err_info.value
    assert str(err.errors['bool']) == 'Requires sub_requirement'
def test_dump():
    s = {
        'bool': settings.BooleanSetting(),
        'list_bool': settings.BooleanSetting(list=True),
        'integer': settings.IntegerSetting(),
        'list_integer': settings.IntegerSetting(list=True),
        'string': settings.StringSetting(),
        'list_string': settings.StringSetting(list=True),
        'file': settings.FileSetting(must_exist=False)
    }
    p = ConfigurationValidator(s)

    #
    # Test data loading
    #
    data_to_load = {
        'bool': 'True',
        'list_bool': 'True, false',
        'integer': '3',
        'list_integer': '4, 5',
        'string': 'abc',
        'list_string': 'wz, yz',
        'file': 'file.txt'
    }
    p.load(data_to_load)

    #
    # Perform full validation
    #
    p.validate(full=True)

    #
    # Assert that dumped data is properly transformed
    #
    expected_data = {
        'bool': True,
        'list_bool': [True, False],
        'integer': 3,
        'list_integer': [4, 5],
        'string': 'abc',
        'list_string': ['wz', 'yz'],
        'file': 'file.txt'
    }
    assert p.dump() == expected_data
def test_defaults():
    s = {
        'integer': settings.IntegerSetting(default='3'),
        'string': settings.StringSetting(default='abc')
    }
    p = ConfigurationValidator(s)

    #
    # Assert default values are set
    #
    assert p['integer'] == '3'
    assert p['string'] == 'abc'
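# Hedged bridging sketch: the resource adapter SETTINGS dictionaries that
# follow are assumed to be consumed through the same load()/validate()/dump()
# flow the tests above exercise. The schema keys and raw values here are
# illustrative only, borrowed from the adapter settings below.
def example_schema_roundtrip():
    schema = {
        'override_dns_domain': settings.BooleanSetting(default='False'),
        'launch_timeout': settings.IntegerSetting(default='300'),
    }
    validator = ConfigurationValidator(schema)
    validator.load({'override_dns_domain': 'True', 'launch_timeout': '600'})
    validator.validate(full=True)
    # dump() yields native Python types, e.g.
    # {'override_dns_domain': True, 'launch_timeout': 600}
    return validator.dump()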
SETTINGS = {
    #
    # Instances
    #
    'tags': settings.TagListSetting(
        display_name='Tags',
        key_validation_regex='^(?!aws:).{0,127}',
        value_validation_regex='.{0,256}',
        description='A comma-separated list of tags in the form of '
                    'key=value',
        **GROUP_INSTANCES),
    'instancetype': settings.StringSetting(
        display_name='Instance type',
        description='AWS compute node instance type',
        required=True,
        **GROUP_INSTANCES),
    'ami': settings.StringSetting(
        display_name='AMI',
        description='AMI ID to use for launching node instances',
        required=True,
        **GROUP_INSTANCES),
    'block_device_map': settings.StringSetting(
        display_name='Block device map',
        description='Used to define block devices (virtual hard drives)',
        **GROUP_INSTANCES),
    'cloud_init_script_template': settings.FileSetting(
        display_name='cloud-init script template',
        description='Path to cloud-init script template',
class Oracleadapter(ResourceAdapter):
    """
    Drive Oracle Cloud Infrastructure.
    """
    __adaptername__ = 'oraclecloud'

    settings = {
        'availability_domain': settings.StringSetting(required=True),
        'compartment_id': settings.StringSetting(required=True),
        'shape': settings.StringSetting(default='VM.Standard1.1'),
        'vcpus': settings.IntegerSetting(),
        'subnet_id': settings.StringSetting(required=True),
        'image_id': settings.StringSetting(required=True),
        'user_data_script_template': settings.FileSetting(
            base_path='/tortuga/config/',
            default='oci_bootstrap.tmpl'
        ),
        'override_dns_domain': settings.BooleanSetting(default='False'),
        'dns_options': settings.StringSetting(),
        'dns_search': settings.StringSetting(),
        'dns_nameservers': settings.StringSetting(
            list=True,
            list_separator=' '
        )
    }

    def __init__(self, addHostSession=None):
        """
        Upon instantiation, read and validate config file.

        :return: Oci instance
        """
        super(Oracleadapter, self).__init__(addHostSession=addHostSession)

        config = {
            'region': None,
            'log_requests': False,
            'tenancy': None,
            'user': None,
            'pass_phrase': None,
            'fingerprint': None,
            'additional_user_agent': '',
            'key_file': os.path.join(
                os.path.expanduser('~'), '.ssh/id_rsa')
        }

        override_config = self.getResourceAdapterConfig()
        if override_config and isinstance(override_config, dict):
            config.update(override_config)

        self._timeouts = {
            'launch': override_config['launch_timeout']
            if override_config and 'launch_timeout' in override_config
            else 300
        }

        oci.config.validate_config(config)

        self.__vcpus = None
        self.__installer_ip = None

        self.__client = oci.core.compute_client.ComputeClient(config)
        self.__net_client = \
            oci.core.virtual_network_client.VirtualNetworkClient(config)
        self.__identity_client = \
            oci.identity.identity_client.IdentityClient(config)

    def __validate_keys(self, config):
        """
        Check all the required keys exist.

        :param config: Dictionary
        :return: None
        """
        provided_keys = set(config.keys())
        required_keys = {
            'availability_domain',
            'compartment_id',
            'shape',
            'subnet_id',
            'image_id'
        }

        missing_keys = required_keys.difference(provided_keys)
        if missing_keys:
            error_message = \
                'Required configuration setting(s) [%s] are missing' % (
                    ' '.join(missing_keys)
                )
            self.getLogger().error(error_message)

    @staticmethod
    def __cloud_instance_metadata() -> dict:
        """
        Get the cloud metadata.

        :returns: Dictionary metadata
        """
        response = urlopen('http://169.254.169.254/opc/v1/instance/')
        return json.load(response)

    @staticmethod
    def __cloud_vnic_metadata() -> dict:
        """
        Get the VNIC cloud metadata.

        :returns: Dictionary metadata
        """
        response = urlopen('http://169.254.169.254/opc/v1/vnics/')
        return json.load(response)

    @property
    def __cloud_launch_metadata(self) -> dict:
        """
        Get the metadata needed to launch an instance.

        :returns: Dictionary metadata
        """
        compute = self.__cloud_instance_metadata()
        vnic = self.__cloud_vnic_metadata()
        full_vnic = self.__net_client.get_vnic(
            vnic['vnicId']
        ).data

        return {
            'availability_domain': compute['availabilityDomain'],
            'compartment_id': compute['compartmentId'],
            'subnet_id': full_vnic.subnet_id,
            'image_id': compute['image'],
            'region': compute['region'],
            'tenancy_id': '',
            'user_id': '',
            'shape': compute['shape']
        }

    def start(self, addNodesRequest, dbSession, dbHardwareProfile,
              dbSoftwareProfile=None):
        """
        Create a cloud and bind with Tortuga.
        :return: List Instance objects
        """
        self.getLogger().debug(
            'start(): addNodesRequest=[%s], dbSession=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s]' % (
                addNodesRequest, dbSession, dbHardwareProfile,
                dbSoftwareProfile
            )
        )

        with StopWatch() as stop_watch:
            nodes = self.__add_nodes(
                addNodesRequest,
                dbSession,
                dbHardwareProfile,
                dbSoftwareProfile
            )

        if len(nodes) < addNodesRequest['count']:
            self.getLogger().warning(
                '%s node(s) requested, only %s launched'
                ' successfully' % (
                    addNodesRequest['count'], len(nodes)
                )
            )

        self.getLogger().debug(
            'start() session [%s] completed in'
            ' %0.2f seconds' % (
                self.addHostSession,
                stop_watch.result.seconds +
                stop_watch.result.microseconds / 1000000.0
            )
        )

        self.addHostApi.clear_session_nodes(nodes)

        return nodes

    def __add_nodes(self, add_nodes_request, db_session,
                    db_hardware_profile, db_software_profile):
        """
        Add nodes to the infrastructure.

        :return: List Nodes objects
        """
        # TODO: this validation needs to be moved
        # self.__validate_keys(session.config)

        node_spec = {
            'db_hardware_profile': db_hardware_profile,
            'db_software_profile': db_software_profile,
            'db_session': db_session,
            'configDict': self.getResourceAdapterConfig(),
        }

        return [result for result in self.__oci_add_nodes(
            count=int(add_nodes_request['count']),
            node_spec=node_spec)]

    def __oci_add_nodes(self, count=1, node_spec=None):
        """
        Wrapper around __oci_add_node() method. Launches Greenlets to
        perform the add nodes operation in parallel using gevent.

        :param count: number of nodes to add
        :param node_spec: dict containing instance launch specification
        :return: list of Nodes
        """
        greenlets = []
        for _ in range(count):
            greenlets.append(gevent.spawn(self.__oci_add_node, node_spec))

        for result in gevent.iwait(greenlets):
            if result.value:
                yield result.value

    def __oci_add_node(self, node_spec):
        """
        Add one node and backing instance to Tortuga.

        :param node_spec: instance launch specification
        :return: Nodes object (or None, on failure)
        """
        with gevent.Timeout(self._timeouts['launch'], TimeoutError):
            node_dict = self.__oci_pre_launch_instance(node_spec=node_spec)

            try:
                instance = self._launch_instance(node_dict=node_dict,
                                                 node_spec=node_spec)
            except Exception as exc:
                if 'node' in node_dict:
                    self.__client.terminate_instance(
                        node_dict['instance_ocid'])

                    self._wait_for_instance_state(
                        node_dict['instance_ocid'], 'TERMINATED')

                    node_spec['db_session'].delete(node_dict['node'])
                    node_spec['db_session'].commit()

                self.getLogger().error(
                    'Error launching instance: [{}]'.format(exc)
                )

                return

        return self._instance_post_launch(
            instance, node_dict=node_dict, node_spec=node_spec)

    def __oci_pre_launch_instance(self, node_spec=None):
        """
        Creates Nodes object if Tortuga-generated host names are enabled,
        otherwise returns empty node dict.
        :param node_spec: dict containing instance launch specification
        :return: node dict
        """
        if node_spec['db_hardware_profile'].nameFormat == '*':
            return {}

        result = {}

        # Generate node name
        hostname, _ = self.addHostApi.generate_node_name(
            node_spec['db_session'],
            node_spec['db_hardware_profile'].nameFormat,
            dns_zone=self.private_dns_zone).split('.', 1)

        _, domain = self.installer_public_hostname.split('.', 1)

        name = '%s.%s' % (hostname, domain)

        # Create Nodes object
        node = self.__initialize_node(
            name,
            node_spec['db_hardware_profile'],
            node_spec['db_software_profile']
        )
        node.state = state.NODE_STATE_LAUNCHING
        result['node'] = node

        # Add to database and commit database session
        node_spec['db_session'].add(node)
        node_spec['db_session'].commit()

        return result

    def __initialize_node(self, name, db_hardware_profile,
                          db_software_profile):
        node = Node(name=name)
        node.softwareprofile = db_software_profile
        node.hardwareprofile = db_hardware_profile
        node.isIdle = False
        node.addHostSession = self.addHostSession

        return node

    def _launch_instance(self, node_dict=None, node_spec=None):
        """
        Launch instance and wait for it to reach RUNNING state.

        :param node_dict: Dictionary
        :param node_spec: Object
        :return: Instance object
        """
        session = OciSession(node_spec['configDict'])

        session.config['metadata']['user_data'] = \
            self.__get_user_data(session.config)

        # TODO: this is a temporary workaround until the OciSession
        # functionality is validated for this workflow
        launch_config = session.launch_config

        # TODO: make this work better. Need to find a way of injecting
        # this into the `get_node_vcpus` method.
        self.__vcpus = session.config['vcpus'] if \
            session.config['vcpus'] else \
            session.cores_from_shape

        self.getLogger().debug(
            'setting vcpus to %d' % (
                self.__vcpus
            )
        )

        if 'node' in node_dict:
            node = node_dict['node']

            self.getLogger().debug(
                'overriding instance name [%s]' % (
                    node.name)
            )

            launch_config.display_name = node.name
            launch_config.hostname_label = node.name.split('.', 1)[0]

        launch_instance = self.__client.launch_instance(launch_config)

        instance_ocid = launch_instance.data.id

        node_dict['instance_ocid'] = instance_ocid

        log_adapter = CustomAdapter(
            self.getLogger(), {'instance_ocid': instance_ocid})

        log_adapter.debug('launched')

        # TODO: implement a timeout waiting for an instance to start; this
        # will currently wait forever
        # TODO: check for launch error

        def logging_callback(instance, state):
            log_adapter.debug('state: %s; waiting...' % state)

        self._wait_for_instance_state(
            instance_ocid, 'RUNNING', callback=logging_callback)

        log_adapter.debug('state: RUNNING')

        return self.__client.get_instance(instance_ocid).data

    def get_node_vcpus(self, name):
        """
        Return resolved number of VCPUs.

        :param name: String node hostname
        :return: Integer vcpus
        """
        instance_cache = self.instanceCacheGet(name)

        if 'vcpus' in list(instance_cache.keys()):
            return int(instance_cache['vcpus'])

        return self.__vcpus

    def _instance_post_launch(self, instance, node_dict=None,
                              node_spec=None):
        """
        Called after instance has launched successfully.
        :param instance: Oracle instance
        :param node_dict: instance/node mapping dict
        :param node_spec: instance launch specification
        :return: Nodes object
        """
        self.getLogger().debug(
            'Instance post-launch action for instance [%s]' % (
                instance.id)
        )

        if 'node' not in node_dict:
            domain = self.installer_public_hostname.split('.')[1:]
            fqdn = '.'.join([instance.display_name] + domain)

            node = self.__initialize_node(
                fqdn,
                node_spec['db_hardware_profile'],
                node_spec['db_software_profile']
            )

            node_spec['db_session'].add(node)

            node_dict['node'] = node
        else:
            node = node_dict['node']

        node.state = state.NODE_STATE_PROVISIONED

        # Get ip address from instance
        nics = []
        for ip in self.__get_instance_private_ips(
                instance.id, instance.compartment_id):
            nics.append(
                Nic(ip=ip, boot=True)
            )
        node.nics = nics

        node_spec['db_session'].commit()

        self.instanceCacheSet(
            node.name,
            {
                'id': instance.id,
                'compartment_id': instance.compartment_id,
                'shape': node_spec['configDict']['shape'],
                'vcpus': str(node_spec['configDict']['shape'].split('.')[-1])
            }
        )

        ip = [nic for nic in node.nics if nic.boot][0].ip

        self._pre_add_host(
            node.name,
            node.hardwareprofile.name,
            node.softwareprofile.name,
            ip)

        self.getLogger().debug(
            '_instance_post_launch(): node=[%s]' % (
                node)
        )

        self.fire_provisioned_event(node)

        return node

    def __get_instance_public_ips(self, instance_id, compartment_id):
        """
        Get public IP from the attached VNICs.

        :param instance_id: String instance id
        :param compartment_id: String compartment id
        :return: Generator String IPs
        """
        for vnic in self.__get_vnics_for_instance(instance_id,
                                                  compartment_id):
            attached_vnic = self.__net_client.get_vnic(vnic.vnic_id)
            if attached_vnic:
                yield attached_vnic.data.public_ip

    def __get_instance_private_ips(self, instance_id, compartment_id):
        """
        Get private IP from the attached VNICs.

        :param instance_id: String instance id
        :param compartment_id: String compartment id
        :return: Generator String IPs
        """
        for vnic in self.__get_vnics_for_instance(instance_id,
                                                  compartment_id):
            attached_vnic = self.__net_client.get_vnic(vnic.vnic_id)
            if attached_vnic:
                yield attached_vnic.data.private_ip

    def __get_vnics_for_instance(self, instance_id, compartment_id):
        """
        Get all VNICs attached to instance.

        :param instance_id: String instance id
        :param compartment_id: String compartment id
        :return: Generator VNIC objects
        """
        for vnic in self.__get_vnics(compartment_id):
            if vnic.instance_id == instance_id \
                    and vnic.lifecycle_state == 'ATTACHED':
                yield vnic

    def __get_vnics(self, compartment_id):
        """
        Get VNICs in compartment.

        :param compartment_id: String id
        :return: List VNIC objects
        """
        vnics = self.__client.list_vnic_attachments(compartment_id)
        return vnics.data

    def __get_common_user_data_settings(self, config, node=None):
        """
        Format resource adapter settings for the bootstrap template.
        :param config: Dictionary
        :param node: Node instance
        :return: Dictionary
        """
        installer_ip = self.__get_installer_ip(
            hardwareprofile=node.hardwareprofile if node else None)

        settings_dict = {
            'installerHostName': self.installer_public_hostname,
            'installerIp': '\'{0}\''.format(installer_ip)
                           if installer_ip else 'None',
            'adminport': self._cm.getAdminPort(),
            'cfmuser': self._cm.getCfmUser(),
            'cfmpassword': self._cm.getCfmPassword(),
            'override_dns_domain': str(config['override_dns_domain']),
            'dns_options': '\'{0}\''.format(config['dns_options'])
                           if config['dns_options'] else None,
            'dns_search': '\'{0}\''.format(config['dns_search'])
                          if config['dns_search'] else None,
            'dns_nameservers': self.__get_encoded_list(
                config['dns_nameservers']),
        }

        return settings_dict

    def __get_common_user_data_content(self, settings_dict):
        """
        Create header for bootstrap file.

        :param settings_dict: Dictionary
        :return: String
        """
        result = """\
installerHostName = '%(installerHostName)s'
installerIpAddress = %(installerIp)s
port = %(adminport)d
cfmUser = '******'
cfmPassword = '******'

# DNS resolution settings
override_dns_domain = %(override_dns_domain)s
dns_options = %(dns_options)s
dns_search = %(dns_search)s
dns_nameservers = %(dns_nameservers)s
""" % settings_dict

        return result

    def __get_user_data(self, config, node=None):
        """
        Compile the cloud-init script from the bootstrap template and
        encode it into base64.

        :param config: Dictionary
        :param node: Node instance
        :return: String
        """
        self.getLogger().info(
            'Using cloud-init script template [%s]' % (
                config['user_data_script_template']))

        settings_dict = self.__get_common_user_data_settings(config, node)

        with open(config['user_data_script_template']) as fp:
            result = ''

            for line in fp.readlines():
                if line.startswith('### SETTINGS'):
                    result += self.__get_common_user_data_content(
                        settings_dict)
                else:
                    result += line

        combined_message = MIMEMultipart()

        if node and not config['use_instance_hostname']:
            # Use cloud-init to set fully-qualified domain name of instance
            cloud_init = """#cloud-config
fqdn: %s
""" % node.name

            sub_message = MIMEText(
                cloud_init, 'text/cloud-config', sys.getdefaultencoding())
            filename = 'user-data.txt'
            sub_message.add_header(
                'Content-Disposition',
                'attachment; filename="%s"' % filename)
            combined_message.attach(sub_message)

            sub_message = MIMEText(
                result, 'text/x-shellscript', sys.getdefaultencoding())
            filename = 'bootstrap.py'
            sub_message.add_header(
                'Content-Disposition',
                'attachment; filename="%s"' % filename)
            combined_message.attach(sub_message)

            return b64encode(str(combined_message).encode()).decode()

        # Fall back to default behaviour
        return b64encode(result.encode()).decode()

    def deleteNode(self, dbNodes):
        """
        Delete a node from the infrastructure.
        :param dbNodes: List Nodes object
        :return: None
        """
        self._async_delete_nodes(dbNodes)

        self.getLogger().info(
            '%d node(s) deleted' % (
                len(dbNodes))
        )

    def _wait_for_instance_state(self, instance_ocid, state, callback=None,
                                 timeout=None):
        """
        Wait for instance to reach state.

        :param instance_ocid: Instance OCID
        :param state: Expected state of instance
        :param timeout: (optional) operation timeout
        :return: None
        """
        # TODO: implement timeout
        for nRetries in itertools.count(0):
            instance = self.__client.get_instance(instance_ocid)
            if instance.data.lifecycle_state == state:
                break

            if callback:
                # Only call the callback if the requested state hasn't yet
                # been reached
                callback(instance_ocid, instance.data.lifecycle_state)

            gevent.sleep(get_random_sleep_time(retries=nRetries) / 1000.0)

    def _delete_node(self, node):
        # TODO: add error handling; if the instance termination request
        # fails, we shouldn't be removing the node from the system

        try:
            instance_cache = self.instanceCacheGet(node.name)

            # TODO: what happens when you attempt to terminate an already
            # terminated instance? Exception?
            instance = self.__client.get_instance(instance_cache['id'])

            log_adapter = CustomAdapter(
                self.getLogger(), {'instance_ocid': instance_cache['id']})

            # Issue terminate request
            log_adapter.debug('Terminating...')

            self.__client.terminate_instance(instance.data.id)

            # Wait 3 seconds before checking state
            gevent.sleep(3)

            # Wait until state is 'TERMINATED'
            self._wait_for_instance_state(instance_cache['id'], 'TERMINATED')

            # Clean up the instance cache.
            self.instanceCacheDelete(node.name)
        except ResourceNotFound:
            pass

        # Remove Puppet certificate
        bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
        bhm.deleteNodeCleanup(node)

    def __get_installer_ip(self, hardwareprofile=None):
        """
        Get IP address of the installer node.

        :param hardwareprofile: Object
        :return: String ip address
        """
        if self.__installer_ip is None:
            if hardwareprofile and hardwareprofile.nics:
                self.__installer_ip = hardwareprofile.nics[0].ip
            else:
                self.__installer_ip = self.installer_public_ipaddress

        return self.__installer_ip

    @staticmethod
    def __get_encoded_list(items):
        """
        Return Python list encoded in a string.

        :param items: List
        :return: String
        """
        return '[' + ', '.join(['\'%s\'' % item for item in items]) + ']' \
            if items else '[]'
GROUP_PREEMPTIBLE = {
    'group': 'Preemptible',
    'group_order': 4
}

GROUP_COST = {
    'group': 'Cost Sync',
    'group_order': 9
}

SETTINGS = {
    #
    # Instances
    #
    'project': settings.StringSetting(
        display_name='Project',
        required=True,
        description='Name of Google Compute Engine project',
        **GROUP_INSTANCES
    ),
    'zone': settings.StringSetting(
        display_name='Zone',
        required=True,
        description='Zone in which compute resources are created',
        **GROUP_INSTANCES
    ),
    'type': settings.StringSetting(
        display_name='Type',
        required=True,
        description='Virtual machine type; for example, "n1-standard-1"',
        **GROUP_INSTANCES
    ),
    'image': settings.StringSetting(
SETTINGS = {
    #
    # Instances
    #
    'tags': settings.TagListSetting(
        display_name='Tags',
        description='A comma-separated list of tags in the form of '
                    'key=value',
        key_validation_regex='[^<>%&\\?/]{0,512}',
        value_validation_regex='.{0,256}',
        **GROUP_INSTANCES),
    'security_group': settings.StringSetting(
        display_name='Security Group',
        description='Azure security group to associate with created '
                    'virtual machines',
        required=True,
        **GROUP_INSTANCES),
    'resource_group': settings.StringSetting(
        display_name='Resource Group',
        required=True,
        description='Azure resource group where Tortuga will create '
                    'virtual machines',
        **GROUP_INSTANCES),
    'storage_account': settings.StringSetting(
        display_name='Storage Account',
        required=True,
        description='Azure storage account where virtual disks for '
                    'Tortuga-managed nodes will be created',
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shlex
from typing import List

from tortuga.resourceAdapterConfiguration import settings

AZURE_SETTINGS_DICT = {
    'subscription_id': settings.StringSetting(
        required=True,
        description='Azure subscription ID; obtainable from azure CLI or '
                    'Management Portal'),
    'client_id': settings.StringSetting(
        required=True,
        description='Azure client ID; obtainable from azure CLI or '
                    'Management Portal'),
    'tenant_id': settings.StringSetting(
        required=True,
        description='Azure tenant ID; obtainable from azure CLI or '
                    'Management Portal'),
    'secret': settings.StringSetting(
        required=True,
        description='Azure client secret; obtainable from azure CLI or '
class Dummyadapter(ResourceAdapter):
    __adaptername__ = 'dummy'

    settings = {
        'state': settings.StringSetting(
            description='The final state of the node after creation',
            default=state.NODE_STATE_INSTALLED,
            values=[
                state.NODE_STATE_CREATED,
                state.NODE_STATE_PROVISIONED,
                state.NODE_STATE_INSTALLED
            ]
        ),
    }

    #
    # The various states that a node will transition through, in order
    #
    STATE_TRANSITIONS = [
        state.NODE_STATE_CREATED,
        state.NODE_STATE_PROVISIONED,
        state.NODE_STATE_INSTALLED
    ]

    def start(self, addNodesRequest: dict, dbSession: Session,
              dbHardwareProfile: HardwareProfile,
              dbSoftwareProfile: Optional[SoftwareProfile] = None):
        """
        Create nodes
        """
        #
        # Load resource adapter settings
        #
        config = self.getResourceAdapterConfig(
            sectionName=addNodesRequest.get(
                'resource_adapter_configuration', 'default')
        )

        nodes = []
        for _ in range(addNodesRequest['count']):
            random_host_name_suffix = get_random_host_name_suffix()
            node = Node(name='compute-{}'.format(random_host_name_suffix))
            node.softwareprofile = dbSoftwareProfile
            node.hardwareprofile = dbHardwareProfile
            node.isIdle = False
            node.state = self.STATE_TRANSITIONS[0]

            # create dummy nic
            nic = Nic(boot=True, ip=generate_fake_ip())
            node.nics.append(nic)

            self._simulate_state_changes(
                node,
                config.get('state', self.settings['state'].default)
            )

            nodes.append(node)

        return nodes

    def _simulate_state_changes(self, node: Node, final_state: str):
        """
        Simulate the node transitioning through multiple states by firing
        state change events.

        :param Node node: the node to transition through the state changes
        :param str final_state: the final state the node must reach in the
                                simulation
        """
        initial_state_found = False

        for state in self.STATE_TRANSITIONS:
            #
            # Find the current node state in the list of transitions
            #
            if not initial_state_found:
                if state == node.state:
                    initial_state_found = True
                continue

            #
            # Fire a state change event to get to the next state
            #
            previous_state = node.state
            node.state = state
            self.fire_state_change_event(node, previous_state)

            #
            # If this is the final state, exit the loop
            #
            if state == final_state:
                break