def find_best_available_network(self, server_opts):
    """Locate the network(s) with the most available ips for a server.

    High level API to locate the best available network(s) for a server
    to be attached to. Updates the command options with the network.

    :param server_opts: dictionary of server options
    :return: dictionary of server options (same dict, updated in place)
    :raises TefloProvisionerError: when the ip availability query fails,
        no candidate network is found, or the requested count exceeds
        the number of available networks
    """
    if self.config_params.get('best_available_network', False) == 'True':
        self.logger.info("Looking for best available network.")
        nets = dict()
        net_filter = self.config_params.get('best_available_network_filter', [])
        if net_filter:
            # comma separated string from config -> list of network names
            net_filter = [net for net in net_filter.replace(' ', '').split(',')]
        count = self.config_params.get('best_available_network_count', None)
        if count is not None:
            # config values arrive as strings; normalize so the comparison
            # with len(nets) below is int vs int
            count = int(count)
        ip_opts = dict(ip_version=str(4)) if 'best_available_network_ip_version' not in self.config_params \
            else dict(ip_version=self.config_params.get('best_available_network_ip_version', {}))
        results = self.osp_client.network.ip_availability_list(ip_opts)
        if results['rc'] != 0:
            self.logger.error('Command did not execute successfully')
            self.logger.error(results['stderr'])
            raise TefloProvisionerError('There was an error trying to check the available ips of network.')
        networks = results['stdout']
        if net_filter:
            # BUG FIX: this previously tested the *builtin* `filter` (always
            # truthy), so with no filter configured every network was
            # discarded and nets[0] below raised IndexError.
            networks = [net for net in networks for fil in net_filter if net['Network Name'] == fil]
        for net in networks:
            # score each network by how many free ips it still has
            nets[net.get('Network Name')] = net.get('Total IPs') - net.get('Used IPs')
        # most free ips first
        nets = sorted(nets.items(), key=lambda x: x[1], reverse=True)
        if not nets:
            # fail loudly instead of IndexError-ing on nets[0] below
            self.logger.error('No networks with available ips were found.')
            raise TefloProvisionerError('Failed to find the available list of networks.')
        if count and count > len(nets):
            self.logger.error("The count value is higher than the available networks.")
            raise TefloProvisionerError('Failed to find the available list of networks.')
        elif count:
            if 'network' in server_opts:
                server_opts.get('network').extend([nets[i][0] for i in range(0, count)])
            else:
                server_opts.update(dict(network=[nets[i][0] for i in range(0, count)]))
        else:
            self.logger.info("Found network %s with the most ips." % nets[0][0])
            if 'network' in server_opts:
                # merge with any networks the caller already requested
                nets = list(set([nets[0][0]]).union(server_opts.get('network')))
                server_opts.update(dict(network=nets))
            else:
                server_opts.update(dict(network=[nets[0][0]]))
    return server_opts
def _wait_for_active_state(self, *args, **kwargs): """ This implements the low-level status checking for Server resources. :param args: a tuple of arguments :param kwargs: a dictionary of arguments :return: a dictionary of the response status of the asset """ cmd = args[0][0] name = args[1] status = 0 attempt = 1 cmd = '%s_show' % cmd cg = "" for cg, cs in getattr(self.osp_client, 'all_osp_cmd_groups').items(): for c in cs: if cmd == c: while attempt <= 30: results = getattr(getattr(self.osp_client, cg), cmd)(dict(res=name)) if results['rc'] != 0: self.logger.error(results['stderr']) raise TefloProvisionerError('Waiting for asset status failed.') results = results['stdout'] msg = '%s. ASSET %s, STATE=%s' % (attempt, results.get('name'), results.get('status')) if results.get('status').lower() == 'error': self.logger.info(msg) self.logger.error('Asset %s got an into an error state!' % results.get('name')) break elif results.get('status').lower() == 'active': self.logger.info(msg) self.logger.info('Asset %s successfully finished building!' % results.get('name')) status = 1 break else: self.logger.info('%s, rechecking in 20 seconds.', msg) time.sleep(20) if status: self.logger.info('Node %s successfully finished building.' % results.get('name')) return results else: raise TefloProvisionerError('Node was unable to build, %s' % results.get('name'))
def concurrent_cmd_execute(func, *args, **kwargs):
    """Fan out ``func`` over a list of names using a small thread pool.

    The last positional argument is the iterable of names; any preceding
    positional arguments are passed through to ``func`` unchanged.

    :param func: the function to execute concurrently
    :param args: positional args; args[-1] is the list of names to fan over
    :param kwargs: accepted for interface compatibility (not forwarded)
    :return: list of results, in completion order
    :raises TefloProvisionerError: if any submitted call raises
    """
    gathered = []
    # the with statement guarantees worker threads are cleaned up promptly
    with ThreadPoolExecutor(max_workers=5) as pool:
        pending = {}
        if len(args) == 3:
            # run_action-style callable: two fixed args plus the name
            for item in args[-1]:
                pending[pool.submit(func, args[0], args[1], item)] = item
        if len(args) == 2:
            # callable that only takes one fixed arg plus the name
            for item in args[-1]:
                pending[pool.submit(func, args[0], item)] = item
        for done in concurrent.futures.as_completed(pending):
            try:
                gathered.append(done.result())
            except Exception as exc:
                raise TefloProvisionerError(exc)
    return gathered
def delete(self):
    """Delete method. (must implement!)

    Teardown the host supplied via linchpin's destroy action.

    :raises TefloProvisionerError: when linchpin returns a non-zero rc
    """
    host = getattr(self.asset, 'name')
    self._create_pinfile()
    self._load_credentials()
    # BUG FIX: dict.get() never raises KeyError, so the old try/except
    # around it was dead code and the missing-tx_id warning could never
    # fire. Check for None explicitly instead.
    txid = self.provider_params.get('tx_id')
    if txid is None:
        self.logger.warning(
            'No tx_id found for Asset: %s, this could mean it was not successfully'
            ' provisioned. Attempting to perform the destroy without a tx_id'
            ' but this might not work, so you may need to manually cleanup resources.' % host)
    self.logger.info('Delete Asset %s.' % host)
    rc, results = self._lp_action(txid=txid)
    if rc > 0:
        raise TefloProvisionerError(
            'Failed to destroy the asset %s with return code %s.'
            ' Refer to the Linchpin stdout for more information.' % (host, rc))
    # Run post hooks if any
    self.linchpin_api.run_hooks('post', 'destroy')
    self.logger.info('Linchpin successfully deleted asset %s.' % host)
def _create(self):
    """Provision this asset through linchpin and build the teflo response.

    Runs the linchpin up action, logs the outcome, pulls the run data for
    the transaction, generates the inventory, fires post-up hooks and
    returns the provider response for this asset.
    """
    asset_name = getattr(self.asset, 'name')
    code, results = self._lp_action()
    if code:
        raise TefloProvisionerError("Failed to provision asset %s" % asset_name)

    # Log either a per-count or a per-asset success message; the KeyError
    # guard mirrors the original defensive handling of provider params.
    try:
        if self.provider_params.get('count', 1) > 1:
            self.logger.info('Successfully created %s host resources' % self.provider_params.get('count'))
        else:
            self.logger.info('Successfully created asset %s' % asset_name)
    except KeyError:
        self.logger.info('Successfully created asset %s' % asset_name)

    tx_id = list(results)[0]
    results = self.linchpin_api.get_run_data(tx_id, ('cfgs', 'inputs', 'outputs'))
    self.logger.debug(json.dumps(results, indent=2))

    # inventory must exist before the post hooks run
    self._create_inventory(results)

    # Run post hooks once the inventory has been generated
    self.linchpin_api.run_hooks('post', 'up')

    flattened = []
    for target in results:
        flattened.extend(results.get(target).get('outputs').get('resources'))

    # when an inventory was generated, the response carries no resources
    lp_resources = [] if self._create_inv else flattened
    return LinchpinResponseBuilder.generate_teflo_response(
        tx_id=tx_id, lp_res_resource=lp_resources, asset=self.asset)
def _create(self, cp_opts, name): """ Implements the actually create/add/set logic to the ospclientsdk :param cp_opts: tuple of command and dictionary options :param name: string anme of the resource :return: dictionary information about the asset from the provider """ cmd = cp_opts[0] opts = cp_opts[1] if not self.osp_client.is_valid_command(cmd): cmd = '%s_create' % cmd if not self.osp_client.is_valid_command(cmd): raise TefloProvisionerError('%s is not a valid command supported by this provisioner.' % cmd) if cmd == 'server_create': opts = self.find_best_available_network(opts) if not opts.get('res', False): opts.update(res=name) try: for cg, cs in getattr(self.osp_client, 'all_osp_cmd_groups').items(): for c in cs: if cmd == c: results = getattr(getattr(self.osp_client, cg), cmd)(opts) break except AttributeError as ex: results = self.osp_client.run_command(cmd, opts) return results
def validate(self):
    """Validate provider commands against the osp client api, then the schema.

    Each key in provider_params is treated as a command: add/set style
    commands must be valid as-is, any other command must have both a
    "<cmd>_create" and a "<cmd>_delete" counterpart.
    """
    for cmd in self.provider_params:
        if 'add' in cmd or 'set' in cmd:
            # add/set commands are validated verbatim
            if not self.osp_client.is_valid_command(cmd):
                raise TefloProvisionerError('There is no command group that supports this command: %s.' % cmd)
        elif not (self.osp_client.is_valid_command("%s_create" % cmd)
                  and self.osp_client.is_valid_command("%s_delete" % cmd)):
            raise TefloProvisionerError('There is no command group that supports this command: %s.' % cmd)

    # validate teflo plugin schema
    schema_validator(schema_data=self.build_profile(self.asset),
                     schema_files=[self.__schema_file_path__],
                     schema_ext_files=[self.__schema_ext_path__])
def validate(self):
    """Validate the teflo plugin schema and the generated linchpin pinfile.

    :raises TefloProvisionerError: when linchpin rejects the pinfile
    """
    # teflo plugin schema first
    schema_validator(schema_data=self.build_profile(self.asset),
                     schema_files=[self.__schema_file_path__],
                     schema_ext_files=[self.__schema_ext_path__])

    # then the linchpin pinfile itself
    self._create_pinfile()
    code, results = self._lp_action(validate=True)
    self.logger.info(code)
    self.logger.info(results)
    if code == 0:
        return
    self.logger.error('linchpin topology rc: %s' % code)
    self.logger.error(results)
    raise TefloProvisionerError('Linchpin failed to validate pinfile.')
def _run_action(self, *args, **kwargs):
    """Run the desired provisioner action, retrying on failure.

    :param args: tuple of (action method name, command options tuple,
        resource name) — e.g. ('_create', ('server', {...}), 'host01')
    :param kwargs: a dictionary of arguments (unused)
    :return: a dictionary of resource information from the provider
    :raises TefloProvisionerError: when the action still fails after all
        configured retries (or immediately when retries are not configured)
    """
    attempts = 1
    action = args[0]
    cp_opts = args[1]
    name = args[2]
    # retries only happen when BOTH knobs are configured to be > 0
    retries = int(self.config_params.get('retry_attempts', 0))
    wait_time = int(self.config_params.get('retry_wait', 0))
    results = getattr(self, action)(cp_opts, name)
    cleaned_up = False
    if results['rc'] != 0:
        if retries > 0 and wait_time > 0:
            # let's a attempt a cleanup before we do the retries just in case the
            # failure is the type that leaves a partially provisioned resource.
            # Perfect example is when an instance is being provisioned but ends up in
            # an error state because ips on the network have been exhausted.
            if action == '_create' and not cleaned_up:
                rs = self._delete(cp_opts, name)
                if rs['rc'] == 0:
                    self.logger.info("Successfully cleaned up failed attempt.")
                else:
                    self.logger.warning("Attempted cleanup up failed. Resource might not exist.")
                cleaned_up = True
            while attempts <= retries:
                self.logger.error(results['stderr'])
                self.logger.warning("Failed but retrying in %s seconds." % wait_time)
                time.sleep(wait_time)
                results = getattr(self, action)(cp_opts, name)
                if results and results['rc'] != 0:
                    self.logger.warning("Failed retry attempt %s." % attempts)
                    attempts += 1
                    continue
                # a retry succeeded; hand back the provider response
                return results
        # retries were not configured, or they were exhausted
        self.logger.error('Command did not execute successfully')
        raise TefloProvisionerError(results['stderr'])
    return results
def _load_credentials(self): if self.provider_credentials and [ i for i in self.get_schema_keys() if hasattr(self.asset, i) and i in ['pinfile', 'topology'] ]: self.logger.error( 'Trying to use Teflo credentials mapping with Linchpin pinfile/topology file.' ) raise TefloProvisionerError('Incompatible credential mechanism.') if self.provider_credentials and \ 'resource_group_type' in self.provider_params and not \ [i for i in self.get_schema_keys() if hasattr(self.asset, i) and i in ['pinfile', 'topology']] or \ hasattr(self.asset, 'provider'): if self.provider_params.get('resource_group_type', False) == 'openstack' or \ self.provider_params.get('name') == 'openstack': # Linchpin supports Openstack environment variables # https://linchpin.readthedocs.io/en/latest/openstack.html#credentials-management # It is better to keep the credentials in memory # This is also reduce complexity by not calling openstack directly environ['OS_USERNAME'] = self.provider_credentials['username'] environ['OS_PASSWORD'] = self.provider_credentials['password'] environ['OS_AUTH_URL'] = self.provider_credentials['auth_url'] environ['OS_PROJECT_NAME'] = self.provider_credentials[ 'tenant_name'] if 'domain_name' in self.provider_credentials: environ['OS_DOMAIN_NAME'] = self.provider_credentials[ 'domain_name'] elif self.provider_params.get('resource_group_type', False) == 'beaker' or \ self.provider_params.get('name') == 'beaker': bkr_conf = path.join(path.abspath(self.data_folder), 'beaker.conf') environ['BEAKER_CONF'] = bkr_conf creds = self.provider_credentials with open(bkr_conf, 'w') as conf: if 'hub_url' in self.provider_credentials: conf.write('HUB_URL = "%s"\n' % creds['hub_url']) if 'ca_cert' in self.provider_credentials: conf.write('CA_CERT = "%s"\n' % creds['ca_cert']) if 'password' in self.provider_credentials: conf.write('AUTH_METHOD = "password"\n') conf.write('USERNAME = "******"\n' % creds['username']) conf.write('PASSWORD = "******"\n' % creds['password']) elif 'keytab' in 
self.provider_credentials: conf.write('AUTH_METHOD = "krbv"\n') conf.write('KRB_PRINCIPAL = "%s"\n' % creds['keytab_principal']) conf.write('KRB_KEYTAB = "%s"\n' % creds['keytab']) if 'realm' in self.provider_credentials: conf.write('KRB_REALM = "%s"\n' % creds['realm']) if 'service' in self.provider_credentials: conf.write('KRB_SERVICE = "%s"\n' % creds['service']) if 'ccache' in self.provider_credentials: conf.write('KRB_CCACHE = "%s"\n' % creds['ccache']) elif self.provider_params.get('resource_group_type', False) == 'libvirt': creds = self.provider_credentials if not path.exists(path.expanduser('~/.config/libvirt')): os.makedirs(path.expanduser('~/.config/libvirt')) libvirt_auth = path.join(path.expanduser('~/.config/libvirt'), 'auth.conf') environ['LIBVIRT_AUTH_FILE'] = libvirt_auth if path.exists(libvirt_auth): os.remove(libvirt_auth) config = ConfigParser() config.add_section('credentials-teflo') config.set('credentials-teflo', 'username', creds['username']) config.set('credentials-teflo', 'password', creds['password']) with open(libvirt_auth, 'w') as cfg: config.write(cfg) elif self.provider_params.get('resource_group_type', False) == 'aws': creds = self.provider_credentials if not path.exists(path.expanduser('~/.aws/')): os.makedirs(path.expanduser('~/.aws/')) aws_auth = path.join(path.expanduser('~/.aws/'), 'credentials') environ['AWS_PROFILE'] = 'Credentials' if path.exists(aws_auth): os.remove(aws_auth) config = ConfigParser() config.add_section('Credentials') for k, v in creds.items(): config.set('Credentials', k, v) with open(aws_auth, 'w') as cfg: config.write(cfg) elif self.provider_params.get('resource_group_type', False) == 'gcloud': if not self.provider_credentials.get('service_account_email', False) or \ not self.provider_credentials.get('credentials_file', False) or \ not self.provider_credentials.get('project_id', False): self.logger.error( 'Missing one or more Gcloud credential parameters.') raise TefloProvisionerError( 'Missing required 
credential parameters') environ['GCE_EMAIL'] = self.provider_credentials[ 'service_account_email'] environ[ 'GOOGLE_APPLICATION_CREDENTIALS'] = self.provider_credentials[ 'credentials_file'] environ['GOOGLE_CLOUD_PROJECT'] = self.provider_credentials[ 'project_id'] elif self.provider_params.get('resource_group_type', False) == 'azure': if not self.provider_credentials.get('subscription_id', False) or \ not self.provider_credentials.get('tenant', False): self.logger.error( 'Missing one or more Azure credential parameter - subscription_id, tenant' ) raise TefloProvisionerError( 'Missing required credential parameters') if self.provider_credentials.get('client_id', False) and \ self.provider_credentials.get('ad_user', False): self.logger.error('Found both client_id and ad_user') raise TefloProvisionerError( 'Found conflicting credential parameters.') if self.provider_credentials.get('secret', False) and \ self.provider_credentials.get('ad_password', False): self.logger.error('Found both secret and ad_password') raise TefloProvisionerError( 'Found conflicting credential parameters.') if self.provider_credentials.get('subscription_id', False): environ[ 'AZURE_SUBSCRIPTION_ID'] = self.provider_credentials[ 'subscription_id'] if self.provider_credentials.get('subscription_id', False): environ['AZURE_TENANT'] = self.provider_credentials[ 'tenant'] if self.provider_credentials.get('client_id', False): environ['AZURE_CLIENT_ID'] = self.provider_credentials[ 'client_id'] if self.provider_credentials.get('secret', False): environ['AZURE_SECRET'] = self.provider_credentials[ 'secret'] if self.provider_credentials.get('ad_user', False): environ['AZURE_AD_USER'] = self.provider_credentials[ 'ad_user'] if self.provider_credentials.get('password', False): environ['AZURE_PASSWORD'] = self.provider_credentials[ 'password'] elif self.provider_params.get('resource_group_type', False) == 'vmware': if not self.provider_credentials.get('hostname', False) or \ not 
self.provider_credentials.get('username', False) or \ not self.provider_credentials.get('password', False): self.logger.error( 'Missing one or more VMware credential parameter - hostname, username, password' ) raise TefloProvisionerError( 'Missing required credential parameters') environ['VMWARE_HOST'] = self.provider_credentials['hostname'] environ['VMWARE_USER'] = self.provider_credentials['username'] environ['VMWARE_PASSWORD'] = self.provider_credentials[ 'password'] if self.provider_credentials.get('port', False): environ['VMWARE_PORT'] = self.provider_credentials['port'] if self.provider_credentials.get('validate_certs', False): environ[ 'VMWARE_VALIDATE_CERTS'] = self.provider_credentials[ 'validate_certs'] elif self.provider_params.get('resource_group_type', False) == 'ovirt': if not self.provider_credentials.get('ovirt_url', False) or \ not self.provider_credentials.get('ovirt_username', False) or \ not self.provider_credentials.get('ovirt_password', False): self.logger.error( 'Missing one or more oVirt credential parameter - ovirt_url, ' 'ovirt_username, ovirt_password') raise TefloProvisionerError( 'Missing required credential parameters') environ['OVIRT_URL'] = self.provider_credentials['ovirt_url'] environ['OVIRT_USERNAME'] = self.provider_credentials[ 'ovirt_username'] environ['OVIRT_PASSWORD'] = self.provider_credentials[ 'ovirt_password'] if self.provider_credentials.get('ovirt_ca_file', False): environ['OVIRT_CA_FILE'] = self.provider_credentials[ 'ovirt_ca_file'] elif self.provider_params.get('resource_group_type', False) == 'openshift': if not self.provider_credentials.get('api_url', False): self.logger.error( 'Missing one or more OpenShift credential parameter - api_url' ) raise TefloProvisionerError( 'Missing required credential parameters') environ['K8S_AUTH_HOST'] = self.provider_credentials['api_url'] if self.provider_credentials.get('api_token', False): environ['K8S_AUTH_API_KEY'] = self.provider_credentials[ 'api_token'] if 
self.provider_credentials.get('kubeconfig', False): environ['K8S_AUTH_KUB_CONFIG'] = self.provider_credentials[ 'kubeconfig'] if self.provider_credentials.get('context', False): environ['K8S_AUTH_CONTEXT'] = self.provider_credentials[ 'context'] if self.provider_credentials.get('cert_file', False): environ['K8S_AUTH_CERT_FILE'] = self.provider_credentials[ 'cert_file'] if self.provider_credentials.get('key_file', False): environ['K8S_AUTH_KEY_FILE'] = self.provider_credentials[ 'key_file'] if self.provider_credentials.get('ssl_ca_cert', False): environ[ 'K8S_AUTH_SSL_CA_CERT'] = self.provider_credentials[ 'ssl_ca_cert'] if self.provider_credentials.get('verify_ssl', False): environ['K8S_AUTH_VERIFY_SSL'] = self.provider_credentials[ 'verify_ssl'] if self.provider_credentials.get('username', False): environ['K8S_AUTH_USERNAME'] = self.provider_credentials[ 'username'] if self.provider_credentials.get('password', False): environ['K8S_AUTH_PASSWORD'] = self.provider_credentials[ 'password'] else: self.logger.warning( 'No teflo credential is being used. Assuming using provisioner specific method. ' )
def test_teflo_provisioner_error():
    # Sanity check: TefloProvisionerError is a raisable exception type that
    # accepts a message and is caught by pytest.raises as expected.
    with pytest.raises(TefloProvisionerError):
        raise TefloProvisionerError('error message')