def delete_container(self, container):
    """Remove an (empty) container from the account.

    :param container: (str)

    :raises ClientError: 404 Container does not exist
    :raises ClientError: 409 Container not empty
    """
    self._assert_account()
    resp = self.delete(
        path4url(self.account, container), success=(204, 404, 409))
    # Map the two failure codes to their messages; 204 falls through.
    failures = {
        404: "Container does not exist",
        409: "Container is not empty",
    }
    if resp.status_code in failures:
        raise ClientError(failures[resp.status_code], resp.status_code)
def get_flavor_id(token):
    """From kamaki flavor list get all possible flavors.

    Collects the distinct vcpu, ram, disk and disk_template values of
    every flavor the user is allowed to create.

    :param token: (str) ~okeanos authentication token
    :returns: (dict) sorted 'cpus', 'ram' and 'disk' lists plus a
        'disk_template' list in first-seen order
    :raises ClientError: if the flavor list cannot be retrieved
    """
    auth = check_credentials(token)
    endpoints, user_id = endpoints_and_user_id(auth)
    cyclades = init_cyclades(endpoints['cyclades'], token)
    try:
        flavor_list = cyclades.list_flavors(True)
    except ClientError:
        msg = ' Could not get list of flavors'
        raise ClientError(msg, error_flavor_list)
    # Sets give O(1) dedup instead of repeated O(n) list membership tests.
    cpu_set = set()
    ram_set = set()
    disk_set = set()
    # disk_template is returned unsorted, so keep first-seen order here.
    disk_template_list = []
    for flavor in flavor_list:
        if flavor['SNF:allow_create']:
            cpu_set.add(flavor['vcpus'])
            ram_set.add(flavor['ram'])
            disk_set.add(flavor['disk'])
            if flavor['SNF:disk_template'] not in disk_template_list:
                disk_template_list.append(flavor['SNF:disk_template'])
    flavors = {
        'cpus': sorted(cpu_set),
        'ram': sorted(ram_set),
        'disk': sorted(disk_set),
        'disk_template': disk_template_list,
    }
    return flavors
def get_user_quota(auth):
    """Return the quotas of the authenticated user.

    :param auth: an authenticated Astakos client
    :returns: the quota dict reported by the identity service
    :raises ClientError: if the quota request fails
    """
    try:
        return auth.get_quotas()
    except ClientError:
        raise ClientError(' Could not get user quota', error_user_quota)
def destroy_server(token, id):
    """Destroys a VRE server in ~okeanos and releases its public IP.

    :param token: (str) ~okeanos authentication token
    :param id: database id of the VreServer row to destroy
    :returns: the deleted server's name
    :raises ClientError: if cyclades does not report the server DELETED
    """
    # Report progress through the celery task state machine.
    current_task.update_state(state="Started")
    vre_server = VreServer.objects.get(id=id)
    auth = check_credentials(token)
    current_task.update_state(state="Authenticated")
    set_server_state(token, id, 'Deleting VRE server and its public IP')
    endpoints, user_id = endpoints_and_user_id(auth)
    cyclades = init_cyclades(endpoints['cyclades'], token)
    nc = init_cyclades_netclient(endpoints['network'], token)
    # Delete the VM and block until cyclades reports a final status.
    cyclades.delete_server(vre_server.server_id)
    new_status = cyclades.wait_server(vre_server.server_id,
                                      current_status='ACTIVE',
                                      max_wait=MAX_WAIT)
    if new_status != 'DELETED':
        # Record the failure but still mark the row Destroyed before raising.
        state = 'Error while deleting VRE server'
        set_server_state(token, id, state, status='Destroyed')
        raise ClientError('Error while deleting VRE server', error_fatal)
    # Release the floating IP that was attached to the deleted server.
    ip_to_delete = get_public_ip_id(nc, vre_server.server_IP)
    nc.delete_floatingip(ip_to_delete['id'])
    state = 'VRE server {0} and its public IP {1} were deleted'.format(
        vre_server.server_name, vre_server.server_IP)
    set_server_state(token, id, state, status='Destroyed')
    return vre_server.server_name
def check_network_quotas(self):
    """
    Checks if the user quota is enough to create a new private network
    Subtracts the number of networks used and pending from the max
    allowed number of networks
    """
    dict_quotas = get_user_quota(self.auth)
    net_quota = dict_quotas[self.project_id]['cyclades.network.private']
    user_free = net_quota['limit'] - net_quota['usage']
    project_free = net_quota['project_limit'] - net_quota['project_usage']
    # Effective headroom is bounded by both the user and project limits.
    available_networks = min(user_free, project_free)
    available_networks -= self.pending_quota['Network']
    if available_networks >= 1:
        logging.log(REPORT, ' Private Network quota is ok')
        return 0
    msg = 'Private Network quota exceeded in project: ' + self.opts[
        'project_name']
    raise ClientError(msg, error_quotas_network)
def get_project_id(token, project_name):
    """
    Return the id of an active ~okeanos project.
    """
    auth = check_credentials(token)
    dict_quotas = auth.get_quotas()
    try:
        active_projects = auth.get_projects(state='active')
    except ClientError:
        raise ClientError(' Could not get list of active projects',
                          error_get_list_projects)
    # First active project matching by name and carrying quota wins.
    for project in active_projects:
        if project['name'] == project_name and project['id'] in dict_quotas:
            return project['id']
    raise ClientError(' No project id was found for ' + project_name,
                      error_proj_id)
def check_ip_quotas(self):
    """Checks user's quota for unattached public ips."""
    dict_quotas = get_user_quota(self.auth)
    floating_ips = self.net_client.list_floatingips()
    ip_quota = dict_quotas[self.project_id]['cyclades.floating_ip']
    user_free = ip_quota['limit'] - ip_quota['usage']
    project_free = ip_quota['project_limit'] - ip_quota['project_usage']
    # Headroom is bounded by the tighter of user and project limits.
    available_ips = min(user_free, project_free)
    available_ips -= self.pending_quota['Ip']
    # Already-owned ips that are attached to nothing are reusable.
    available_ips += sum(
        1 for ip in floating_ips
        if ip['instance_id'] is None and ip['port_id'] is None)
    if available_ips > 0:
        logging.log(REPORT, ' Floating IP quota is ok')
        return 0
    msg = 'Floating IP not available in project: ' + self.opts[
        'project_name']
    raise ClientError(msg, error_get_ip)
def project_list_flavor_quota(user):
    """Creates the list of resources for every project a user has quota.

    :param user: user model instance carrying an okeanos_token
    :returns: list of ClusterCreationParams, one per project with quota
    :raises ClientError: if the list of projects cannot be retrieved
    """
    okeanos_token = user.okeanos_token
    list_of_resources = list()
    flavors = get_flavor_id(okeanos_token)
    auth = check_credentials(okeanos_token)
    dict_quotas = auth.get_quotas()
    try:
        list_of_projects = auth.get_projects(state='active')
    except ClientError:
        msg = ' Could not get list of projects'
        raise ClientError(msg, error_get_list_projects)
    # Fetch the ssh keys once (the original issued this call twice and
    # discarded the first result); 'name' in item replaces the removed
    # dict.has_key and works on both Python 2 and 3.
    ssh_keys_names = [item['name'] for item in ssh_key_list(okeanos_token)
                      if 'name' in item]
    # Move the user's system project to the front. Iterate over a copy:
    # mutating a list while iterating it skips elements.
    for project in list(list_of_projects):
        if project['name'] == 'system:' + str(project['id']):
            list_of_projects.remove(project)
            list_of_projects.insert(0, project)
    # Id for ember-data, will use it for store.push the different projects
    ember_project_id = 1
    for project in list_of_projects:
        if project['id'] in dict_quotas:
            quotas = check_quota(okeanos_token, project['id'])
            images = check_images(okeanos_token, project['id'])
            list_of_resources.append(
                retrieve_ClusterCreationParams(flavors, quotas, images,
                                               project['name'], user,
                                               ember_project_id,
                                               ssh_keys_names))
            ember_project_id = ember_project_id + 1
    return list_of_resources
def test___init__(self):
    """Exercise ClientError construction with assorted inputs.

    Each tuple is (msg, status, details, exp_msg, exp_status,
    exp_details); a falsy expectation means "same as the input",
    with a trailing newline appended to the message when missing.
    """
    from kamaki.clients import ClientError
    for msg, status, details, exp_msg, exp_status, exp_details in (
            # Plain messages: passed through unchanged.
            ('some msg', 42, 0.28, 0, 0, 0),
            ('some msg', 'fail', [], 0, 0, 0),
            ('some msg', 42, 'details on error', 0, 0, 0),
            # JSON-encoded server errors: message/code/details extracted.
            ('404 {"ExampleError":'
             ' {"message": "a msg", "code": 42, "details": "dets"}}',
             404, 0, '404 ExampleError (a msg)\n', 42, ['dets']),
            ('404 {"ExampleError":'
             ' {"message": "a msg", "code": 42}}',
             404, 'details on error', '404 ExampleError (a msg)\n', 42, 0),
            # JSON details are appended to the details passed in.
            ('404 {"ExampleError":'
             ' {"details": "Explain your error"}}',
             404, 'details on error', '404 ExampleError', 0,
             ['details on error', 'Explain your error']),
            ('some msg\n', -10, ['details', 'on', 'error'], 0, 0, 0)):
        ce = ClientError(msg, status, details)
        exp_msg = exp_msg or (msg if msg.endswith('\n') else msg + '\n')
        exp_status = exp_status or status
        exp_details = exp_details or details
        self.assertEqual('%s' % ce, exp_msg)
        # Non-integer status codes are normalized to 0.
        self.assertEqual(
            exp_status if isinstance(exp_status, int) else 0, ce.status)
        self.assertEqual(exp_details, ce.details)
def test_enroll_to_project(self, client):
    """server.enroll_to_project delegates to IdentityClient and
    suppresses only the 409 (already enrolled) ClientError."""
    method = 'astavoms.identity.IdentityClient.enroll_to_project'
    snf_admin = identity.IdentityClient(None, None)
    email = '*****@*****.**'
    project = 's0me-proj3ct-1d'
    # Happy path: the call is forwarded verbatim.
    with mock.patch(method) as enroll:
        server.enroll_to_project(snf_admin, email, project)
        enroll.assert_called_once_with(email, project)
    # 409 means the user is already enrolled; must not propagate.
    with mock.patch(method, side_effect=ClientError('err', 409)) as enroll:
        server.enroll_to_project(snf_admin, email, project)
        # User already enrolled, fail but supress the error
        enroll.assert_called_once_with(email, project)
    # Any other ClientError (e.g. 404) must propagate to the caller.
    with mock.patch(method, side_effect=ClientError('err', 404)):
        with self.assertRaises(ClientError):
            server.enroll_to_project(snf_admin, email, project)
def get_from_kamaki_conf(section, option, action=None):
    """
    Process option 'option' from section 'section' from .kamakirc file
    applying optional 'action' to it and return it.

    :param section: (str) .kamakirc section name
    :param option: (str) option name inside the section
    :param action: (str) optional; one of login/cluster/job/hdfs/vre to
        build the matching endpoint url from the option value
    :returns: the raw option value, a built url, or 0 for unknown actions
    :raises ClientError: if the section or option is missing
    """
    parser = RawConfigParser()
    user_home = expanduser('~')
    config_file = join(user_home, ".kamakirc")
    parser.read(config_file)
    try:
        option_value = parser.get(section, option)
    except NoSectionError:
        msg = ' Could not find section \'{0}\' in .kamakirc'.format(section)
        raise ClientError(msg, error_syntax_auth_token)
    except NoOptionError:
        msg = ' Could not find option \'{0}\' in section \'{1}\' in .kamakirc'.format(
            option, section)
        raise ClientError(msg, error_syntax_auth_token)
    if option_value:
        if not action:
            return option_value
        # One mapping replaces the if-chain. The 'login' branch previously
        # used a placeholder-less format string that discarded both
        # arguments; it now builds its url like every other action.
        endpoint_suffixes = {
            'login': login_endpoint,
            'cluster': cluster_endpoint,
            'job': job_endpoint,
            'hdfs': hdfs_endpoint,
            'vre': vre_endpoint,
        }
        if action in endpoint_suffixes:
            return '{0}{1}'.format(option_value, endpoint_suffixes[action])
        logging.log(
            SUMMARY, ' Url to be returned from .kamakirc not specified')
        return 0
def get_user_id(token):
    """Check kamaki and returns user uuid from matching ~okeanos token"""
    astakos = AstakosClient(auth_url, token)
    try:
        logging.info(' Get the uuid')
        return astakos.user_info['id']
    except ClientError:
        raise ClientError('Failed to get uuid from identity server')
def init_plankton(endpoint, token):
    """
    Plankton/Initialize Imageclient.
    ImageClient has all registered images.
    """
    logging.log(REPORT, ' Initialize ImageClient')
    try:
        return ImageClient(endpoint, token)
    except ClientError:
        raise ClientError(' Failed to initialize the Image client')
def check_credentials(token, auth_url=auth_url):
    """Identity,Account/Astakos. Test authentication credentials"""
    logging.log(REPORT, ' Test the credentials')
    try:
        astakos = AstakosClient(auth_url, token)
        astakos.authenticate()
    except ClientError:
        msg = ' Authentication failed with url %s and token %s'\
            % (auth_url, token)
        raise ClientError(msg, error_authentication)
    return astakos
def get_user_name(token):
    """Check kamaki and return user name / email from matching ~okeanos token"""
    cached_client = CachedAstakosClient(auth_url, token)
    uuid = get_user_id(token)
    try:
        logging.info(' Get the user_name')
        # Falls back to '' when the uuid cannot be resolved to a name.
        return cached_client.uuids2usernames((uuid,), token).get(uuid, '')
    except ClientError:
        raise ClientError('Failed to get user_name from identity server')
def init_cyclades(endpoint, token):
    """
    Compute / Initialize Cyclades client.CycladesClient is used
    to create virtual machines
    """
    logging.log(REPORT, ' Initialize a cyclades client')
    try:
        return CycladesClient(endpoint, token)
    except ClientError:
        raise ClientError(' Failed to initialize cyclades client')
def __init__(self, opts):
    """Initialization of YarnCluster data attributes

    :param opts: (dict) user options -- must contain at least 'token',
        'project_name', 'disk_template' and 'cluster_size'; may contain
        'auth_url'.  TODO confirm full expected key set against callers.
    :raises ClientError: if the project exists but carries no quota
    """
    self.opts = opts
    # Master VM ip, placeholder value
    self.HOSTNAME_MASTER_IP = '127.0.0.1'
    # master VM root password file, placeholder value
    self.pass_file = 'PLACEHOLDER'
    self.orka_image_uuid = False
    # List of cluster VMs
    self.server_dict = {}
    # Translate the UI disk-template names to the ganeti template codes.
    if self.opts['disk_template'] == 'Archipelago':
        self.opts['disk_template'] = 'ext_vlmc'
    elif self.opts['disk_template'] == 'Standard':
        self.opts['disk_template'] = 'drbd'
    # project id of project name given as argument
    self.project_id = get_project_id(self.opts['token'],
                                     self.opts['project_name'])
    self.status = {}
    # Instance of an AstakosClient object
    self.auth = check_credentials(self.opts['token'],
                                  self.opts.get('auth_url', auth_url))
    # Check if project has actual quota
    if self.check_project_quota() != 0:
        msg = 'Project %s exists but you have no quota to request' % \
            self.opts['project_name']
        raise ClientError(msg, error_project_quota)
    # ~okeanos endpoints and user id
    self.endpoints, self.user_id = endpoints_and_user_id(self.auth)
    # Instance of CycladesClient
    self.cyclades = init_cyclades(self.endpoints['cyclades'],
                                  self.opts['token'])
    # Instance of CycladesNetworkClient
    self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                              self.opts['token'])
    # Instance of Plankton/ImageClient
    self.plankton = init_plankton(self.endpoints['plankton'],
                                  self.opts['token'])
    # Get resources of pending clusters
    self.pending_quota = retrieve_pending_clusters(
        self.opts['token'], self.opts['project_name'])
    # Ordered table of checker methods, keyed 1..n; run before creation.
    self._DispatchCheckers = {}
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_cluster_size_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_network_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_ip_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_cpu_valid
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_ram_valid
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_disk_valid
def get_account_info(self):
    """Return the account headers as a dict.

    :returns: (dict)
    :raises ClientError: 401 No authorization
    """
    self._assert_account()
    resp = self.head(path4url(self.account), success=(204, 401))
    if resp.status_code == 401:
        raise ClientError("No authorization", status=401)
    return resp.headers
def create_container(self, container):
    """Create a new container under the current account.

    :param container: (str)
    :raises ClientError: 202 Container already exists
    """
    self._assert_account()
    resp = self.put(path4url(self.account, container), success=(201, 202))
    # 201 means created; 202 is the server's "already there" reply.
    if resp.status_code == 202:
        raise ClientError("Container already exists", resp.status_code)
def delete_object(self, obj):
    """Delete an object from the current container.

    :param obj: (str)
    :raises ClientError: 404 Object not found
    """
    self._assert_container()
    target = path4url(self.account, self.container, obj)
    resp = self.delete(target, success=(204, 404))
    if resp.status_code == 404:
        raise ClientError("Object %s not found" % obj, resp.status_code)
def get_user_clusters(token, server_url, choice='clusters'):
    """
    Get by default the clusters of the user. If choice argument is
    different e.g vreservers, returns info of user's VRE servers.
    """
    # NOTE(review): Python 2 except syntax and print statement. The body
    # appears truncated here -- escience_token is never used and nothing
    # is returned; confirm against the complete source file.
    try:
        escience_token = authenticate_escience(token, server_url)
    except TypeError:
        msg = ' Authentication error: Invalid Token'
        raise ClientError(msg, error_authentication)
    except Exception, e:
        print ' ' + str(e.args[0])
def get_image_details(self, image_id, **kwargs):
    """
    :param image_id: (str) image identifier
    :returns: dict
    :raises ClientError: 404 if image not available
    """
    r = self.images_get(image_id, **kwargs)
    try:
        return r.json['image']
    except KeyError:
        # Fix: the detail string carried an unfilled '%d' placeholder
        # (and image ids are strings); interpolate the actual id.
        raise ClientError('Image not available', 404, details=[
            'Image %s not found or not accessible' % image_id])
def del_account_meta(self, metakey):
    """Remove a single account metadatum.

    :param metakey: (str) metadatum key
    :raises ClientError: 404 if the metadatum is not set
    """
    headers = self.get_account_info()
    self.headers = filter_out(
        headers, 'X-Account-Meta-' + metakey, exactMatch=True)
    # Unchanged length means nothing was filtered out, i.e. no such key.
    if len(self.headers) == len(headers):
        raise ClientError('X-Account-Meta-%s not found' % metakey, 404)
    self.post(path4url(self.account), success=202)
def init_cyclades_netclient(endpoint, token):
    """
    Initialize CycladesNetworkClient
    Cyclades Network client needed for all network functions
    e.g. create network,create floating IP
    """
    logging.log(REPORT, ' Initialize a cyclades network client')
    try:
        return CycladesNetworkClient(endpoint, token)
    except ClientError:
        raise ClientError(' Failed to initialize cyclades network client')
def test_create_server(self):
    """create_server: error propagation, the optional-kwargs matrix,
    and forwarding of personality/metadata to servers_post."""
    # A ClientError raised by the POST must reach the caller untouched.
    with patch.object(compute.ComputeClient, 'servers_post',
                      side_effect=ClientError('REQUEST ENTITY TOO LARGE',
                                              status=403)):
        self.assertRaises(ClientError, self.client.create_server,
                          vm_name, fid, img_ref)
    # Every combination of present/absent optional kwargs.
    for params in product(('security_group', None), ('user_data', None),
                          ('availability_zone', None),
                          (None, {'os': 'debian', 'users': 'root'})):
        kwargs = dict()
        for i, k in enumerate(
                ('security_group', 'user_data', 'availability_zone')):
            if params[i]:
                kwargs[k] = params[i]
        with patch.object(compute.ComputeClient, 'servers_post',
                          return_value=FR()) as post:
            r = self.client.create_server(vm_name, fid, img_ref, **kwargs)
            self.assertEqual(r, FR.json['server'])
            exp_json = dict(server=dict(
                flavorRef=fid, name=vm_name, imageRef=img_ref))
            # Missing optionals must still be forwarded as None.
            for k in set(['security_group', 'user_data',
                          'availability_zone']).difference(kwargs):
                kwargs[k] = None
            self.assertEqual(post.mock_calls[-1],
                             call(json_data=exp_json, **kwargs))
            # personality is injected into the request body.
            prsn = 'Personality string (does not work with real servers)'
            self.client.create_server(
                vm_name, fid, img_ref, personality=prsn, **kwargs)
            exp_json['server']['personality'] = prsn
            self.assertEqual(post.mock_calls[-1],
                             call(json_data=exp_json, **kwargs))
            kwargs.pop('personality', None)
            exp_json['server'].pop('personality', None)
            # metadata is injected into the request body the same way.
            mtdt = 'Metadata dict here'
            self.client.create_server(
                vm_name, fid, img_ref, metadata=mtdt, **kwargs)
            exp_json['server']['metadata'] = mtdt
            self.assertEqual(post.mock_calls[-1],
                             call(json_data=exp_json, **kwargs))
def check_user_resources(self):
    """
    Checks user resources before the starting cluster creation.
    Also, returns the flavor id of master and slave VMs and the id of
    the image chosen by the user.

    :returns: (flavor_master, flavor_slaves, image_id)
    :raises ClientError: on an unknown flavor combination, or when the
        image recorded in the database is missing from Pithos+
    """
    flavor_master = self.get_flavor_id('master')
    flavor_slaves = self.get_flavor_id('slaves')
    if flavor_master == 0 or flavor_slaves == 0:
        msg = 'Combination of cpu, ram, disk and disk_template do' \
            ' not match an existing id'
        raise ClientError(msg, error_flavor_id)
    # Called for its raising side effect only; the previous code bound
    # the result to an unused local.
    self.check_all_resources()
    # check image metadata in database and pithos and set orka_image_uuid
    # accordingly
    self.orka_image_uuid = OrkaImage.objects.get(
        image_name=self.opts['os_choice']).image_pithos_uuid
    list_current_images = self.plankton.list_public(True, 'default')
    for image in list_current_images:
        if self.orka_image_uuid == image['id']:
            return flavor_master, flavor_slaves, image['id']
    msg = 'Image {0} exists on database but cannot be found or has different id' \
        ' on Pithos+'.format(self.opts['os_choice'])
    raise ClientError(msg, error_flavor_id)
def list_objects(self, limit=None, marker=None, prefix=None, format=None, delimiter=None, path=None):
    """List the objects of the current container.

    :param limit: (integer) The amount of results requested
    :param marker: (string) Return containers with name lexicographically
        after marker
    :param prefix: (string) Return objects starting with prefix
    :param format: (string) reply format can be json or xml
        (default: json)
    :param delimiter: (string) Return objects up to the delimiter
    :param path: (string) assume prefix = path and delimiter = /
        (overwrites prefix and delimiter)
    :returns: (dict)
    :raises ClientError: 404 Invalid account
    """
    self._assert_container()
    self.set_param('format', format or 'json')
    self.set_param('limit', limit, iff=limit)
    self.set_param('marker', marker, iff=marker)
    # path overrides any explicit prefix/delimiter pair.
    if path:
        self.set_param('path', path)
    else:
        self.set_param('prefix', prefix, iff=prefix)
        self.set_param('delimiter', delimiter, iff=delimiter)
    resp = self.get(
        path4url(self.account, self.container),
        success=(200, 204, 304, 404))
    if resp.status_code == 404:
        raise ClientError(
            "Invalid account (%s) for that container" % self.account,
            resp.status_code)
    if resp.status_code == 304:
        return []
    return resp.json
def check_cluster_size_quotas(self):
    """
    Checks if the user quota is enough to create the requested number
    of VMs.
    """
    dict_quotas = get_user_quota(self.auth)
    vm_quota = dict_quotas[self.project_id]['cyclades.vm']
    # Headroom after subtracting use and pending-cluster reservations.
    available_vm = (vm_quota['limit'] - vm_quota['usage']
                    - self.pending_quota['VMs'])
    if available_vm < self.opts['cluster_size']:
        raise ClientError('Cyclades VMs out of limit',
                          error_quotas_cluster_size)
    return 0
def get_container_info(self, container):
    """Fetch the headers of a container.

    :param container: (str)
    :returns: (dict)
    :raises ClientError: 404 Container does not exist
    """
    self._assert_account()
    resp = self.head(path4url(self.account, container), success=(204, 404))
    if resp.status_code == 404:
        raise ClientError("Container does not exist", resp.status_code)
    return resp.headers
def get_image_id(self):
    """
    Return id of given image
    """
    # Scan the public images for the operating system chosen by the user.
    for image in self.plankton.list_public(True, 'default'):
        if image['name'] == self.opts['os_choice']:
            return image['id']
    # Falling out of the loop means no image matched the requested name.
    raise ClientError(self.opts['os_choice'] + ' is not a valid image',
                      error_image_id)