def bind_okeanos_resources(self):
    """Bind every available private network in ~okeanos for the test user.

    Creates as many MAC_FILTERED private networks as the project quota
    allows and returns the network client together with the ids of the
    created networks.  When the quota is exhausted, returns
    error_quotas_network instead.
    """
    auth = check_credentials(self.token)
    endpoints, user_id = endpoints_and_user_id(auth)
    net_client = init_cyclades_netclient(endpoints['network'], self.token)
    dict_quotas = auth.get_quotas()
    project_id = get_project_id()
    # Remaining private-network quota = limit - usage - pending.
    quota = dict_quotas[project_id]['cyclades.network.private']
    available_networks = quota['limit'] - quota['usage'] - quota['pending']
    if available_networks < 1:
        logging.error('Private Network quota exceeded')
        return error_quotas_network
    logging.info(' Private Network quota is ok')
    network_ids = []
    try:
        for i in range(available_networks):
            new_network = net_client.create_network(
                'MAC_FILTERED', 'mycluster ' + str(i),
                project_id=project_id)
            network_ids.append(new_network['id'])
        return net_client, network_ids
    except Exception:
        logging.exception('Error in creating network')
        sys.exit(error_create_network)
def bind_okeanos_resources(self):
    """Create every available public ip for the user running the test.

    Attaches the ips via bind_floating_ip and returns the float/port id
    lists plus the network client, as needed by the selenium test.
    """
    auth = check_credentials(self.token)
    endpoints, user_id = endpoints_and_user_id(auth)
    net_client = init_cyclades_netclient(endpoints['network'], self.token)
    dict_quotas = auth.get_quotas()
    # Find and create available public ips
    project_id = get_project_id()
    ip_quota = dict_quotas[project_id]['cyclades.floating_ip']
    available_ips = ip_quota['limit'] - (ip_quota['usage'] + ip_quota['pending'])
    # Create all available public ips; range() is empty when quota is used up.
    for _ in range(available_ips):
        if self.get_flo_net_id(net_client, project_id) != 0:
            logging.error('Error in creating float ip')
            sys.exit(error_get_ip)
    # Call bind_floating_ip to attach every unused ip
    float_ids, port_ids = self.bind_floating_ip(net_client)
    return float_ids, port_ids, net_client
def __init__(self, opts):
    """Initialization of YarnCluster data attributes.

    :param opts: dict of cluster options (token, project_name,
        disk_template, cluster_size, optionally auth_url, ...).
    :raises ClientError: when the project exists but has no quota left.
    """
    self.opts = opts
    # Master VM ip, placeholder value
    self.HOSTNAME_MASTER_IP = '127.0.0.1'
    # master VM root password file, placeholder value
    self.pass_file = 'PLACEHOLDER'
    self.orka_image_uuid = False
    # List of cluster VMs
    self.server_dict = {}
    # Translate user-facing disk template names to Cyclades flavor names.
    if self.opts['disk_template'] == 'Archipelago':
        self.opts['disk_template'] = 'ext_vlmc'
    elif self.opts['disk_template'] == 'Standard':
        self.opts['disk_template'] = 'drbd'
    # project id of project name given as argument
    # NOTE(review): the token is stored masked; unmask_token is applied
    # before every client call in this variant.
    self.project_id = get_project_id(unmask_token(encrypt_key,
                                                  self.opts['token']),
                                     self.opts['project_name'])
    self.status = {}
    # Instance of an AstakosClient object
    self.auth = check_credentials(unmask_token(encrypt_key,
                                               self.opts['token']),
                                  self.opts.get('auth_url', auth_url))
    # Check if project has actual quota
    if self.check_project_quota() != 0:
        msg = 'Project %s exists but you have no quota to request' % \
            self.opts['project_name']
        raise ClientError(msg, error_project_quota)
    # ~okeanos endpoints and user id
    self.endpoints, self.user_id = endpoints_and_user_id(self.auth)
    # Instance of CycladesClient
    self.cyclades = init_cyclades(self.endpoints['cyclades'],
                                  unmask_token(encrypt_key,
                                               self.opts['token']))
    # Instance of CycladesNetworkClient
    self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                              unmask_token(encrypt_key,
                                                           self.opts['token']))
    # Instance of Plankton/ImageClient
    self.plankton = init_plankton(self.endpoints['plankton'],
                                  unmask_token(encrypt_key,
                                               self.opts['token']))
    # Get resources of pending clusters
    self.pending_quota = retrieve_pending_clusters(
        unmask_token(encrypt_key, self.opts['token']),
        self.opts['project_name'])
    # Checkers keyed 1..n; run in insertion order by the dispatcher.
    self._DispatchCheckers = {}
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_cluster_size_quotas
    # Check for private network availability only when cluster is created
    # and not for Vre server creation
    if self.opts['cluster_size'] > 1:
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_network_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_ip_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_cpu_valid
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_ram_valid
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_disk_valid
def __init__(self, opts):
    """Initialization of YarnCluster data attributes.

    :param opts: dict of cluster options (token, project_name,
        disk_template, optionally auth_url, ...).
    :raises ClientError: when the project exists but has no quota left.
    """
    self.opts = opts
    # Master VM ip, placeholder until the cluster is actually created.
    self.HOSTNAME_MASTER_IP = '127.0.0.1'
    # File holding the master VM root password, placeholder value.
    self.pass_file = 'PLACEHOLDER'
    self.orka_image_uuid = False
    # Mapping of the cluster's VMs.
    self.server_dict = {}
    # Translate user-facing disk template names to Cyclades flavor names.
    template = self.opts['disk_template']
    if template == 'Archipelago':
        self.opts['disk_template'] = 'ext_vlmc'
    elif template == 'Standard':
        self.opts['disk_template'] = 'drbd'
    token = self.opts['token']
    # Project id of the project name given as argument.
    self.project_id = get_project_id(token, self.opts['project_name'])
    self.status = {}
    # Instance of an AstakosClient object.
    self.auth = check_credentials(token,
                                  self.opts.get('auth_url', auth_url))
    # Bail out early if the project exists but has no usable quota.
    if self.check_project_quota() != 0:
        raise ClientError(
            'Project %s exists but you have no quota to request'
            % self.opts['project_name'],
            error_project_quota)
    # ~okeanos endpoints and user id.
    self.endpoints, self.user_id = endpoints_and_user_id(self.auth)
    # Instance of CycladesClient.
    self.cyclades = init_cyclades(self.endpoints['cyclades'], token)
    # Instance of CycladesNetworkClient.
    self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                              token)
    # Instance of Plankton/ImageClient.
    self.plankton = init_plankton(self.endpoints['plankton'], token)
    # Resources already reserved by clusters still being built.
    self.pending_quota = retrieve_pending_clusters(
        token, self.opts['project_name'])
    # Quota/validity checkers keyed 1..n, run in registration order.
    self._DispatchCheckers = {}
    for checker in (self.check_cluster_size_quotas,
                    self.check_network_quotas,
                    self.check_ip_quotas,
                    self.check_cpu_valid,
                    self.check_ram_valid,
                    self.check_disk_valid):
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] = checker
def __init__(self, opts):
    """Initialize test data attributes, falling back to module defaults.

    :param opts: dict of options (token, auth_url, ...); when falsy, a
        copy of the module-level _defaults is used so later mutation of
        self.opts cannot corrupt the shared defaults dict.
    """
    # `not opts` already covers None and an empty dict, so the original
    # redundant `len(opts) == 0` check is dropped.
    self.opts = _defaults.copy() if not opts else opts
    # Master VM ip, placeholder value.
    self.HOSTNAME_MASTER_IP = '127.0.0.1'
    # Mapping of the cluster's VMs.
    self.server_dict = {}
    # project_id and uuid held the same lookup; call get_project_id once.
    self.project_id = get_project_id()
    self.status = {}
    self.uuid = self.project_id
    # Instance of an AstakosClient object.
    self.auth = check_credentials(
        self.opts.get('token', _defaults['token']),
        self.opts.get('auth_url', _defaults['auth_url']))
    # Quota/validity checkers keyed 1..n, run in registration order.
    self._DispatchCheckers = {}
    checkers = (self.check_clustersize_quotas,
                self.check_network_quotas,
                self.check_ip_quotas,
                self.check_cpu_valid,
                self.check_ram_valid,
                self.check_disk_valid)
    for key, checker in enumerate(checkers, start=1):
        self._DispatchCheckers[key] = checker
def __init__(self, opts):
    """Initialization of YarnCluster data attributes.

    :param opts: dict of cluster options (token, project_name,
        disk_template, os_choice, optionally auth_url, ...).
    :raises ClientError: when the project exists but has no quota left.
    """
    self.opts = opts
    # Master VM ip, placeholder value
    self.HOSTNAME_MASTER_IP = '127.0.0.1'
    # master VM root password file, placeholder value
    self.pass_file = 'PLACEHOLDER'
    self.hadoop_image = False
    # List of cluster VMs
    self.server_dict = {}
    # Translate user-facing disk template names to Cyclades flavor names.
    if self.opts['disk_template'] == 'Archipelago':
        self.opts['disk_template'] = 'ext_vlmc'
    elif self.opts['disk_template'] == 'Standard':
        self.opts['disk_template'] = 'drbd'
    # project id of project name given as argument
    self.project_id = get_project_id(self.opts['token'],
                                     self.opts['project_name'])
    self.status = {}
    # Instance of an AstakosClient object
    self.auth = check_credentials(self.opts['token'],
                                  self.opts.get('auth_url', auth_url))
    # Check if project has actual quota
    if self.check_project_quota() != 0:
        msg = 'Project %s exists but you have no quota to request' % \
            self.opts['project_name']
        raise ClientError(msg, error_project_quota)
    # ~okeanos endpoints and user id
    self.endpoints, self.user_id = endpoints_and_user_id(self.auth)
    # Instance of CycladesClient
    self.cyclades = init_cyclades(self.endpoints['cyclades'],
                                  self.opts['token'])
    # Instance of CycladesNetworkClient
    self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                              self.opts['token'])
    # Instance of Plankton/ImageClient
    self.plankton = init_plankton(self.endpoints['plankton'],
                                  self.opts['token'])
    # Get resources of pending clusters
    self.pending_quota = retrieve_pending_clusters(
        self.opts['token'], self.opts['project_name'])
    # check escienceconf flag and set hadoop_image accordingly
    list_current_images = self.plankton.list_public(True, 'default')
    for image in list_current_images:
        if self.opts['os_choice'] == image['name']:
            try:
                if image['properties']['escienceconf']:
                    image_metadata = json.loads(
                        image['properties']['escienceconf'])
                    if (image_metadata['hadoop'] == 'True'
                            and image_metadata['hue'] == 'True'):
                        self.hadoop_image = 'hue'
                    elif image_metadata['hadoop'] == 'False':
                        self.hadoop_image = 'debianbase'
                    else:
                        self.hadoop_image = 'hadoopbase'
            except (KeyError, ValueError):
                # Narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. KeyError: property not
                # set; ValueError: malformed JSON (JSONDecodeError is a
                # ValueError subclass).
                # if property hasn't been set then hadoop_image flag is false
                self.hadoop_image = 'debianbase'
    # Quota/validity checkers keyed 1..n, run in registration order.
    self._DispatchCheckers = {}
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_cluster_size_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_network_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_ip_quotas
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_cpu_valid
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_ram_valid
    self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
        self.check_disk_valid