def bind_okeanos_resources(self):
    """Create every public IP still available to the test user, attach
    them via bind_floating_ip, and return what the selenium test needs.

    Returns a (float_ids, port_ids, net_client) tuple.
    """
    credentials = check_credentials(self.token)
    endpoints, _user_id = endpoints_and_user_id(credentials)
    network_client = init_cyclades_netclient(endpoints['network'], self.token)

    ip_quota = credentials.get_quotas()[self.project_id]['cyclades.floating_ip']
    # How many more floating IPs this project may still allocate.
    spare_ips = ip_quota['limit'] - (ip_quota['usage'] + ip_quota['pending'])

    # Allocate one public IP per remaining quota slot; a failed allocation
    # aborts the whole test run. (range() of a non-positive count is empty,
    # so no extra guard is needed.)
    for _ in range(spare_ips):
        if self.get_flo_net_id(network_client) != 0:
            logging.error('Error in creating float ip')
            sys.exit(error_get_ip)

    # Attach every currently unused floating IP.
    float_ids, port_ids = self.bind_floating_ip(network_client)
    return float_ids, port_ids, network_client
    def bind_okeanos_resources(self):
        """
        Create all available private networks in ~okeanos for the user
        running the test.

        Returns (net_client, network_ids) on success; exits the process
        with error_create_network when a network creation fails, or
        returns error_quotas_network when the quota is exhausted.
        """
        auth = check_credentials(self.token)
        endpoints, user_id = endpoints_and_user_id(auth)
        net_client = init_cyclades_netclient(endpoints['network'], self.token)
        dict_quotas = auth.get_quotas()
        # BUG FIX: this line was indented with a tab, which raises TabError
        # under Python 3 when mixed with the surrounding space indentation.
        project_id = get_project_id()
        limit_net = dict_quotas[project_id]['cyclades.network.private']['limit']
        usage_net = dict_quotas[project_id]['cyclades.network.private']['usage']
        pending_net = dict_quotas[project_id]['cyclades.network.private']['pending']
        available_networks = limit_net - usage_net - pending_net
        network_ids = []
        if available_networks >= 1:
            logging.info(' Private Network quota is ok')
            try:
                for i in range(available_networks):
                    new_network = net_client.create_network(
                        'MAC_FILTERED', 'mycluster ' + str(i),
                        project_id=project_id)
                    network_ids.append(new_network['id'])
                return net_client, network_ids
            except Exception:
                logging.exception('Error in creating network')
                sys.exit(error_create_network)
        else:
            logging.error('Private Network quota exceeded')
            return error_quotas_network
# Example #3
    def bind_okeanos_resources(self):
        """Allocate every floating IP the project's quota still allows,
        attach the unused ones with bind_floating_ip, and hand back what
        the selenium test needs: (float_ids, port_ids, net_client)."""
        astakos = check_credentials(self.token)
        endpoints, _uid = endpoints_and_user_id(astakos)
        net_client = init_cyclades_netclient(endpoints['network'],
                                             self.token)

        ip_quota = astakos.get_quotas()[self.project_id]['cyclades.floating_ip']
        # Remaining headroom in the floating-IP quota.
        remaining = ip_quota['limit'] - ip_quota['usage'] - ip_quota['pending']

        # Create one public IP per free slot; bail out on any failure.
        # range() over a non-positive count yields nothing, so no guard.
        for _ in range(remaining):
            if self.get_flo_net_id(net_client) != 0:
                logging.error('Error in creating float ip')
                sys.exit(error_get_ip)

        # Attach every currently unused floating IP.
        float_ids, port_ids = self.bind_floating_ip(net_client)
        return float_ids, port_ids, net_client
# Example #4
def project_list_flavor_quota(user):
    """Create the list of resources for every project the user has quota on.

    :param user: user object carrying an ``okeanos_token`` attribute.
    :returns: list of ClusterCreationParams, one per project with quota,
        with the user's "system" project moved to the front.
    :raises ClientError: when the active project list cannot be fetched.
    """
    okeanos_token = user.okeanos_token
    list_of_resources = []
    flavors = get_flavor_id(okeanos_token)
    auth = check_credentials(okeanos_token)
    dict_quotas = auth.get_quotas()
    try:
        list_of_projects = auth.get_projects(state='active')
    except ClientError:
        msg = ' Could not get list of projects'
        raise ClientError(msg, error_get_list_projects)
    # Id for ember-data, will use it for store.push the different projects
    ember_project_id = 1
    # BUG FIX: ssh_key_list was fetched twice; one remote call is enough.
    # BUG FIX: dict.has_key() was removed in Python 3 - use the `in` operator.
    ssh_info = ssh_key_list(okeanos_token)
    ssh_keys_names = [item['name'] for item in ssh_info if 'name' in item]
    # Move the user's "system" project to the front of the list.
    # BUG FIX: the original removed/inserted while iterating the same list,
    # which can skip elements; iterate over a shallow copy instead.
    for project in list(list_of_projects):
        if project['name'] == 'system:' + str(project['id']):
            list_of_projects.remove(project)
            list_of_projects.insert(0, project)
    for project in list_of_projects:
        if project['id'] in dict_quotas:
            quotas = check_quota(okeanos_token, project['id'])
            images = check_images(okeanos_token, project['id'])
            list_of_resources.append(
                retrieve_ClusterCreationParams(flavors, quotas, images,
                                               project['name'], user,
                                               ember_project_id,
                                               ssh_keys_names))
            ember_project_id += 1
    return list_of_resources
# Example #5
 def bind_okeanos_resources(self):
     """Create every private network the project's quota still allows.

     Returns (net_client, network_ids) on success; exits the process when
     a network creation fails, or returns error_quotas_network when no
     quota is left.
     """
     astakos = check_credentials(self.token)
     endpoints, _uid = endpoints_and_user_id(astakos)
     net_client = init_cyclades_netclient(endpoints['network'], self.token)
     net_quota = astakos.get_quotas()[self.project_id]['cyclades.network.private']
     available = net_quota['limit'] - net_quota['usage'] - net_quota['pending']
     network_ids = []
     # Guard clause: nothing to create when the quota is used up.
     if available < 1:
         logging.error('Private Network quota exceeded')
         return error_quotas_network
     logging.info(' Private Network quota is ok')
     try:
         for index in range(available):
             created = net_client.create_network(
                 'MAC_FILTERED', 'mycluster ' + str(index),
                 project_id=self.project_id)
             network_ids.append(created['id'])
     except Exception:
         logging.exception('Error in creating network')
         sys.exit(error_create_network)
     return net_client, network_ids
# Example #6
    def create_bare_cluster(self):
        """
        Build the bare virtual cluster from the given arguments: validate
        the requested flavors and quota, resolve the requested image, then
        create one master and clustersize-1 slave VMs.

        Returns the master node ip and the server dict.
        """
        logging.log(REPORT, ' 1.Credentials  and  Endpoints')
        # Locate the user's public ssh key.
        ssh_pub_key = join(expanduser('~'), ".ssh/id_rsa.pub")
        auth = check_credentials(self.opts['token'], self.opts['auth_url'])
        endpoints, user_id = endpoints_and_user_id(auth)
        cyclades = init_cyclades(endpoints['cyclades'], self.opts['token'])
        master_flavor = self.get_flavor_id_master(cyclades)
        slave_flavor = self.get_flavor_id_slave(cyclades)
        if master_flavor == 0 or slave_flavor == 0:
            logging.error('Combination of cpu, ram, disk and disk_template do'
                          ' not match an existing id')

            exit(error_flavor_id)
        # Aggregate cpu/ram/disk the whole cluster will consume.
        slave_count = self.opts['clustersize'] - 1
        req_quotas = {
            'cpu': self.opts['cpu_master'] + self.opts['cpu_slave'] * slave_count,
            'ram': self.opts['ram_master'] + self.opts['ram_slave'] * slave_count,
            'cyclades_disk': self.opts['disk_master'] + self.opts['disk_slave'] * slave_count,
            'vms': self.opts['clustersize'],
        }
        self.check_quota(auth, req_quotas)
        plankton = init_plankton(endpoints['plankton'], self.opts['token'])
        # Resolve the requested image name to an image record; the for-else
        # "else" branch fires only when no image matched.
        for candidate in plankton.list_public(True, 'default'):
            if candidate['name'] == self.opts['image']:
                chosen_image = candidate
                break
        else:
            logging.error(self.opts['image'] + ' is not a valid image option')
            exit(error_image_id)

        logging.log(REPORT, ' 2.Create  virtual  cluster')
        cluster = Cluster(cyclades,
                          prefix=self.opts['name'],
                          flavor_id_master=master_flavor,
                          flavor_id_slave=slave_flavor,
                          image_id=chosen_image['id'],
                          size=self.opts['clustersize'],
                          net_client=init_cyclades_netclient(endpoints['network'],
                                                             self.opts['token']),
                          auth_cl=auth)

        self.HOSTNAME_MASTER_IP, self.server_dict = cluster.create('', ssh_pub_key, '')
        # Give the freshly created machines a moment to become pingable.
        sleep(15)
        logging.log(REPORT, ' Bare cluster has been created.')
        # Return master node ip and server dict.
        return self.HOSTNAME_MASTER_IP, self.server_dict
    def __init__(self, opts):
        """Initialization of YarnCluster data attributes.

        :param opts: dict of cluster options (token, project_name,
            disk_template, cluster_size, ...).
        :raises ClientError: when the project exists but has no quota left.
        """
        self.opts = opts
        # Master VM ip, placeholder value
        self.HOSTNAME_MASTER_IP = '127.0.0.1'
        # master VM root password file, placeholder value
        self.pass_file = 'PLACEHOLDER'
        self.orka_image_uuid = False
        # List of cluster VMs
        self.server_dict = {}
        if self.opts['disk_template'] == 'Archipelago':
            self.opts['disk_template'] = 'ext_vlmc'
        elif self.opts['disk_template'] == 'Standard':
            self.opts['disk_template'] = 'drbd'
        # IMPROVED: decrypt the token once instead of five separate
        # unmask_token calls with identical arguments.
        plain_token = unmask_token(encrypt_key, self.opts['token'])
        # project id of project name given as argument
        self.project_id = get_project_id(plain_token, self.opts['project_name'])
        self.status = {}
        # Instance of an AstakosClient object
        self.auth = check_credentials(plain_token,
                                      self.opts.get('auth_url', auth_url))
        # Check if project has actual quota
        if self.check_project_quota() != 0:
            msg = 'Project %s exists but you have no quota to request' % \
                self.opts['project_name']
            raise ClientError(msg, error_project_quota)
        # ~okeanos endpoints and user id
        self.endpoints, self.user_id = endpoints_and_user_id(self.auth)

        # Instance of CycladesClient
        self.cyclades = init_cyclades(self.endpoints['cyclades'], plain_token)
        # Instance of CycladesNetworkClient
        self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                                  plain_token)
        # Instance of Plankton/ImageClient
        self.plankton = init_plankton(self.endpoints['plankton'], plain_token)
        # Get resources of pending clusters
        self.pending_quota = retrieve_pending_clusters(plain_token,
                                                       self.opts['project_name'])
        # Ordered registry of quota/validity checkers, keyed 1..n.
        self._DispatchCheckers = {}
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_cluster_size_quotas
        # Check for private network availability only when cluster is created
        # and not for Vre server creation
        if self.opts['cluster_size'] > 1:
            self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
                self.check_network_quotas
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_ip_quotas
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_cpu_valid
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_ram_valid
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_disk_valid
# Example #8
    def __init__(self, opts):
        """Initialize YarnCluster data attributes from the options dict."""
        self.opts = opts
        # Master VM ip, placeholder value.
        self.HOSTNAME_MASTER_IP = '127.0.0.1'
        # Master VM root password file, placeholder value.
        self.pass_file = 'PLACEHOLDER'
        self.orka_image_uuid = False
        # Mapping of the cluster's VMs.
        self.server_dict = {}
        # Translate the user-facing disk template names to ~okeanos ones.
        template = self.opts['disk_template']
        if template == 'Archipelago':
            self.opts['disk_template'] = 'ext_vlmc'
        elif template == 'Standard':
            self.opts['disk_template'] = 'drbd'
        # Resolve the project name given as argument to its id.
        self.project_id = get_project_id(self.opts['token'],
                                         self.opts['project_name'])
        self.status = {}
        # AstakosClient instance for authentication.
        self.auth = check_credentials(self.opts['token'],
                                      self.opts.get('auth_url', auth_url))
        # Abort early when the project exists but has no quota left.
        if self.check_project_quota() != 0:
            msg = 'Project %s exists but you have no quota to request' % \
                self.opts['project_name']
            raise ClientError(msg, error_project_quota)
        # ~okeanos endpoints and user id.
        self.endpoints, self.user_id = endpoints_and_user_id(self.auth)

        # Cyclades compute client.
        self.cyclades = init_cyclades(self.endpoints['cyclades'],
                                      self.opts['token'])
        # Cyclades network client.
        self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                                  self.opts['token'])
        # Plankton/Image client.
        self.plankton = init_plankton(self.endpoints['plankton'],
                                      self.opts['token'])
        # Resources already claimed by pending clusters.
        self.pending_quota = retrieve_pending_clusters(
            self.opts['token'], self.opts['project_name'])
        # Registry of quota/validity checkers, keyed 1..n in run order.
        checkers = (self.check_cluster_size_quotas,
                    self.check_network_quotas,
                    self.check_ip_quotas,
                    self.check_cpu_valid,
                    self.check_ram_valid,
                    self.check_disk_valid)
        self._DispatchCheckers = dict(enumerate(checkers, start=1))
# Example #9
 def __init__(self, opts):
     """Initialize cluster state; fall back to the module defaults when no
     options are supplied.

     :param opts: dict of cluster options, or None/empty for defaults.
     """
     # IMPROVED: `len(opts) == 0` was redundant — `not opts` already covers
     # both None and an empty dict.
     if not opts:
         self.opts = _defaults.copy()
     else:
         self.opts = opts
     # Master VM ip, placeholder value.
     self.HOSTNAME_MASTER_IP = '127.0.0.1'
     self.server_dict = {}
     self.status = {}
     self.auth = check_credentials(self.opts['token'], self.opts['auth_url'])
     # Registry of quota/validity checkers, keyed 1..n in run order.
     self._DispatchCheckers = {}
     self._DispatchCheckers[len(self._DispatchCheckers) + 1] = self.check_clustersize_quotas
     self._DispatchCheckers[len(self._DispatchCheckers) + 1] = self.check_network_quotas
     self._DispatchCheckers[len(self._DispatchCheckers) + 1] = self.check_ip_quotas
     self._DispatchCheckers[len(self._DispatchCheckers) + 1] = self.check_cpu_valid
     self._DispatchCheckers[len(self._DispatchCheckers) + 1] = self.check_ram_valid
     self._DispatchCheckers[len(self._DispatchCheckers) + 1] = self.check_disk_valid
def project_list_flavor_quota(user):
    """Create the list of resources for every project the user has quota on.

    :param user: user object carrying an ``okeanos_token`` attribute.
    :returns: list of ClusterCreationParams, one per project with quota,
        with the user's "system" project moved to the front.
    :raises ClientError: when the active project list cannot be fetched.
    """
    okeanos_token = user.okeanos_token
    list_of_resources = []
    flavors = get_flavor_id(okeanos_token)
    auth = check_credentials(okeanos_token)
    dict_quotas = auth.get_quotas()
    try:
        list_of_projects = auth.get_projects(state='active')
    except ClientError:
        msg = ' Could not get list of projects'
        raise ClientError(msg, error_get_list_projects)
    # Id for ember-data, will use it for store.push the different projects
    ember_project_id = 1
    # BUG FIX: ssh_key_list was fetched twice; one remote call is enough.
    # BUG FIX: dict.has_key() was removed in Python 3 - use the `in` operator.
    ssh_info = ssh_key_list(okeanos_token)
    ssh_keys_names = [item['name'] for item in ssh_info if 'name' in item]
    # Move the user's "system" project to the front of the list.
    # BUG FIX: the original removed/inserted while iterating the same list,
    # which can skip elements; iterate over a shallow copy instead.
    for project in list(list_of_projects):
        if project['name'] == 'system:' + str(project['id']):
            list_of_projects.remove(project)
            list_of_projects.insert(0, project)
    for project in list_of_projects:
        if project['id'] in dict_quotas:
            quotas = check_quota(okeanos_token, project['id'])
            images = check_images(okeanos_token, project['id'])
            list_of_resources.append(
                retrieve_ClusterCreationParams(flavors, quotas, images,
                                               project['name'], user,
                                               ember_project_id,
                                               ssh_keys_names))
            ember_project_id += 1
    return list_of_resources
    def __init__(self, opts):
        """Initialization of YarnCluster data attributes.

        :param opts: dict of cluster options (token, project_name,
            disk_template, os_choice, ...).
        :raises ClientError: when the project exists but has no quota left.
        """
        self.opts = opts
        # Master VM ip, placeholder value
        self.HOSTNAME_MASTER_IP = '127.0.0.1'
        # master VM root password file, placeholder value
        self.pass_file = 'PLACEHOLDER'
        self.hadoop_image = False
        # List of cluster VMs
        self.server_dict = {}
        if self.opts['disk_template'] == 'Archipelago':
            self.opts['disk_template'] = 'ext_vlmc'
        elif self.opts['disk_template'] == 'Standard':
            self.opts['disk_template'] = 'drbd'
        # project id of project name given as argument
        self.project_id = get_project_id(self.opts['token'],
                                         self.opts['project_name'])
        self.status = {}
        # Instance of an AstakosClient object
        self.auth = check_credentials(self.opts['token'],
                                      self.opts.get('auth_url',
                                                    auth_url))
        # Check if project has actual quota
        if self.check_project_quota() != 0:
            msg = 'Project %s exists but you have no quota to request' % \
                self.opts['project_name']
            raise ClientError(msg, error_project_quota)
        # ~okeanos endpoints and user id
        self.endpoints, self.user_id = endpoints_and_user_id(self.auth)

        # Instance of CycladesClient
        self.cyclades = init_cyclades(self.endpoints['cyclades'],
                                      self.opts['token'])
        # Instance of CycladesNetworkClient
        self.net_client = init_cyclades_netclient(self.endpoints['network'],
                                                  self.opts['token'])
        # Instance of Plankton/ImageClient
        self.plankton = init_plankton(self.endpoints['plankton'],
                                      self.opts['token'])
        # Get resources of pending clusters
        self.pending_quota = retrieve_pending_clusters(self.opts['token'],
                                                       self.opts['project_name'])
        # check escienceconf flag and set hadoop_image accordingly
        list_current_images = self.plankton.list_public(True, 'default')
        for image in list_current_images:
            if self.opts['os_choice'] == image['name']:
                try:
                    if image['properties']['escienceconf']:
                        image_metadata = json.loads(image['properties']['escienceconf'])
                        if image_metadata['hadoop'] == 'True' and image_metadata['hue'] == 'True':
                            self.hadoop_image = 'hue'
                        elif image_metadata['hadoop'] == 'False':
                            self.hadoop_image = 'debianbase'
                        else:
                            self.hadoop_image = 'hadoopbase'

                # BUG FIX: a bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; catch only what a missing or malformed
                # property can raise (KeyError from the dict lookups,
                # ValueError from json.loads on invalid JSON).
                except (KeyError, ValueError):
                    # Property not set / not valid JSON: plain debian image.
                    self.hadoop_image = 'debianbase'

        # Ordered registry of quota/validity checkers, keyed 1..n.
        self._DispatchCheckers = {}
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_cluster_size_quotas
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_network_quotas
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_ip_quotas
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_cpu_valid
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_ram_valid
        self._DispatchCheckers[len(self._DispatchCheckers) + 1] =\
            self.check_disk_valid