Example #1
    def __start_keystone_session(
            self, retries=3, ca_cert=None, insecure=not VERIFY_SSL):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if insecure:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=False)
                elif ca_cert:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=ca_cert)
                else:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth)
                self.keystone_session.get_auth_headers()
                return

            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone token, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()
Example #2
    def __start_keystone_session(self,
                                 retries=3,
                                 ca_cert=None,
                                 insecure=not VERIFY_SSL):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if insecure:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=False)
                elif ca_cert:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=ca_cert)
                else:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth)
                self.keystone_session.get_auth_headers()
                return

            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone token, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()
Example #3
 def __init__(self, auth=None):
     super().__init__()
     self.scrapers = Group()
     self.scrape_pool = Pool(size=Config().scraper.pool_size)
     self.session = Session(auth=auth)
     self.session.mount(
         "http://",
         HTTPAdapter(
             max_retries=Retry(
                 total=Config().scraper.max_retry,
                 connect=Config().scraper.max_retry,
                 read=Config().scraper.max_retry,
                 backoff_factor=0.3,
             ),
             pool_connections=10,
         ),
     )
     metric_types = {}
     for metric_config in Config().metrics:
         if metric_config.uve_type not in metric_types:
             metric_types[metric_config.uve_type] = []
         metric_types[metric_config.uve_type].append(metric_config)
     for uve_type, metric_configs in metric_types.items():
         self.append(
             MetricTypeCollection(
                 self.session,
                 uve_type,
                 metric_configs,
                 self.scrapers,
                 self.scrape_pool,
             ))
Example #4
 def create_keystone_session(self):
     try:
         auth = KeystonePassword(**self.__keystoneCredentials)
         session = KeystoneSession(auth=auth, verify=self.__certificatesPath)
         session.get_project_id()
         return session
     except Exception as error:
         raise Exception('Connection to Keystone failed: {}'.format(error))
Example #5
def get_nodes(admin_ip):
    keystone_auth = V2Password(auth_url="http://{}:5000/v2.0".format(admin_ip),
                               username=KEYSTONE_CREDS['username'],
                               password=KEYSTONE_CREDS['password'],
                               tenant_name=KEYSTONE_CREDS['tenant_name'])
    keystone_session = KeystoneSession(auth=keystone_auth, verify=False)
    nodes = keystone_session.get('/nodes',
                                 endpoint_filter={'service_type': 'fuel'})
    return nodes.json()
Example #6
def _token_to_keystone_scoped_project(
        auth_url, token,
        project_name, domain_name="default"):
    """
    Given an auth_url and scoped/unscoped token:
    Create an auth, session, and token for a specific project_name and domain_name (required to access the service catalog for neutron/nova/etc.).
    """
    auth = v3.Token(auth_url=auth_url, token=token, project_name=project_name, project_domain_id=domain_name)
    sess = Session(auth=auth)
    token = sess.get_token()
    return (auth, sess, token)
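A minimal usage sketch of the helper above; the auth URL, token variable, and project name are illustrative placeholders, not values from the original project:

    # Hedged example: exchange an existing (possibly unscoped) token for a
    # project-scoped auth/session/token triple.
    auth, sess, scoped_token = _token_to_keystone_scoped_project(
        auth_url='http://keystone.example.com:5000/v3',
        token=unscoped_token,
        project_name='demo')
    # `sess` now carries the scoped token, so it can be handed to clients
    # that need the service catalog (nova, neutron, ...).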
Example #7
def get_heat():
    keystone_auth = V2Password(auth_url=os.environ['OS_AUTH_URL'],
                               username=os.environ['OS_USERNAME'],
                               password=os.environ['OS_PASSWORD'],
                               tenant_name=os.environ['OS_TENANT_NAME'])
    session = KeystoneSession(auth=keystone_auth, verify=False)
    endpoint_url = session.get_endpoint(service_type='orchestration',
                                        endpoint_type='publicURL')
    heat = heatclient.Client(version='1',
                             endpoint=endpoint_url,
                             token=session.get_token())
    return heat
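A short usage sketch, assuming the OS_* environment variables read by get_heat() are set in the calling environment:

    # Hedged example: list the existing Heat stacks via the client built above.
    heat = get_heat()
    for stack in heat.stacks.list():
        print(stack.stack_name)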
Example #8
def get_nodes(admin_ip):
    keystone_auth = V2Password(
        auth_url="http://{}:5000/v2.0".format(admin_ip),
        username=KEYSTONE_CREDS['username'],
        password=KEYSTONE_CREDS['password'],
        tenant_name=KEYSTONE_CREDS['tenant_name'])
    keystone_session = KeystoneSession(auth=keystone_auth, verify=False)
    nodes = keystone_session.get(
        '/nodes',
        endpoint_filter={'service_type': 'fuel'}
    )
    return nodes.json()
Example #9
def _connect_to_keystone_auth_v3(auth_url, auth_token, project_name,
                                 domain_name, **kwargs):
    """
    Given an auth_url and auth_token,
    authenticate with keystone version 3 to get a scoped token,
    then exchange the token to receive an auth, session, and token scoped to a specific project_name and domain_name.
    """
    token_auth = identity.Token(auth_url=auth_url,
                                token=auth_token,
                                project_domain_id=domain_name,
                                project_name=project_name)
    token_sess = Session(auth=token_auth)
    token_token = token_sess.get_token()
    return (token_auth, token_sess, token_token)
Example #10
def _token_to_keystone_scoped_project(auth_url,
                                      token,
                                      project_name,
                                      domain_name="default"):
    """
    Given an auth_url and scoped/unscoped token:
    Create an auth, session, and token for a specific project_name and domain_name (required to access the service catalog for neutron/nova/etc.).
    """
    auth = v3.Token(auth_url=auth_url,
                    token=token,
                    project_name=project_name,
                    project_domain_id=domain_name)
    sess = Session(auth=auth)
    token = sess.get_token()
    return (auth, sess, token)
Example #11
def _connect_to_keystone_password(
        auth_url, username, password,
        project_name, user_domain_name=None, project_domain_name=None, **kwargs):
    """
    Given a username and password,
    authenticate with keystone to get an unscoped token,
    then exchange the token to receive an auth, session, and token scoped to a specific project_name and domain_name.
    """
    password_auth = identity.Password(
        auth_url=auth_url,
        username=username, password=password, project_name=project_name,
        user_domain_name=user_domain_name, project_domain_name=project_domain_name)
    password_sess = Session(auth=password_auth)
    password_token = password_sess.get_token()
    return (password_auth, password_sess, password_token)
Example #12
def _connect_to_keystone_auth_v3(
        auth_url, auth_token, project_name, domain_name, **kwargs):
    """
    Given an auth_url and auth_token,
    authenticate with keystone version 3 to get a scoped token,
    then exchange the token to receive an auth, session, and token scoped to a specific project_name and domain_name.
    """
    token_auth = identity.Token(
        auth_url=auth_url,
        token=auth_token,
        project_domain_id=domain_name,
        project_name=project_name)
    token_sess = Session(auth=token_auth)
    token_token = token_sess.get_token()
    return (token_auth, token_sess, token_token)
Example #13
    def __init__(self, admin_node_ip=None, session=None, **kwargs):
        if session:
            logger.info(
                'Initialization of NailgunClient using shared session \n'
                '(auth_url={})'.format(session.auth.auth_url))
            self.session = session
        else:
            warn(
                'Initialization of NailgunClient by IP is deprecated, '
                'please use keystoneauth1.session.Session',
                DeprecationWarning)

            if FORCE_HTTPS_MASTER_NODE:
                url = "https://{0}:8443".format(admin_node_ip)
            else:
                url = "http://{0}:8000".format(admin_node_ip)
            logger.info('Initiate Nailgun client with url %s', url)
            keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)

            creds = dict(KEYSTONE_CREDS, **kwargs)

            auth = V2Password(
                auth_url=keystone_url,
                username=creds['username'],
                password=creds['password'],
                tenant_name=creds['tenant_name'])
            # TODO: in v3 project_name

            self.session = KeystoneSession(auth=auth, verify=False)
Example #14
def session():
    auth_kwargs = dict(auth_url=get("auth_url"))

    token = get("token")

    if token:
        auth_klass = v3.Token
        auth_kwargs.update(token=token)
    else:
        auth_klass = v3.Password
        auth_kwargs.update(
            username=get("username"),
            user_domain_name=get("user_domain_name"),
            password=get("password"),
        )

    project_id = get("project_id")

    if project_id:
        auth_kwargs.update(project_id=project_id)
    else:
        auth_kwargs.update(
            project_name=get("project_name"),
            project_domain_name=get("project_domain_name"),
        )

    auth = auth_klass(**auth_kwargs)
    sess = Session(auth=auth)
    return Adapter(session=sess,
                   interface=get("interface"),
                   region_name=get("region_name"))
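Because the function returns a keystoneauth1 Adapter rather than a bare Session, the interface and region defaults travel with it. A hedged usage sketch, assuming the get() config helper resolves valid credentials:

    # Hedged example: token and endpoint lookups pick up the Adapter defaults.
    adapter = session()
    print(adapter.get_token())
    print(adapter.get_endpoint(service_type='identity'))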
Example #15
def get_session(auth_url,
                project_id,
                project_domain_id,
                admin_username,
                admin_password,
                admin_domain_id,
                api_version='3',
                identity_type='identity',
                original_ip=None,
                verify=False,
                cache=None,
                timeout=None):

    assert project_id, 'Invalid project received for keystone session: {}'.format(
        project_id)

    auth_plugin = FleioKeystoneAuthPlugin(auth_url=auth_url,
                                          project_id=project_id,
                                          project_domain_id=project_domain_id,
                                          admin_username=admin_username,
                                          admin_password=admin_password,
                                          admin_domain_id=admin_domain_id,
                                          api_version=api_version,
                                          identity_type=identity_type,
                                          cache=cache)

    return Session(auth=auth_plugin,
                   original_ip=original_ip,
                   verify=verify,
                   timeout=timeout)
Example #16
def keystone_session(env_overrides: dict = {}) -> Session:
    """Obtain Keystone authentication credentials for given OpenStack RC params.

    Args:
        env_overrides (dict): a dictionary of OpenStack RC parameters. These
            parameters are assumed to be as if they were pulled off of the
            environment, e.g. are like {'OS_USERNAME': '', 'OS_PASSWORD: ''}
            with uppercase and underscores.

    Returns:
        keystoneauth1.session.Session: a KSA session object, which can be used
            to authenticate any OpenStack client.
    """
    # We are abusing the KSA loading mechanism here. The arg parser will default
    # the various OpenStack auth params from the environment, which is what
    # we're after.
    fake_argv = [
        f'--{key.lower().replace("_", "-")}={value}'
        for key, value in env_overrides.items()
        # NOTE(jason): we ignore some environment variables, as they are not
        # supported as KSA command-line args.
        if key not in ['OS_IDENTITY_API_VERSION']
    ]
    parser = argparse.ArgumentParser()
    loading.cli.register_argparse_arguments(
        parser, fake_argv, default='token')
    loading.session.register_argparse_arguments(parser)
    loading.adapter.register_argparse_arguments(parser)
    args = parser.parse_args(fake_argv)
    auth = loading.cli.load_from_argparse_arguments(args)
    return Session(auth=auth)
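A hedged usage sketch; the OS_* values are placeholders, and OS_AUTH_TYPE is passed explicitly because the loader above defaults to the token plugin:

    # Hedged example: RC-style overrides are turned into fake CLI args by
    # keystone_session() and loaded through the KSA plugin machinery.
    sess = keystone_session({
        'OS_AUTH_TYPE': 'v3password',
        'OS_AUTH_URL': 'http://keystone.example.com:5000/v3',
        'OS_USERNAME': 'demo',
        'OS_PASSWORD': 'secret',
        'OS_PROJECT_NAME': 'demo',
        'OS_USER_DOMAIN_NAME': 'Default',
        'OS_PROJECT_DOMAIN_NAME': 'Default',
    })
    print(sess.get_token())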
Example #17
def get_resource_utilization(vim: vims.OpenStackVim):
    try:
        nova = Client(
            version="2",
            session=Session(
                auth=get_plugin_loader("password").load_from_options(
                    auth_url="http://{}/identity".format(vim.address),
                    username=vim.username,
                    password=vim.password,
                    user_domain_id="default",
                    project_id=vim.tenant.id,
                ),
                timeout=5,
            ),
        )
        limits = nova.limits.get(tenant_id=vim.tenant.id).to_dict()["absolute"]

        return {
            "cores": {
                "used": limits["totalCoresUsed"],
                "total": limits["maxTotalCores"],
            },
            "memory": {
                "used": limits["totalRAMUsed"],
                "total": limits["maxTotalRAMSize"],
            },
        }

    except Unauthorized:
        raise VimConnectionError(
            "Authorization error. Please check the tenant id, username, and password."
        )
    except Exception as e:
        raise VimConnectionError(str(e))
Example #18
class MetricCollection(UserList):
    """
    MetricCollection aggregates all metrics from config by uve_type.

    For each uve_type a MetricTypeCollection class is created.
    """
    def __init__(self, auth=None):
        super().__init__()
        self.scrapers = Group()
        self.scrape_pool = Pool(size=Config().scraper.pool_size)
        self.session = Session(auth=auth)
        self.session.mount(
            "http://",
            HTTPAdapter(
                max_retries=Retry(
                    total=Config().scraper.max_retry,
                    connect=Config().scraper.max_retry,
                    read=Config().scraper.max_retry,
                    backoff_factor=0.3,
                ),
                pool_connections=10,
            ),
        )
        metric_types = {}
        for metric_config in Config().metrics:
            if metric_config.uve_type not in metric_types:
                metric_types[metric_config.uve_type] = []
            metric_types[metric_config.uve_type].append(metric_config)
        for uve_type, metric_configs in metric_types.items():
            self.append(
                MetricTypeCollection(
                    self.session,
                    uve_type,
                    metric_configs,
                    self.scrapers,
                    self.scrape_pool,
                ))

    def scrape(self):
        for instance in self:
            instance.scrape()
        try:
            self.scrapers.join()
        except KeyboardInterrupt:
            self.scrape_pool.kill(StopScrape)
            self.scrapers.kill(StopScrape)
        return
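A minimal usage sketch for the class above, assuming Config() has already been initialised and that auth is a requests-style credential (the Session being mounted with an HTTPAdapter suggests requests.Session):

    # Hedged example: build the collection and run a single scrape pass.
    collection = MetricCollection(auth=('scraper', 'secret'))
    collection.scrape()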
Example #19
 def setup_os_session(self):
     os_username = self.global_options.get('os_username')
     if not os_username:
         return
     os_username += self.global_options.get('user_suffix', '')
     mpw = MasterPassword(os_username, self.password)
     host = "identity-3." + self.domain.split('.', 1)[1]
     password = mpw.derive('long', host)
     auth = Password(
         auth_url='https://' + host + '/v3',
         username=os_username,
         user_domain_name=self.global_options.get('os_user_domain_name'),
         project_name=self.global_options.get('os_project_name'),
         project_domain_name=self.global_options.get('os_project_domain_name'),
         password=password,
     )
     self.os_session = Session(auth=auth)
Example #20
def vitrageclient(request, password=None):
    endpoint = base.url_for(request, 'identity')
    token_id = request.user.token.id
    tenant_name = request.user.tenant_name
    project_domain_id = request.user.token.project.get('domain_id', 'Default')
    auth = Token(auth_url=endpoint, token=token_id,
                 project_name=tenant_name,
                 project_domain_id=project_domain_id)
    session = Session(auth=auth, timeout=600)
    return vitrage_client.Client('1', session)
Example #21
    def os_session(self):
        if self._os_session is None:
            from keystoneauth1.identity import v3
            from keystoneauth1.session import Session

            self._os_session = Session(
                auth=v3.Password(**self.configuration['os_auth']),
                verify=self.configuration['session'].get('cacert', False),
                cert=self.configuration['session'].get('cert'))
        return self._os_session
Example #22
 def session(self):
     if not self._session:
         auth_kwargs = dict(auth_url=self.auth_url, username=self.username,
                            password=self.password, project_name=self.tenant)
         if self.keystone_version == 3:
             auth_kwargs.update(dict(user_domain_id=self.domain_id,
                                     project_domain_name=self.domain_id))
         pass_auth = Password(**auth_kwargs)
         self._session = Session(auth=pass_auth, verify=False)
     return self._session
Example #23
def actions(request, action, nodetype):
    endpoint = base.url_for(request, 'identity')
    token_id = request.user.token.id
    tenant_name = request.user.tenant_name
    project_domain_id = request.user.token.project.get('domain_id', 'Default')
    auth = Token(auth_url=endpoint, token=token_id,
                 project_name=tenant_name,
                 project_domain_id=project_domain_id)
    session = Session(auth=auth, timeout=600)
    result = action_manager.ActionManager.getinfo(session, str(action), request)
    return result
Example #24
 def create_admin_session(self):
     keystone_authtoken = {
         'auth_url': config.OS_AUTH_URL,
         'username': config.OS_USERNAME,
         'password': config.OS_PASSWORD,
         'project_name': config.OS_PROJECT_NAME,
         'user_domain_name': config.OS_USER_DOMAIN_NAME,
         'project_domain_name': config.OS_PROJECT_DOMAIN_NAME
     }
     auth = v3.Password(**keystone_authtoken)
     return Session(auth=auth)
Example #25
def get_keystone_client():
    auth = Password(**keystone_args_from_env())
    session = Session(auth=auth,
                      app_name='keystone-init',
                      user_agent='keystone-init',
                      timeout=KEYSTONE_TIMEOUT,
                      verify=KEYSTONE_VERIFY,
                      cert=KEYSTONE_CERT)

    discover = Discover(session=session)
    return discover.create_client()
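Discover negotiates the Identity API version exposed at the configured endpoint, so the concrete client class depends on the deployment. A hedged usage sketch, assuming a v3 endpoint:

    # Hedged example: list projects with the auto-discovered client.
    ks = get_keystone_client()
    for project in ks.projects.list():
        print(project.name)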
Example #26
    def _init_barbican_client(self):
        """Creates barbican client instance.

        Verifies that client can communicate with Barbican, retrying
        multiple times in case either Barbican or Keystone services are
        still starting up.
        """
        max_attempts = 5
        sleep_time = 5
        n_attempts = 0
        while n_attempts <= max_attempts:
            n_attempts += 1
            try:
                if self.auth_version == "v3":
                    auth = v3.Password(
                        username=self.username,
                        password=self.password,
                        auth_url=self.auth_url,
                        user_domain_name=self.user_domain_name,
                        project_domain_name=self.project_domain_name,
                        project_name=self.project_name)

                else:
                    # assume v2 auth
                    auth = v2.Password(
                        username=self.username,
                        password=self.password,
                        auth_url=self.auth_url,
                        tenant_name=self.tenant_name)

                # NOTE: Session is deprecated in keystoneclient 2.1.0
                # and will be removed in a future keystoneclient release.
                sess = Session(auth=auth)
                self.barbican = Client(session=sess)

                # test barbican service
                self.barbican.containers.list()

                # success
                LOG.debug(
                    "Barbican client initialized using Keystone %s "
                    "authentication." % self.auth_version)
                break

            except Exception as exc:
                if n_attempts < max_attempts:
                    LOG.debug("Barbican client initialization failed. "
                              "Trying again.")
                    time.sleep(sleep_time)
                else:
                    raise InvalidBarbicanConfig(
                        "Unable to initialize Barbican client. %s" %
                        exc.message)
Example #27
def _connect_to_keystone_password(auth_url,
                                  username,
                                  password,
                                  project_name,
                                  user_domain_name=None,
                                  project_domain_name=None,
                                  **kwargs):
    """
    Given a username and password,
    authenticate with keystone to get an unscoped token,
    then exchange the token to receive an auth, session, and token scoped to a specific project_name and domain_name.
    """
    password_auth = identity.Password(auth_url=auth_url,
                                      username=username,
                                      password=password,
                                      project_name=project_name,
                                      user_domain_name=user_domain_name,
                                      project_domain_name=project_domain_name)
    password_sess = Session(auth=password_auth)
    password_token = password_sess.get_token()
    return (password_auth, password_sess, password_token)
Example #28
def get_os_session(resource_config: OSResourceConfig,
                   logger: Logger) -> Session:
    logger.debug("Getting OpenStack Session")
    auth = v3.Password(
        auth_url=resource_config.controller_url,
        username=resource_config.user,
        password=resource_config.password,
        project_name=resource_config.os_project_name,
        user_domain_id=resource_config.os_domain_name,
        project_domain_id=resource_config.os_domain_name,
    )
    return Session(auth=auth, verify=False)
Example #29
File: utils.py Project: neujie/fuxi
def get_keystone_session(**kwargs):
    keystone_conf = CONF.keystone
    config = {}
    config['auth_url'] = keystone_conf.auth_url
    config['username'] = keystone_conf.admin_user
    config['password'] = keystone_conf.admin_password
    config['tenant_name'] = keystone_conf.admin_tenant_name
    config['token'] = keystone_conf.admin_token
    config.update(kwargs)

    if keystone_conf.auth_insecure:
        verify = False
    else:
        verify = keystone_conf.auth_ca_cert

    return Session(auth=_openstack_auth_from_config(**config), verify=verify)
Example #30
 def __init__(self,
              auth,
              image,
              flavor,
              network=None,
              az=None,
              count=1,
              api_timeout=60,
              build_timeout=120,
              callhome_timeout=300,
              test_script=None,
              console_logs=None,
              shim_type='bash',
              cloud_init_type='cloud-init',
              no_cleanup_on_error=False,
              **kwargs):
     super(SimpleTest, self).__init__(**kwargs)
     self.client = Client(
         '2', session=Session(auth=auth), timeout=api_timeout)
     self.image = image
     self.flavor = flavor
     self.network = network
     self.az = az
     self.count = count
     self.api_timeout = api_timeout
     self.build_timeout = build_timeout
     self.callhome_timeout = callhome_timeout
     self.test_script = test_script
     if console_logs is not None:
         self.console_logs = os.path.abspath(console_logs)
     else:
         self.console_logs = None
     self.servers = []
     self.userdata = None
     self.shim_type = shim_type
     self.cloud_init_type = cloud_init_type
     self.no_cleanup_on_error = no_cleanup_on_error
     self.next_state(self.state_prepare)
Example #31
    project_domain_id='default',
    user_domain_id='default',
    username='******',
    password='******',
    # The `plugin_creator` of `_create_auth_plugin` automatically adds the
    # V3, but here we have to add it manually.
    auth_url="http://%s/identity/v3" % OS['url'],
    # Allow fetching a new token if the current one is going to expire
    reauthenticate=True,
    # Project scoping is mandatory to get the service catalog filled properly.
    project_name='admin',  # for project's scoping
    include_catalog=True,  # include the service catalog in the token
)

print(vars(auth))
sess = Session(auth=auth)

print("no auth_ref (token) %s" % auth.auth_ref)

# import ipdb; ipdb.set_trace()

# print(sess.get("http://%s/identity/v3" % OS['url']))
# Authenticate
auth.get_access(sess)
auth_ref = auth.auth_ref
print("Auth Token: %s" % auth_ref.auth_token)

import ipdb
ipdb.set_trace()

# Service catalog
Example #32
class Common(object):
    """Common."""  # TODO documentation

    def __make_endpoint(self, endpoint):
        parse = urllib.parse.urlparse(endpoint)
        return parse._replace(
            netloc='{}:{}'.format(
                self.controller_ip, parse.port)).geturl()

    def __init__(self, controller_ip, user, password, tenant):
        self.controller_ip = controller_ip

        self.keystone_session = None

        if DISABLE_SSL:
            auth_url = 'http://{0}:5000/v2.0/'.format(self.controller_ip)
            path_to_cert = None
        else:
            auth_url = 'https://{0}:5000/v2.0/'.format(self.controller_ip)
            path_to_cert = PATH_TO_CERT

        insecure = not VERIFY_SSL

        logger.debug('Auth URL is {0}'.format(auth_url))

        self.__keystone_auth = V2Password(
            auth_url=auth_url,
            username=user,
            password=password,
            tenant_name=tenant)  # TODO: in v3 project_name

        self.__start_keystone_session(ca_cert=path_to_cert, insecure=insecure)

    @property
    def keystone(self):
        return KeystoneClient(session=self.keystone_session)

    @property
    def glance(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='image'))
        return GlanceClient(
            version='1',
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def neutron(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='network'))
        return NeutronClient(
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def nova(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='compute'))
        return NovaClient(
            version='2',
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def cinder(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='volume'))
        return CinderClient(
            version='3',
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def heat(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='orchestration'))
        return HeatClient(
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def ironic(self):
        try:
            endpoint = self.__make_endpoint(
                self._get_url_for_svc(service_type='baremetal'))
            return get_ironic_client('1', session=self.keystone_session,
                                     insecure=True, ironic_url=endpoint)
        except ClientException as e:
            logger.warning('Could not initialize ironic client {0}'.format(e))
            raise

    @property
    def keystone_access(self):
        return self.__keystone_auth.get_access(session=self.keystone_session)

    def _get_url_for_svc(
            self, service_type=None, interface='public',
            region_name=None, service_name=None,
            service_id=None, endpoint_id=None
    ):
        return self.keystone_access.service_catalog.url_for(
            service_type=service_type, interface=interface,
            region_name=region_name, service_name=service_name,
            service_id=service_id, endpoint_id=endpoint_id
        )

    def goodbye_security(self):
        secgroup_list = self.nova.security_groups.list()
        logger.debug("Security list is {0}".format(secgroup_list))
        secgroup_id = [i.id for i in secgroup_list if i.name == 'default'][0]
        logger.debug("Id of security group default is {0}".format(
            secgroup_id))
        logger.debug('Permit all TCP and ICMP in security group default')
        self.nova.security_group_rules.create(secgroup_id,
                                              ip_protocol='tcp',
                                              from_port=1,
                                              to_port=65535)
        self.nova.security_group_rules.create(secgroup_id,
                                              ip_protocol='icmp',
                                              from_port=-1,
                                              to_port=-1)

    def update_image(self, image, **kwargs):
        self.glance.images.update(image.id, **kwargs)
        return self.glance.images.get(image.id)

    def delete_image(self, image_id):
        return self.glance.images.delete(image_id)

    def create_key(self, key_name):
        logger.debug('Try to create key {0}'.format(key_name))
        return self.nova.keypairs.create(key_name)

    def create_instance(self, flavor_name='test_flavor', ram=64, vcpus=1,
                        disk=1, server_name='test_instance', image_name=None,
                        neutron_network=True, label=None):
        logger.debug('Try to create instance')

        start_time = time.time()
        exc_type, exc_value, exc_traceback = None, None, None
        while time.time() - start_time < 100:
            try:
                if image_name:
                    image = [i.id for i in self.nova.images.list()
                             if i.name == image_name]
                else:
                    image = [i.id for i in self.nova.images.list()]
                break
            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logger.warning('Ignoring exception: {!r}'.format(e))
                logger.debug(traceback.format_exc())
        else:
            if all((exc_type, exc_traceback, exc_value)):
                six.reraise(exc_type, exc_value, exc_traceback)
            raise Exception('Can not get image')

        kwargs = {}
        if neutron_network:
            net_label = label if label else 'net04'
            network = self.nova.networks.find(label=net_label)
            kwargs['nics'] = [{'net-id': network.id, 'v4-fixed-ip': ''}]

        logger.info('image uuid is {0}'.format(image))
        flavor = self.nova.flavors.create(
            name=flavor_name, ram=ram, vcpus=vcpus, disk=disk)
        logger.info('flavor is {0}'.format(flavor.name))
        server = self.nova.servers.create(
            name=server_name, image=image[0], flavor=flavor, **kwargs)
        logger.info('server is {0}'.format(server.name))
        return server

    @logwrap
    def get_instance_detail(self, server):
        details = self.nova.servers.get(server)
        return details

    def verify_instance_status(self, server, expected_state):
        def _verify_instance_state():
            curr_state = self.get_instance_detail(server).status
            assert_equal(expected_state, curr_state)

        try:
            _verify_instance_state()
        except AssertionError:
            logger.debug('Instance is not {0}, lets provide it the last '
                         'chance and sleep 60 sec'.format(expected_state))
            time.sleep(60)
            _verify_instance_state()

    def delete_instance(self, server):
        logger.debug('Try to delete instance')
        self.nova.servers.delete(server)

    def create_flavor(self, name, ram, vcpus, disk, flavorid="auto",
                      ephemeral=0, extra_specs=None):
        flavor = self.nova.flavors.create(name, ram, vcpus, disk, flavorid,
                                          ephemeral=ephemeral)
        if extra_specs:
            flavor.set_keys(extra_specs)
        return flavor

    def delete_flavor(self, flavor):
        return self.nova.flavors.delete(flavor)

    def create_aggregate(self, name, availability_zone=None,
                         metadata=None, hosts=None):
        aggregate = self.nova.aggregates.create(
            name=name, availability_zone=availability_zone)
        for host in hosts or []:
            aggregate.add_host(host)
        if metadata:
            aggregate.set_metadata(metadata)
        return aggregate

    def delete_aggregate(self, aggregate, hosts=None):
        for host in hosts or []:
            self.nova.aggregates.remove_host(aggregate, host)
        return self.nova.aggregates.delete(aggregate)

    def __start_keystone_session(
            self, retries=3, ca_cert=None, insecure=not VERIFY_SSL):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if insecure:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=False)
                elif ca_cert:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=ca_cert)
                else:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth)
                self.keystone_session.get_auth_headers()
                return

            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone token, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()

    @staticmethod
    def rebalance_swift_ring(controller_ip, retry_count=5, sleep=600):
        """Check Swift ring and rebalance it if needed.

        Replication should be performed on primary controller node.
        Retry check several times. Wait for replication due to LP1498368.
        """
        ssh = SSHManager()
        cmd = "/usr/local/bin/swift-rings-rebalance.sh"
        logger.debug('Check swift ring and rebalance it.')
        for _ in xrange(retry_count):
            try:
                checkers.check_swift_ring(controller_ip)
                break
            except AssertionError:
                result = ssh.execute_on_remote(ip=controller_ip, cmd=cmd)
                logger.debug("command execution result is {0}".format(result))
        else:
            checkers.check_swift_ring(controller_ip)
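A hedged usage sketch for the Common wrapper above; the controller IP and credentials are placeholders:

    # Hedged example: all service clients share the keystone session that
    # __init__ builds via __start_keystone_session().
    os_conn = Common('10.109.0.2', 'admin', 'admin', 'admin')
    for server in os_conn.nova.servers.list():
        print(server.name)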
Example #33
 def from_argparse(cls, args):
     auth = ksloading.cli.load_from_argparse_arguments(args)
     session = Session(auth=auth)
     return cls(session)
Example #34
    for p in prefixes:
        for s in suffixes:
            key = ''.join([p, s])
            envvar = ('_'.join(['os', key])).upper()
            value = environ.get(envvar)
            if value:
                _args[key] = value
                break

    _args.update({
        'auth_url': environ.get('OS_AUTH_URL'),
        'password': environ.get('OS_PASSWORD'),
        'username': environ.get('OS_USERNAME')
    })
    return _args


_auth_args = build_auth_args()
_plugin = 'v3password'
region = environ.get('OS_REGION_NAME')
endpoint_filter = {
    'service_type': 'identity',
    'interface': 'admin',
    'region_name': region
}

loader = loading.get_plugin_loader(_plugin)
auth = loader.load_from_options(**_auth_args)
sess = Session(auth=auth, verify=environ.get('OS_CACERT'))
ks = client.Client(session=sess)
Example #35
    'username': username,
}

if __name__ == '__main__':
    auth_args = pauth_args
    plugin = 'v3password'
    region = os.environ.get('OS_REGION_NAME')
    endpoint_filter = {
        'service_type': 'identity',
        'interface': 'admin',
        'region_name': region
    }

    loader = loading.get_plugin_loader(plugin)
    auth = loader.load_from_options(**auth_args)
    sess = Session(auth=auth)
    ks = client.Client(session=sess)
    # Available projects
    print('Available projects:')
    pprint(ks.auth.projects())
    print('Available domains:')
    domains = ks.auth.domains()
    if domains:
        pprint(domains)
    else:
        resp = sess.get('/auth/domains', endpoint_filter=endpoint_filter)
        print(json.dumps(resp.json(), indent=2))

    token = sess.get_token()
    token_data = ks.tokens.get_token_data(token=token, include_catalog=False)
    print('Token data:')
Example #36
class NailgunClient(object):
    """NailgunClient"""  # TODO documentation

    def __init__(self, admin_node_ip=None, session=None, **kwargs):
        if session:
            logger.info(
                'Initialization of NailgunClient using shared session \n'
                '(auth_url={})'.format(session.auth.auth_url))
            self.session = session
        else:
            warn(
                'Initialization of NailgunClient by IP is deprecated, '
                'please use keystoneauth1.session.Session',
                DeprecationWarning)

            if FORCE_HTTPS_MASTER_NODE:
                url = "https://{0}:8443".format(admin_node_ip)
            else:
                url = "http://{0}:8000".format(admin_node_ip)
            logger.info('Initiate Nailgun client with url %s', url)
            keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)

            creds = dict(KEYSTONE_CREDS, **kwargs)

            auth = V2Password(
                auth_url=keystone_url,
                username=creds['username'],
                password=creds['password'],
                tenant_name=creds['tenant_name'])
            # TODO: in v3 project_name

            self.session = KeystoneSession(auth=auth, verify=False)

    def __repr__(self):
        klass, obj_id = type(self), hex(id(self))
        url = getattr(self, 'url', None)
        return "[{klass}({obj_id}), url:{url}]".format(klass=klass,
                                                       obj_id=obj_id,
                                                       url=url)

    def _get(self, url, **kwargs):
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.get(url=url, connect_retries=1, **kwargs)

    def _delete(self, url, **kwargs):
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.delete(url=url, connect_retries=1, **kwargs)

    def _post(self, url, **kwargs):
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.post(url=url, connect_retries=1, **kwargs)

    def _put(self, url, **kwargs):
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.put(url=url, connect_retries=1, **kwargs)

    def list_nodes(self):
        return self._get(url="/nodes/").json()

    def list_cluster_nodes(self, cluster_id):
        return self._get(url="/nodes/?cluster_id={}".format(cluster_id)).json()

    @logwrap
    def get_networks(self, cluster_id):
        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self._get(
            url="/clusters/{}/network_configuration/{}".format(
                cluster_id, net_provider
            )).json()

    @logwrap
    def verify_networks(self, cluster_id):
        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self._put(
            "/clusters/{}/network_configuration/{}/verify/".format(
                cluster_id, net_provider
            ),
            json=self.get_networks(cluster_id)
        ).json()

    def get_cluster_attributes(self, cluster_id):
        return self._get(
            url="/clusters/{}/attributes/".format(cluster_id)).json()

    def get_cluster_vmware_attributes(self, cluster_id):
        return self._get(
            url="/clusters/{}/vmware_attributes/".format(cluster_id),
        ).json()

    @logwrap
    def update_cluster_attributes(self, cluster_id, attrs):
        return self._put(
            "/clusters/{}/attributes/".format(cluster_id),
            json=attrs
        ).json()

    @logwrap
    def update_cluster_vmware_attributes(self, cluster_id, attrs):
        return self._put(
            "/clusters/{}/vmware_attributes/".format(cluster_id),
            json=attrs
        ).json()

    @logwrap
    def get_cluster(self, cluster_id):
        return self._get(url="/clusters/{}".format(cluster_id)).json()

    @logwrap
    def update_cluster(self, cluster_id, data):
        return self._put(
            "/clusters/{}/".format(cluster_id),
            json=data
        ).json()

    @logwrap
    def delete_cluster(self, cluster_id):
        return self._delete(url="/clusters/{}/".format(cluster_id)).json()

    @logwrap
    def get_node_by_id(self, node_id):
        return self._get(url="/nodes/{}".format(node_id)).json()

    @logwrap
    def update_node(self, node_id, data):
        return self._put(
            "/nodes/{}/".format(node_id), json=data
        ).json()

    @logwrap
    def update_nodes(self, data):
        return self._put(url="/nodes", json=data).json()

    @logwrap
    def delete_node(self, node_id):
        return self._delete(url="/nodes/{}/".format(node_id)).json()

    @logwrap
    def deploy_cluster_changes(self, cluster_id):
        return self._put(url="/clusters/{}/changes/".format(cluster_id)).json()

    @logwrap
    def deploy_custom_graph(self, cluster_id, graph_type, node_ids=None):
        """Method to deploy custom graph on cluster.

        :param cluster_id: Cluster to be custom deployed
        :param graph_type: Type of a graph to deploy
        :param node_ids: nodes to deploy. None or empty list means all.
        :return:
        """
        if not node_ids:
            nailgun_nodes = self.list_cluster_nodes(cluster_id)
            node_ids = [str(_node['id']) for _node in nailgun_nodes]
        return self._put(
            '/clusters/{0}/deploy/?graph_type={1}&nodes={2}'.format(
                cluster_id,
                graph_type,
                ','.join(node_ids))).json()

    @logwrap
    def get_release_tasks(self, release_id):
        """Method to get release deployment tasks.

        :param release_id: Id of release to get tasks
        :return: list of deployment graphs
        """
        return self._get(
            '/releases/{rel_id}/deployment_graphs/'.format(
                rel_id=release_id)).json()

    @logwrap
    def get_release_tasks_by_type(self, release_id, graph_type):
        """Method to get release deployment tasks by type.

        :param release_id: Id of release to get tasks
        :param graph_type: Type of a graph to deploy
        :return: list of deployment graphs for a given type
        """
        return self._get(
            "/releases/{0}/deployment_graphs/{1}".format(
                release_id, graph_type)).json()

    @logwrap
    def get_task(self, task_id):
        return self._get(url="/tasks/{}".format(task_id)).json()

    @logwrap
    def get_tasks(self):
        return self._get(url="/tasks").json()

    @logwrap
    def get_releases(self):
        return self._get(url="/releases/").json()

    @logwrap
    def get_release(self, release_id):
        return self._get(url="/releases/{}".format(release_id)).json()

    @logwrap
    def put_release(self, release_id, data):
        return self._put(
            url="/releases/{}".format(release_id), json=data).json()

    @logwrap
    def get_releases_details(self, release_id):
        warn('get_releases_details is deprecated in favor of get_release')
        return self._get(url="/releases/{}".format(release_id)).json()

    @logwrap
    def get_node_disks(self, node_id):
        return self._get(url="/nodes/{}/disks".format(node_id)).json()

    @logwrap
    def put_node_disks(self, node_id, data):
        return self._put(
            url="/nodes/{}/disks".format(node_id), json=data).json()

    @logwrap
    def get_release_id(self, release_name=OPENSTACK_RELEASE):
        for release in self.get_releases():
            if release["name"].lower().find(release_name.lower()) != -1:
                return release["id"]

    @logwrap
    def get_release_default_net_settings(self, release_id):
        return self._get(url="/releases/{}/networks".format(release_id)).json()

    @logwrap
    def put_release_default_net_settings(self, release_id, data):
        return self._put(
            "/releases/{}/networks".format(release_id),
            json=data).json()

    @logwrap
    def get_node_interfaces(self, node_id):
        return self._get(url="/nodes/{}/interfaces".format(node_id)).json()

    @logwrap
    def put_node_interfaces(self, data):
        return self._put(url="/nodes/interfaces", json=data).json()

    @logwrap
    def list_clusters(self):
        return self._get(url="/clusters/").json()

    @logwrap
    def clone_environment(self, environment_id, data):
        return self._post(
            "/clusters/{}/upgrade/clone".format(environment_id),
            json=data
        ).json()

    @logwrap
    def reassign_node(self, cluster_id, data):
        return self._post(
            "/clusters/{}/upgrade/assign".format(cluster_id),
            json=data
        ).json()

    @logwrap
    def create_cluster(self, data):
        logger.info('Before post to nailgun')
        return self._post(url="/clusters", json=data).json()

    # ## OSTF ###
    @logwrap
    def get_ostf_test_sets(self, cluster_id):
        return self._get(
            url="/testsets/{}".format(cluster_id),
            endpoint_filter={'service_type': 'ostf'}
        ).json()

    @logwrap
    def get_ostf_tests(self, cluster_id):
        return self._get(
            url="/tests/{}".format(cluster_id),
            endpoint_filter={'service_type': 'ostf'}
        ).json()

    @logwrap
    def get_ostf_test_run(self, cluster_id):
        return self._get(
            url="/testruns/last/{}".format(cluster_id),
            endpoint_filter={'service_type': 'ostf'}
        ).json()

    @logwrap
    def ostf_run_tests(self, cluster_id, test_sets_list):
        logger.info('Run OSTF tests at cluster #%s: %s',
                    cluster_id, test_sets_list)
        data = []
        for test_set in test_sets_list:
            data.append(
                {
                    'metadata': {'cluster_id': str(cluster_id), 'config': {}},
                    'testset': test_set
                }
            )
        # get tests first, otherwise a 500 error will be thrown
        self.get_ostf_tests(cluster_id)
        return self._post(
            "/testruns",
            json=data,
            endpoint_filter={'service_type': 'ostf'})

    @logwrap
    def ostf_run_singe_test(self, cluster_id, test_sets_list, test_name):
        # get tests first, otherwise a 500 error will be thrown
        self.get_ostf_tests(cluster_id)
        logger.info('Get tests finished successfully')
        data = []
        for test_set in test_sets_list:
            data.append(
                {
                    'metadata': {'cluster_id': str(cluster_id), 'config': {}},
                    'tests': [test_name],
                    'testset': test_set
                }
            )
        return self._post(
            "/testruns",
            json=data,
            endpoint_filter={'service_type': 'ostf'}).json()
    # ## /OSTF ###

    @logwrap
    def update_network(self, cluster_id, networking_parameters=None,
                       networks=None):
        nc = self.get_networks(cluster_id)
        if networking_parameters is not None:
            for k in networking_parameters:
                nc["networking_parameters"][k] = networking_parameters[k]
        if networks is not None:
            nc["networks"] = networks

        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self._put(
            "/clusters/{}/network_configuration/{}".format(
                cluster_id, net_provider
            ),
            json=nc,

        ).json()

    @logwrap
    def get_cluster_id(self, name):
        for cluster in self.list_clusters():
            if cluster["name"] == name:
                logger.info('Cluster name is {:s}'.format(name))
                logger.info('Cluster id is {:d}'.format(cluster["id"]))
                return cluster["id"]

    @logwrap
    def add_syslog_server(self, cluster_id, host, port):
        # Here we are updating the cluster's editable attributes;
        # in particular, we set an extra syslog server.
        attributes = self.get_cluster_attributes(cluster_id)
        attributes["editable"]["syslog"]["syslog_server"]["value"] = host
        attributes["editable"]["syslog"]["syslog_port"]["value"] = port
        self.update_cluster_attributes(cluster_id, attributes)

    @logwrap
    def get_cluster_vlans(self, cluster_id):
        cluster_vlans = []
        nc = self.get_networks(cluster_id)['networking_parameters']
        vlans = nc["vlan_range"]
        cluster_vlans.extend(vlans)

        return cluster_vlans

    @logwrap
    def get_notifications(self):
        return self._get(url="/notifications").json()

    @logwrap
    def generate_logs(self):
        return self._put(url="/logs/package").json()

    @logwrap
    def provision_nodes(self, cluster_id, node_ids=None):
        return self.do_cluster_action(cluster_id, node_ids=node_ids)

    @logwrap
    def deploy_nodes(self, cluster_id, node_ids=None):
        return self.do_cluster_action(
            cluster_id, node_ids=node_ids, action="deploy")

    @logwrap
    def stop_deployment(self, cluster_id):
        return self.do_stop_reset_actions(cluster_id)

    @logwrap
    def reset_environment(self, cluster_id):
        return self.do_stop_reset_actions(cluster_id, action="reset")

    @logwrap
    def do_cluster_action(self, cluster_id, node_ids=None, action="provision"):
        if not node_ids:
            nailgun_nodes = self.list_cluster_nodes(cluster_id)
            # pylint: disable=map-builtin-not-iterating
            node_ids = map(lambda _node: str(_node['id']), nailgun_nodes)
            # pylint: enable=map-builtin-not-iterating
        return self._put(
            "/clusters/{0}/{1}?nodes={2}".format(
                cluster_id,
                action,
                ','.join(node_ids))
        ).json()

    @logwrap
    def do_stop_reset_actions(self, cluster_id, action="stop_deployment"):
        return self._put(
            "/clusters/{0}/{1}/".format(str(cluster_id), action)).json()

    @logwrap
    def get_api_version(self):
        return self._get(url="/version").json()

    @logwrap
    def run_update(self, cluster_id):
        return self._put(
            "/clusters/{0}/update/".format(str(cluster_id))).json()

    @logwrap
    def create_nodegroup(self, cluster_id, group_name):
        data = {"cluster_id": cluster_id, "name": group_name}
        return self._post(url="/nodegroups/", json=data).json()

    @logwrap
    def get_nodegroups(self):
        return self._get(url="/nodegroups/").json()

    @logwrap
    def assign_nodegroup(self, group_id, nodes):
        data = [{"group_id": group_id, "id": n["id"]} for n in nodes]
        return self._put(url="/nodes/", json=data).json()

    @logwrap
    def delete_nodegroup(self, group_id):
        return self._delete(url="/nodegroups/{0}/".format(group_id))

    @logwrap
    def update_settings(self, data=None):
        return self._put(url="/settings", json=data).json()

    @logwrap
    def get_settings(self, data=None):
        return self._get(url="/settings").json()

    @logwrap
    def send_fuel_stats(self, enabled=False):
        settings = self.get_settings()
        params = ('send_anonymous_statistic', 'user_choice_saved')
        for p in params:
            settings['settings']['statistics'][p]['value'] = enabled
        self.update_settings(data=settings)

    @logwrap
    def get_cluster_deployment_tasks(self, cluster_id):
        """ Get list of all deployment tasks for cluster."""
        return self._get(
            url='/clusters/{}/deployment_tasks'.format(cluster_id),
        ).json()

    @logwrap
    def get_release_deployment_tasks(self, release_id):
        """ Get list of all deployment tasks for release."""
        return self._get(
            url='/releases/{}/deployment_tasks'.format(release_id),
        ).json()

    @logwrap
    def get_custom_cluster_deployment_tasks(self, cluster_id, custom_type):
        """ Get list of all deployment tasks for cluster."""
        return self._get(
            '/clusters/{}/deployment_tasks/?graph_type={}'.format(
                cluster_id,
                custom_type
            )).json()

    @logwrap
    def get_end_deployment_tasks(self, cluster_id, end, start=None):
        """ Get list of all deployment tasks for cluster with end parameter.
        If end=netconfig, return all tasks from the graph up to and including netconfig.
        """
        if not start:
            return self._get(
                url='/clusters/{0}/deployment_tasks?end={1}'.format(
                    cluster_id, end)
            ).json()
        return self._get(
            url='/clusters/{0}/deployment_tasks?start={1}&end={2}'.format(
                cluster_id, start, end),
        ).json()

    @logwrap
    def get_orchestrator_deployment_info(self, cluster_id):
        return self._get(
            url='/clusters/{}/orchestrator/deployment'.format(cluster_id),
        ).json()

    @logwrap
    def put_deployment_tasks_for_cluster(self, cluster_id, data, node_id,
                                         force=False):
        """Put  task to be executed on the nodes from cluster

        :param cluster_id: int, cluster id
        :param data: list, tasks ids
        :param node_id: str, Node ids where task should be run,
               can be node_id=1, or node_id =1,2,3,
        :param force: bool, run particular task on nodes and do not care
               if there were changes or not
        :return:
        """
        return self._put(
            '/clusters/{0}/deploy_tasks?nodes={1}{2}'.format(
                cluster_id, node_id, '&force=1' if force else ''),
            json=data).json()

    @logwrap
    def put_deployment_tasks_for_release(self, release_id, data):
        return self._put(
            '/releases/{}/deployment_tasks'.format(release_id),
            json=data).json()

    @logwrap
    def set_hostname(self, node_id, new_hostname):
        """ Set a new hostname for the node"""
        data = dict(hostname=new_hostname)
        return self._put(url='/nodes/{0}/'.format(node_id), json=data).json()

    @logwrap
    def get_network_template(self, cluster_id):
        return self._get(
            url='/clusters/{}/network_configuration/template'.format(
                cluster_id),
        ).json()

    @logwrap
    def upload_network_template(self, cluster_id, network_template):
        return self._put(
            '/clusters/{}/network_configuration/template'.format(cluster_id),
            json=network_template).json()

    @logwrap
    def delete_network_template(self, cluster_id):
        return self._delete(
            url='/clusters/{}/network_configuration/template'.format(
                cluster_id),
        ).json()

    @logwrap
    def get_network_groups(self):
        return self._get(url='/networks/').json()

    @logwrap
    def get_network_group(self, network_id):
        return self._get(url='/networks/{0}/'.format(network_id)).json()

    @logwrap
    def add_network_group(self, network_data):
        return self._post(url='/networks/', json=network_data).json()

    @logwrap
    def del_network_group(self, network_id):
        return self._delete(url='/networks/{0}/'.format(network_id))

    @logwrap
    def update_network_group(self, network_id, network_data):
        return self._put(url='/networks/{0}/'.format(network_id),
                         json=network_data).json()

    @logwrap
    def create_vm_nodes(self, node_id, data):
        logger.info("Uploading VMs configuration to node {0}: {1}".
                    format(node_id, data))
        url = "/nodes/{0}/vms_conf/".format(node_id)
        return self._put(url, json={'vms_conf': data}).json()

    @logwrap
    def spawn_vms(self, cluster_id):
        url = '/clusters/{0}/spawn_vms/'.format(cluster_id)
        return self._put(url).json()

    @logwrap
    def upload_configuration(self, config, cluster_id, role=None,
                             node_id=None, node_ids=None):
        """Upload configuration.

        :param config: a dictionary of configuration to upload.
        :param cluster_id: An integer number of cluster id.
        :param role: a string of role name.
        :param node_id: An integer number of node id.
        :param node_ids: a list of node ids
        :return: a decoded JSON response.
        """
        data = {'cluster_id': cluster_id, 'configuration': config}
        if role is not None:
            data['node_role'] = role
        if node_id is not None:
            data['node_id'] = node_id
        if node_ids is not None:
            data['node_ids'] = node_ids
        url = '/openstack-config/'
        return self._post(url, json=data).json()

    @logwrap
    def get_configuration(self, configuration_id):
        """Get uploaded configuration by id.

        :param configuration_id: An integer number of configuration id.
        :return: a decoded JSON response.
        """
        return self._get(
            url='/openstack-config/{0}'.format(configuration_id),
        ).json()

    @logwrap
    def list_configuration(self, cluster_id, role=None, node_id=None):
        """Get filtered list of configurations.

        :param cluster_id: An integer number of cluster id.
        :param role: a string of role name.
        :param node_id: An integer number of node id.
        :return: a decoded JSON response.
        """
        url = '/openstack-config/?cluster_id={0}'.format(cluster_id)
        if role is not None:
            url += '&node_role={0}'.format(role)
        if node_id is not None:
            url += '&node_id={0}'.format(node_id)
        return self._get(url=url).json()

    @logwrap
    def delete_configuration(self, configuration_id):
        """Delete configuration by id.

        :param configuration_id: An integer number of configuration id.
        :return: a response object.
        """
        url = '/openstack-config/{0}'.format(configuration_id)
        return self._delete(url=url)

    @logwrap
    def apply_configuration(self, cluster_id, role=None, node_id=None):
        """Apply configuration.

        :param cluster_id: An integer number of cluster id.
        :param role: a string of role name.
        :param node_id: An integer number of node id.
        :return: a decoded JSON response.
        """
        data = {'cluster_id': cluster_id}
        if role is not None:
            data['node_role'] = role
        if node_id is not None:
            data['node_id'] = node_id
        url = '/openstack-config/execute/'
        return self._put(url, json=data).json()
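
    # Hypothetical usage sketch of the openstack-config workflow above (the
    # 'client' name and the configuration payload are illustrative
    # assumptions): upload a config for one role, then apply it.
    #
    #   client.upload_configuration(
    #       config={'nova_config': {'DEFAULT/debug': {'value': True}}},
    #       cluster_id=1, role='controller')
    #   client.apply_configuration(cluster_id=1, role='controller')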

    @logwrap
    def update_vip_ip(self, cluster_id, data):
        return self._post(
            "/clusters/{0}/network_configuration/ips/vips".format(cluster_id),
            json=data).json()

    @logwrap
    def upload_node_attributes(self, attributes, node_id):
        """Upload node attributes for specified node.

        :param attributes: a dictionary of attributes to upload.
        :param node_id: an integer number of node id.
        :return: a decoded JSON response.
        """
        url = '/nodes/{}/attributes/'.format(node_id)
        return self._put(url, json=attributes).json()

    @logwrap
    def get_node_attributes(self, node_id):
        """Get attributes for specified node.

        :param node_id: an integer number of node id.
        :return: a decoded JSON response.
        """
        return self._get(url='/nodes/{}/attributes/'.format(node_id)).json()

    @logwrap
    def get_deployed_cluster_attributes(self, cluster_id):
        url = '/clusters/{}/attributes/deployed/'.format(cluster_id)
        return self._get(url).json()

    @logwrap
    def get_deployed_network_configuration(self, cluster_id):
        url = '/clusters/{}/network_configuration/deployed'.format(
            cluster_id)
        return self._get(url).json()

    @logwrap
    def get_default_cluster_settings(self, cluster_id):
        url = '/clusters/{}/attributes/defaults'.format(cluster_id)
        return self._get(url).json()

    @logwrap
    def get_all_tasks_list(self):
        return self._get(url='/transactions/').json()

    @logwrap
    def get_deployment_task_hist(self, task_id):
        url = '/transactions/{task_id}/deployment_history'.format(
            task_id=task_id)
        return self._get(
            url=url,
        ).json()

    @logwrap
    def redeploy_cluster_changes(self, cluster_id, data=None):
        """Deploy the changes of cluster settings

        :param cluster_id: int, target cluster ID
        :param data: dict, updated cluster attributes (if empty, the already
                     uploaded attributes will be (re)applied)
        :return: a decoded JSON response
        """
        if data is None:
            data = {}
        return self._put(
            "/clusters/{}/changes/redeploy".format(cluster_id),
            json=data).json()
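
    # Hypothetical usage sketch ('client' and the cluster id are illustrative
    # assumptions): calling without data re-applies the attributes that were
    # already uploaded to the cluster, as the docstring above describes.
    #
    #   client.redeploy_cluster_changes(cluster_id=1)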

    @logwrap
    def assign_ip_address_before_deploy_start(self, cluster_id):
        return self._get(
            url='/clusters/{}/orchestrator/deployment/defaults/'.format(
                cluster_id)
        )

    @logwrap
    def get_deployment_info_for_task(self, task_id):
        return self._get(
            url='/transactions/{}/deployment_info'.format(task_id),
        ).json()

    @logwrap
    def get_cluster_settings_for_deployment_task(self, task_id):
        return self._get(
            url='/transactions/{}/settings'.format(task_id),
        ).json()

    @logwrap
    def get_network_configuration_for_deployment_task(self, task_id):
        return self._get(
            url='/transactions/{}/network_configuration/'.format(task_id),
        ).json()

    # ConfigDB Extension

    @logwrap
    def get_components(self, comp_id=None):
        """Get all existing components

        :param comp_id: component id
        :return: components data
        """
        endpoint = '/config/components'
        endpoint = '{path}/{component_id}'.format(
            path=endpoint, component_id=comp_id) if comp_id else endpoint
        return self._get(endpoint).json()

    @logwrap
    def create_component(self, data):
        """ Create component with specified data

        :param data: dict, component data
        :return: a decoded JSON response
        """
        return self._post('/config/components', json=data).json()

    @logwrap
    def get_environments(self, env_id=None):
        """Get all existing environments

        :param env_id: environment id
        :return: env data
        """
        endpoint = '/config/environments'
        endpoint = '{path}/{env_id}'.format(
            env_id=env_id, path=endpoint) if env_id else endpoint
        return self._get(endpoint).json()

    @logwrap
    def create_environment(self, data):
        """ Create env with specified data

        :param data: dict, environment data
        :return: a decoded JSON response
        """
        return self._post('/config/environments', json=data).json()

    @logwrap
    def get_global_resource_id_value(self, env_id, resource_id,
                                     effective=False):
        """ Get global resource value for specified env and resource

        :param env_id: str or int
        :param resource_id: int, resource id
        :param effective: bool, request the effective (merged) value
        :return: global resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_id)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def get_global_resource_name_value(self, env_id, resource_name,
                                       effective=False):
        """ Get global resource value for specified env and resource

        :param env_id: str or int
        :param resource_name: str, resource name
        :param effective: bool, request the effective (merged) value
        :return: global resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_name)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def put_global_resource_value(self, env_id, resource, data):
        """Put global resource value

        :param env_id: str or int
        :param resource: name or id
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource)
        return self._put(endpoint, json=data)

    @logwrap
    def put_global_resource_override(self, env_id, resource, data):
        """Put global resource override value

        :param env_id: str or int
        :param resource: name or id
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/overrides'.format(env_id=env_id, resource=resource)
        return self._put(endpoint, json=data)

    @logwrap
    def get_node_resource_id_value(self, env_id, resource_id, node_id,
                                   effective=False):
        """ Get node level resource value for specified env, resource and node

        :param env_id: str or int
        :param resource_id: int, resource id
        :param node_id: str or int
        :param effective: bool, request the effective (merged) value
        :return: node resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_id,
                                    node_id=node_id)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def get_node_resource_name_value(self, env_id, resource_name, node_id,
                                     effective=False):
        """ Get node level resource value for specified env, resource and node

        :param env_id: str or int
        :param resource_name: name in string format
        :param node_id: str or int
        :param effective: bool, request the effective (merged) value
        :return: node resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_name,
                                    node_id=node_id)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def put_node_resource_value(self, env_id, resource, node_id, data):
        """ Put node resource value

        :param env_id: str or int
        :param resource: name or id
        :param node_id: str or int
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource,
                                    node_id=node_id)
        return self._put(endpoint, json=data)

    @logwrap
    def put_node_resource_overrides(self, env_id, resource, node_id, data):
        """Put node resource override value

        :param env_id: str or int
        :param resource: name or id
        :param node_id: str or int
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/overrides'.format(env_id=env_id, resource=resource,
                                       node_id=node_id)
        return self._put(endpoint, json=data)

    @logwrap
    def plugins_list(self):
        """Get list of installed plugins"""
        endpoint = '/plugins'
        return self._get(endpoint).json()
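
A minimal usage sketch, assuming an already-initialized instance of the client
above (the `client` name, the environment payload and the resource values are
illustrative assumptions, not from the original source), showing how the
ConfigDB helpers might be combined:

    # Illustrative only: create an environment, set a global resource value,
    # then read it back with effective=True, which appends '?effective' to
    # the request and returns the merged value.
    env = client.create_environment(
        {'components': [], 'hierarchy_levels': ['nodes']})
    client.put_global_resource_value(
        env['id'], 'nova_config', {'debug': True})
    merged = client.get_global_resource_name_value(
        env['id'], 'nova_config', effective=True)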
Example #37
0
    def _init_session_access_catalog(self):
        self._session = Session(auth=self.authentication)
        self._access = self.authentication.get_access(session=self.session)
        self._catalog = None if not self.access.has_service_catalog() \
            else self.access.__dict__['_data']['access']['serviceCatalog']
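
A minimal sketch, assuming the classic Keystone v2-style catalog layout, of
how the raw serviceCatalog captured above might be consumed (the helper name
and the service type are illustrative assumptions):

    def find_public_url(catalog, service_type):
        # Walk the raw catalog list and return the first public endpoint URL
        # advertised for the requested service type, or None if absent.
        for service in catalog or []:
            if service.get('type') == service_type:
                for endpoint in service.get('endpoints', []):
                    if 'publicURL' in endpoint:
                        return endpoint['publicURL']
        return None

    # e.g. find_public_url(self._catalog, 'compute') from inside the class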
Example #38
0
class Configurator:
    CLUSTER_MATCH = re.compile('^productionbb0*([1-9][0-9]*)$')
    EPH_MATCH = re.compile('^eph.*$')
    HAGROUP_MATCH = re.compile('.*_hg(?P<hagroup>[ab])$', re.IGNORECASE)
    BR_MATCH = re.compile('^br-(.*)$')

    def __init__(self, domain, global_options={}):
        self.global_options = global_options.copy()
        self.password = None
        self.mpw = None
        self.domain = domain
        self.os_session = None
        self.vcenters = dict()
        self.states = dict()
        self.poll_config()
        self.global_options['cells'] = {}
        self.global_options['domain'] = domain

        atexit.register(self._disconnect_vcenters)

    def _disconnect_vcenters(self):
        """Disconnect all vcenters we are connected to"""
        for host in self.vcenters:
            service_instance = self.vcenters[host].get('service_instance')
            if not service_instance:
                continue
            try:
                Disconnect(service_instance)
            except Exception:
                # best effort disconnection
                pass

    def __call__(self, added, removed):
        """Add/remove vcenters from our managed list of vcenters"""
        for name in added:
            host = f'{name}.{self.domain}'
            try:
                self._reconnect_vcenter_if_necessary(host)
            except VcConnectionFailed:
                LOG.error('Connecting to %s failed.', host)
                continue

        if removed:
            LOG.info(f"Gone vcs {removed}")

    def _connect_vcenter(self, host):
        """Create a connection to host and add it to self.vcenters"""
        # Vcenter doesn't accept / in password
        password = self.mpw.derive('long', host).replace("/", "")

        if host not in self.vcenters:
            self.vcenters[host] = {
                'username': self.username,
                'password': password,
                'host': host,
                'name': host.split('.', 1)[0],
                'retries': 0,
                'last_retry_time': time.time()
            }
            vc = self.vcenters[host]
        else:
            vc = self.vcenters[host]
            # remove the service_instance for reconnect so we can easily
            # detect a vcenter we are not connected to
            if 'service_instance' in vc:
                del vc['service_instance']

        retries = vc['retries']
        if retries:
            # wait a maximum of 10 minutes, a minimum of 1
            wait_time = min(retries, 10) * 60
            if time.time() < vc['last_retry_time'] + wait_time:
                LOG.debug('Ignoring reconnection attempt to %s because of '
                          'incremental backoff (retry %s).', host, retries)
                raise VcConnectSkipped()

        try:
            LOG.info(f"Connecting to {host}")

            vc['retries'] += 1
            vc['last_retry_time'] = time.time()

            service_instance = None
            if hasattr(ssl, '_create_unverified_context'):
                context = ssl._create_unverified_context()

                service_instance = SmartConnect(host=host,
                                                user=self.username,
                                                pwd=password,
                                                port=443,
                                                sslContext=context)

            if service_instance:
                vc['service_instance'] = service_instance

        except vim.fault.InvalidLogin as e:
            LOG.error("%s: %s", host, e.msg)
        except (Exception, socket_error) as e:
            LOG.error("%s: %s", host, e)

        if vc.get('service_instance') is None:
            raise VcConnectionFailed()
        vc['retries'] = 0

    def _reconnect_vcenter_if_necessary(self, host):
        """Test a vcenter connection and reconnect if necessary"""
        needs_reconnect = \
            host not in self.vcenters or \
            'service_instance' not in self.vcenters[host]
        if not needs_reconnect:
            try:
                self.vcenters[host]['service_instance'].CurrentTime()
            except Exception as e:
                LOG.info('Trying to reconnect to %s because of %s', host, e)
                needs_reconnect = True

        if needs_reconnect:
            self._connect_vcenter(host)
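
    # Backoff sketch derived from _connect_vcenter above: the wait before a
    # reconnection attempt is min(retries, 10) * 60 seconds, i.e. one minute
    # after the first failure, growing linearly per retry and capped at ten
    # minutes from the tenth retry onwards.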

    def _poll(self, host):
        self._reconnect_vcenter_if_necessary(host)
        vcenter_options = self.vcenters[host]
        values = {'clusters': {}, 'datacenters': {}}
        service_instance = vcenter_options['service_instance']

        nsx_t_clusters = set()

        with filter_spec_context(service_instance,
                                 obj_type=vim.HostSystem,
                                 path_set=['name', 'parent', 'config.network.opaqueSwitch']) as filter_spec:
            for h in vcu.collect_properties(service_instance, [filter_spec]):
                if 'config.network.opaqueSwitch' not in h:
                    LOG.debug("Broken ESXi host %s detected in cluster %s",
                              h['name'], h['parent'])
                    continue
                if len(h['config.network.opaqueSwitch']) > 0:
                    LOG.debug("(Possible) NSX-T switch found on %s", h['name'])
                    nsx_t_clusters.add(h['parent'])

        with filter_spec_context(service_instance) as filter_spec:
            availability_zones = set()
            cluster_options = None

            for cluster in vcu.collect_properties(service_instance, [filter_spec]):
                cluster_name = cluster['name']
                match = self.CLUSTER_MATCH.match(cluster_name)

                if not match:
                    LOG.debug(
                        "%s: Ignoring cluster %s "
                        "not matching naming scheme", host, cluster_name)
                    continue
                bb_name_no_zeroes = f'bb{match.group(1)}'

                nsx_t_enabled = cluster['obj'] in nsx_t_clusters
                if nsx_t_enabled:
                    LOG.debug('NSX-T enabled for %s', cluster_name)

                parent = cluster['parent']
                availability_zone = parent.parent.name.lower()

                availability_zones.add(availability_zone)
                cluster_options = self.global_options.copy()
                cluster_options.update(vcenter_options)
                cluster_options.pop('service_instance', None)
                cluster_options.update(name=bb_name_no_zeroes,
                                       cluster_name=cluster_name,
                                       availability_zone=availability_zone,
                                       nsx_t_enabled=nsx_t_enabled,
                                       vcenter_name=vcenter_options['name'])

                if cluster_options.get('pbm_enabled', 'false') != 'true':
                    datastores = cluster['datastore']
                    datastore_names = [datastore.name
                                       for datastore in datastores
                                       if self.EPH_MATCH.match(datastore.name)]
                    eph = commonprefix(datastore_names)
                    cluster_options.update(datastore_regex=f"^{eph}.*")
                    hagroups = set()
                    for name in datastore_names:
                        m = self.HAGROUP_MATCH.match(name)
                        if not m:
                            continue
                        hagroups.add(m.group('hagroup').lower())
                    if {'a', 'b'}.issubset(hagroups):
                        LOG.debug('ephemeral datastore hagroups enabled for %s', cluster_name)
                        cluster_options.update(datastore_hagroup_regex=self.HAGROUP_MATCH.pattern)

                for network in cluster['network']:
                    try:
                        match = self.BR_MATCH.match(network.name)
                        if match:
                            cluster_options['bridge'] = match.group(0).lower()
                            cluster_options['physical'] = match.group(1).lower()
                            break
                    except vim.ManagedObjectNotFound:
                        # sometimes a portgroup might be already deleted when
                        # we try to query its name here
                        continue

                if 'bridge' not in cluster_options and not nsx_t_enabled:
                    LOG.warning("%s: Skipping cluster %s, "
                                "cannot find bridge matching naming scheme",
                                host, cluster_name)
                    continue

                values['clusters'][cluster_name] = cluster_options

            for availability_zone in availability_zones:
                cluster_options = self.global_options.copy()
                cluster_options.update(vcenter_options)
                cluster_options.pop('service_instance', None)
                cluster_options.update(availability_zone=availability_zone)
                values['datacenters'][availability_zone] = cluster_options

        return values

    @property
    def _client(self):
        return client

    @property
    def username(self):
        return self.global_options['username']

    @property
    def namespace(self):
        return self.global_options['own_namespace']

    def poll_config(self):
        configmap = client.CoreV1Api().read_namespaced_config_map(
            namespace=self.namespace,
            name='vcenter-operator')

        password = configmap.data.pop('password')
        for key, value in configmap.data.items():
            try:
                self.global_options[key] = json.loads(value)
            except ValueError:
                self.global_options[key] = value
        if self.password != password:
            self.global_options.update(master_password=password)
            self.password = password
            self.mpw = MasterPassword(self.username, self.password)
            self.setup_os_session()

    def setup_os_session(self):
        os_username = self.global_options.get('os_username')
        if not os_username:
            return
        os_username += self.global_options.get('user_suffix', '')
        mpw = MasterPassword(os_username, self.password)
        host = "identity-3." + self.domain.split('.', 1)[1]
        password = mpw.derive('long', host)
        auth = Password(
            auth_url='https://' + host + '/v3',
            username=os_username,
            user_domain_name=self.global_options.get('os_user_domain_name'),
            project_name=self.global_options.get('os_project_name'),
            project_domain_name=self.global_options.get('os_project_domain_name'),
            password=password,
        )
        self.os_session = Session(auth=auth)

    def _poll_nova(self):
        if not self.os_session:
            return

        try:
            endpoint_filter = {'service_type': 'compute', 'interface': 'public'}
            resp = self.os_session.get('/os-cells', endpoint_filter=endpoint_filter)
            for cell in resp.json().get('cellsv2', []):
                self.global_options['cells'][cell['name']] = cell
        except (HttpError, ConnectionError) as e:
            LOG.error(f"Failed to get cells: {e}")

    def poll(self):
        self.poll_config()
        self._poll_nova()

        # If we fail to update the templates, we'd rather not continue,
        # to avoid rendering only half of the deployment
        if not DeploymentState.poll_templates():
            return

        for host in self.vcenters:
            try:
                values = self._poll(host)
                state = DeploymentState(
                    namespace=self.global_options['namespace'],
                    dry_run=(self.global_options.get('dry_run', 'False')
                             == 'True'))

                for options in values['clusters'].values():
                    state.render('vcenter_cluster', options)

                for options in values['datacenters'].values():
                    state.render('vcenter_datacenter', options)

                last = self.states.get(host)

                if last:
                    delta = last.delta(state)
                    delta.apply()
                else:
                    state.apply()

                self.states[host] = state
            except VcConnectionFailed:
                LOG.error(
                    "Reconnecting to %s failed. Ignoring VC for this run.", host
                )
            except VcConnectSkipped:
                LOG.info("Ignoring disconnected %s for this run.", host)
            except http.client.HTTPException as e:
                LOG.warning("%s: %r", host, e)