Example #1
0
def get_nodes(admin_ip):
    """Return the node list from the Nailgun 'fuel' service as decoded JSON.

    Authenticates against Keystone v2 on the master node at *admin_ip*
    using the shared ``KEYSTONE_CREDS`` credentials.
    """
    auth = V2Password(
        auth_url="http://{}:5000/v2.0".format(admin_ip),
        username=KEYSTONE_CREDS['username'],
        password=KEYSTONE_CREDS['password'],
        tenant_name=KEYSTONE_CREDS['tenant_name'],
    )
    session = KeystoneSession(auth=auth, verify=False)
    response = session.get('/nodes',
                           endpoint_filter={'service_type': 'fuel'})
    return response.json()
Example #2
0
def get_nodes(admin_ip):
    """Fetch '/nodes' from the 'fuel' endpoint and return the parsed JSON."""
    credentials = dict(
        username=KEYSTONE_CREDS['username'],
        password=KEYSTONE_CREDS['password'],
        tenant_name=KEYSTONE_CREDS['tenant_name'],
    )
    auth = V2Password(
        auth_url="http://{}:5000/v2.0".format(admin_ip), **credentials)
    session = KeystoneSession(auth=auth, verify=False)
    return session.get(
        '/nodes', endpoint_filter={'service_type': 'fuel'}).json()
Example #3
0
        'region_name': region
    }

    loader = loading.get_plugin_loader(plugin)
    auth = loader.load_from_options(**auth_args)
    sess = Session(auth=auth)
    ks = client.Client(session=sess)
    # Available projects
    print('Available projects:')
    pprint(ks.auth.projects())
    print('Available domains:')
    domains = ks.auth.domains()
    if domains:
        pprint(domains)
    else:
        resp = sess.get('/auth/domains', endpoint_filter=endpoint_filter)
        print(json.dumps(resp.json(), indent=2))

    token = sess.get_token()
    token_data = ks.tokens.get_token_data(token=token, include_catalog=False)
    print('Token data:')
    print(json.dumps(token_data, indent=2))

    dname = 'dev'
    try:
        domain = ks.domains.get(dname)
        domain_id = None
    except NotFound:
        # Direct API
        resp = sess.get('/domains?name={}'.format(dname),
                        endpoint_filter=endpoint_filter)
class NailgunClient(object):
    """HTTP client for the Nailgun (Fuel master node) REST API.

    Every request goes through a Keystone session.  Unless a caller
    passes its own ``endpoint_filter``, the ``_get``/``_post``/``_put``/
    ``_delete`` helpers route requests to the ``fuel`` service endpoint
    from the Keystone catalog; the OSTF helpers override this with
    ``{'service_type': 'ostf'}``.  Most methods return the decoded JSON
    body of the response.
    """

    def __init__(self, admin_node_ip=None, session=None, **kwargs):
        """Create a client from a shared session or (deprecated) an IP.

        :param admin_node_ip: master node IP, used only in the deprecated
               path to build Keystone/Nailgun URLs.
        :param session: an already-authenticated keystoneauth session;
               preferred over ``admin_node_ip``.
        :param kwargs: overrides merged on top of ``KEYSTONE_CREDS``.
        """
        if session:
            logger.info(
                'Initialization of NailgunClient using shared session \n'
                '(auth_url={})'.format(session.auth.auth_url))
            self.session = session
        else:
            warn(
                'Initialization of NailgunClient by IP is deprecated, '
                'please use keystonesession1.session.Session',
                DeprecationWarning)

            if FORCE_HTTPS_MASTER_NODE:
                url = "https://{0}:8443".format(admin_node_ip)
            else:
                url = "http://{0}:8000".format(admin_node_ip)
            logger.info('Initiate Nailgun client with url %s', url)
            keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)

            # kwargs take precedence over the shared default credentials.
            creds = dict(KEYSTONE_CREDS, **kwargs)

            auth = V2Password(
                auth_url=keystone_url,
                username=creds['username'],
                password=creds['password'],
                tenant_name=creds['tenant_name'])
            # TODO: in v3 project_name

            self.session = KeystoneSession(auth=auth, verify=False)

    def __repr__(self):
        # 'url' is not set by __init__; getattr keeps repr safe either way.
        klass, obj_id = type(self), hex(id(self))
        url = getattr(self, 'url', None)
        return "[{klass}({obj_id}), url:{url}]".format(klass=klass,
                                                       obj_id=obj_id,
                                                       url=url)

    def _get(self, url, **kwargs):
        """GET *url*, defaulting to the 'fuel' endpoint, with one retry."""
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.get(url=url, connect_retries=1, **kwargs)

    def _delete(self, url, **kwargs):
        """DELETE *url*, defaulting to the 'fuel' endpoint, with one retry."""
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.delete(url=url, connect_retries=1, **kwargs)

    def _post(self, url, **kwargs):
        """POST *url*, defaulting to the 'fuel' endpoint, with one retry."""
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.post(url=url, connect_retries=1, **kwargs)

    def _put(self, url, **kwargs):
        """PUT *url*, defaulting to the 'fuel' endpoint, with one retry."""
        if 'endpoint_filter' not in kwargs:
            kwargs.update(endpoint_filter={'service_type': 'fuel'})
        return self.session.put(url=url, connect_retries=1, **kwargs)

    def list_nodes(self):
        """List all nodes known to Nailgun."""
        return self._get(url="/nodes/").json()

    def list_cluster_nodes(self, cluster_id):
        """List the nodes assigned to *cluster_id*."""
        return self._get(url="/nodes/?cluster_id={}".format(cluster_id)).json()

    @logwrap
    def get_networks(self, cluster_id):
        """Get the cluster's network configuration for its net provider."""
        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self._get(
            url="/clusters/{}/network_configuration/{}".format(
                cluster_id, net_provider
            )).json()

    @logwrap
    def verify_networks(self, cluster_id):
        """Start network verification using the current network config."""
        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self._put(
            "/clusters/{}/network_configuration/{}/verify/".format(
                cluster_id, net_provider
            ),
            json=self.get_networks(cluster_id)
        ).json()

    def get_cluster_attributes(self, cluster_id):
        return self._get(
            url="/clusters/{}/attributes/".format(cluster_id)).json()

    def get_cluster_vmware_attributes(self, cluster_id):
        return self._get(
            url="/clusters/{}/vmware_attributes/".format(cluster_id),
        ).json()

    @logwrap
    def update_cluster_attributes(self, cluster_id, attrs):
        return self._put(
            "/clusters/{}/attributes/".format(cluster_id),
            json=attrs
        ).json()

    @logwrap
    def update_cluster_vmware_attributes(self, cluster_id, attrs):
        return self._put(
            "/clusters/{}/vmware_attributes/".format(cluster_id),
            json=attrs
        ).json()

    @logwrap
    def get_cluster(self, cluster_id):
        return self._get(url="/clusters/{}".format(cluster_id)).json()

    @logwrap
    def update_cluster(self, cluster_id, data):
        return self._put(
            "/clusters/{}/".format(cluster_id),
            json=data
        ).json()

    @logwrap
    def delete_cluster(self, cluster_id):
        return self._delete(url="/clusters/{}/".format(cluster_id)).json()

    @logwrap
    def get_node_by_id(self, node_id):
        return self._get(url="/nodes/{}".format(node_id)).json()

    @logwrap
    def update_node(self, node_id, data):
        return self._put(
            "/nodes/{}/".format(node_id), json=data
        ).json()

    @logwrap
    def update_nodes(self, data):
        return self._put(url="/nodes", json=data).json()

    @logwrap
    def delete_node(self, node_id):
        return self._delete(url="/nodes/{}/".format(node_id)).json()

    @logwrap
    def deploy_cluster_changes(self, cluster_id):
        """Apply all pending changes of the cluster (provision + deploy)."""
        return self._put(url="/clusters/{}/changes/".format(cluster_id)).json()

    @logwrap
    def deploy_custom_graph(self, cluster_id, graph_type, node_ids=None):
        """Method to deploy custom graph on cluster.

        :param cluster_id: Cluster to be custom deployed
        :param graph_type: Type of a graph to deploy
        :param node_ids: nodes to deploy. None or empty list means all.
        :return:
        """
        if not node_ids:
            nailgun_nodes = self.list_cluster_nodes(cluster_id)
            node_ids = [str(_node['id']) for _node in nailgun_nodes]
        return self._put(
            '/clusters/{0}/deploy/?graph_type={1}&nodes={2}'.format(
                cluster_id,
                graph_type,
                ','.join(node_ids))).json()

    @logwrap
    def get_release_tasks(self, release_id):
        """Method to get release deployment tasks.

        :param release_id: Id of release to get tasks
        :return: list of deployment graphs
        """
        return self._get(
            '/releases/{rel_id}/deployment_graphs/'.format(
                rel_id=release_id)).json()

    @logwrap
    def get_release_tasks_by_type(self, release_id, graph_type):
        """Method to get release deployment tasks by type.

        :param release_id: Id of release to get tasks
        :param graph_type: Type of a graph to deploy
        :return: list of deployment graphs for a given type
        """
        return self._get(
            "/releases/{0}/deployment_graphs/{1}".format(
                release_id, graph_type)).json()

    @logwrap
    def get_task(self, task_id):
        return self._get(url="/tasks/{}".format(task_id)).json()

    @logwrap
    def get_tasks(self):
        return self._get(url="/tasks").json()

    @logwrap
    def get_releases(self):
        return self._get(url="/releases/").json()

    @logwrap
    def get_release(self, release_id):
        return self._get(url="/releases/{}".format(release_id)).json()

    @logwrap
    def put_release(self, release_id, data):
        return self._put(
            url="/releases/{}".format(release_id), json=data).json()

    @logwrap
    def get_releases_details(self, release_id):
        # Deprecated alias; same request as get_release().
        warn('get_releases_details is deprecated in favor of get_release')
        return self._get(url="/releases/{}".format(release_id)).json()

    @logwrap
    def get_node_disks(self, node_id):
        return self._get(url="/nodes/{}/disks".format(node_id)).json()

    @logwrap
    def put_node_disks(self, node_id, data):
        return self._put(
            url="/nodes/{}/disks".format(node_id), json=data).json()

    @logwrap
    def get_release_id(self, release_name=OPENSTACK_RELEASE):
        """Return the id of the first release whose name contains
        *release_name* (case-insensitive), or None if nothing matches.
        """
        for release in self.get_releases():
            if release["name"].lower().find(release_name.lower()) != -1:
                return release["id"]

    @logwrap
    def get_release_default_net_settings(self, release_id):
        return self._get(url="/releases/{}/networks".format(release_id)).json()

    @logwrap
    def put_release_default_net_settings(self, release_id, data):
        return self._put(
            "/releases/{}/networks".format(release_id),
            json=data).json()

    @logwrap
    def get_node_interfaces(self, node_id):
        return self._get(url="/nodes/{}/interfaces".format(node_id)).json()

    @logwrap
    def put_node_interfaces(self, data):
        return self._put(url="/nodes/interfaces", json=data).json()

    @logwrap
    def list_clusters(self):
        return self._get(url="/clusters/").json()

    @logwrap
    def clone_environment(self, environment_id, data):
        return self._post(
            "/clusters/{}/upgrade/clone".format(environment_id),
            json=data
        ).json()

    @logwrap
    def reassign_node(self, cluster_id, data):
        return self._post(
            "/clusters/{}/upgrade/assign".format(cluster_id),
            json=data
        ).json()

    @logwrap
    def create_cluster(self, data):
        logger.info('Before post to nailgun')
        return self._post(url="/clusters", json=data).json()

    # ## OSTF ###
    # The OSTF methods talk to the health-check service, so they override
    # the default endpoint_filter with service_type 'ostf'.
    @logwrap
    def get_ostf_test_sets(self, cluster_id):
        return self._get(
            url="/testsets/{}".format(cluster_id),
            endpoint_filter={'service_type': 'ostf'}
        ).json()

    @logwrap
    def get_ostf_tests(self, cluster_id):
        return self._get(
            url="/tests/{}".format(cluster_id),
            endpoint_filter={'service_type': 'ostf'}
        ).json()

    @logwrap
    def get_ostf_test_run(self, cluster_id):
        return self._get(
            url="/testruns/last/{}".format(cluster_id),
            endpoint_filter={'service_type': 'ostf'}
        ).json()

    @logwrap
    def ostf_run_tests(self, cluster_id, test_sets_list):
        """Start the given OSTF test sets on the cluster.

        Note: returns the raw response object, not decoded JSON.
        """
        logger.info('Run OSTF tests at cluster #%s: %s',
                    cluster_id, test_sets_list)
        data = []
        for test_set in test_sets_list:
            data.append(
                {
                    'metadata': {'cluster_id': str(cluster_id), 'config': {}},
                    'testset': test_set
                }
            )
        # get tests otherwise 500 error will be thrown
        self.get_ostf_tests(cluster_id)
        return self._post(
            "/testruns",
            json=data,
            endpoint_filter={'service_type': 'ostf'})

    @logwrap
    def ostf_run_singe_test(self, cluster_id, test_sets_list, test_name):
        # NOTE: name keeps the historical 'singe' typo for compatibility.
        # get tests otherwise 500 error will be thrown
        self.get_ostf_tests(cluster_id)
        logger.info('Get tests finish with success')
        data = []
        for test_set in test_sets_list:
            data.append(
                {
                    'metadata': {'cluster_id': str(cluster_id), 'config': {}},
                    'tests': [test_name],
                    'testset': test_set
                }
            )
        return self._post(
            "/testruns",
            json=data,
            endpoint_filter={'service_type': 'ostf'}).json()
    # ## /OSTF ###

    @logwrap
    def update_network(self, cluster_id, networking_parameters=None,
                       networks=None):
        """Merge the given parameters/networks into the current network
        configuration and PUT the result back.
        """
        nc = self.get_networks(cluster_id)
        if networking_parameters is not None:
            for k in networking_parameters:
                nc["networking_parameters"][k] = networking_parameters[k]
        if networks is not None:
            nc["networks"] = networks

        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self._put(
            "/clusters/{}/network_configuration/{}".format(
                cluster_id, net_provider
            ),
            json=nc,

        ).json()

    @logwrap
    def get_cluster_id(self, name):
        """Return the id of the cluster named *name*, or None if absent."""
        for cluster in self.list_clusters():
            if cluster["name"] == name:
                logger.info('Cluster name is {:s}'.format(name))
                logger.info('Cluster id is {:d}'.format(cluster["id"]))
                return cluster["id"]

    @logwrap
    def add_syslog_server(self, cluster_id, host, port):
        # Here we updating cluster editable attributes
        # In particular we set extra syslog server
        attributes = self.get_cluster_attributes(cluster_id)
        attributes["editable"]["syslog"]["syslog_server"]["value"] = host
        attributes["editable"]["syslog"]["syslog_port"]["value"] = port
        self.update_cluster_attributes(cluster_id, attributes)

    @logwrap
    def get_cluster_vlans(self, cluster_id):
        """Return the cluster's VLAN range as a flat list."""
        cluster_vlans = []
        nc = self.get_networks(cluster_id)['networking_parameters']
        vlans = nc["vlan_range"]
        cluster_vlans.extend(vlans)

        return cluster_vlans

    @logwrap
    def get_notifications(self):
        return self._get(url="/notifications").json()

    @logwrap
    def generate_logs(self):
        """Trigger generation of the diagnostic-snapshot log package."""
        return self._put(url="/logs/package").json()

    @logwrap
    def provision_nodes(self, cluster_id, node_ids=None):
        return self.do_cluster_action(cluster_id, node_ids=node_ids)

    @logwrap
    def deploy_nodes(self, cluster_id, node_ids=None):
        return self.do_cluster_action(
            cluster_id, node_ids=node_ids, action="deploy")

    @logwrap
    def stop_deployment(self, cluster_id):
        return self.do_stop_reset_actions(cluster_id)

    @logwrap
    def reset_environment(self, cluster_id):
        return self.do_stop_reset_actions(cluster_id, action="reset")

    @logwrap
    def do_cluster_action(self, cluster_id, node_ids=None, action="provision"):
        """Run *action* ('provision' or 'deploy') on the given nodes;
        an empty/None *node_ids* means every node in the cluster.
        """
        if not node_ids:
            nailgun_nodes = self.list_cluster_nodes(cluster_id)
            # pylint: disable=map-builtin-not-iterating
            node_ids = map(lambda _node: str(_node['id']), nailgun_nodes)
            # pylint: enable=map-builtin-not-iterating
        return self._put(
            "/clusters/{0}/{1}?nodes={2}".format(
                cluster_id,
                action,
                ','.join(node_ids))
        ).json()

    @logwrap
    def do_stop_reset_actions(self, cluster_id, action="stop_deployment"):
        return self._put(
            "/clusters/{0}/{1}/".format(str(cluster_id), action)).json()

    @logwrap
    def get_api_version(self):
        return self._get(url="/version").json()

    @logwrap
    def run_update(self, cluster_id):
        return self._put(
            "/clusters/{0}/update/".format(str(cluster_id))).json()

    @logwrap
    def create_nodegroup(self, cluster_id, group_name):
        data = {"cluster_id": cluster_id, "name": group_name}
        return self._post(url="/nodegroups/", json=data).json()

    @logwrap
    def get_nodegroups(self):
        return self._get(url="/nodegroups/").json()

    @logwrap
    def assign_nodegroup(self, group_id, nodes):
        data = [{"group_id": group_id, "id": n["id"]} for n in nodes]
        return self._put(url="/nodes/", json=data).json()

    @logwrap
    def delete_nodegroup(self, group_id):
        # Returns the raw response object (no JSON body expected).
        return self._delete(url="/nodegroups/{0}/".format(group_id))

    @logwrap
    def update_settings(self, data=None):
        return self._put(url="/settings", json=data).json()

    @logwrap
    def get_settings(self, data=None):
        # `data` is unused; kept for signature compatibility with callers.
        return self._get(url="/settings").json()

    @logwrap
    def send_fuel_stats(self, enabled=False):
        """Enable/disable anonymous usage statistics in master settings."""
        settings = self.get_settings()
        params = ('send_anonymous_statistic', 'user_choice_saved')
        for p in params:
            settings['settings']['statistics'][p]['value'] = enabled
        self.update_settings(data=settings)

    @logwrap
    def get_cluster_deployment_tasks(self, cluster_id):
        """ Get list of all deployment tasks for cluster."""
        return self._get(
            url='/clusters/{}/deployment_tasks'.format(cluster_id),
        ).json()

    @logwrap
    def get_release_deployment_tasks(self, release_id):
        """ Get list of all deployment tasks for release."""
        return self._get(
            url='/releases/{}/deployment_tasks'.format(release_id),
        ).json()

    @logwrap
    def get_custom_cluster_deployment_tasks(self, cluster_id, custom_type):
        """ Get list of all deployment tasks for cluster."""
        return self._get(
            '/clusters/{}/deployment_tasks/?graph_type={}'.format(
                cluster_id,
                custom_type
            )).json()

    @logwrap
    def get_end_deployment_tasks(self, cluster_id, end, start=None):
        """ Get list of all deployment tasks for cluster with end parameter.
        If  end=netconfig, return all tasks from the graph included netconfig
        """
        if not start:
            return self._get(
                url='/clusters/{0}/deployment_tasks?end={1}'.format(
                    cluster_id, end)
            ).json()
        return self._get(
            url='/clusters/{0}/deployment_tasks?start={1}&end={2}'.format(
                cluster_id, start, end),
        ).json()

    @logwrap
    def get_orchestrator_deployment_info(self, cluster_id):
        return self._get(
            url='/clusters/{}/orchestrator/deployment'.format(cluster_id),
        ).json()

    @logwrap
    def put_deployment_tasks_for_cluster(self, cluster_id, data, node_id,
                                         force=False):
        """Put  task to be executed on the nodes from cluster

        :param cluster_id: int, cluster id
        :param data: list, tasks ids
        :param node_id: str, Node ids where task should be run,
               can be node_id=1, or node_id =1,2,3,
        :param force: bool, run particular task on nodes and do not care
               if there were changes or not
        :return:
        """
        return self._put(
            '/clusters/{0}/deploy_tasks?nodes={1}{2}'.format(
                cluster_id, node_id, '&force=1' if force else ''),
            json=data).json()

    @logwrap
    def put_deployment_tasks_for_release(self, release_id, data):
        return self._put(
            '/releases/{}/deployment_tasks'.format(release_id),
            json=data).json()

    @logwrap
    def set_hostname(self, node_id, new_hostname):
        """ Set a new hostname for the node"""
        data = dict(hostname=new_hostname)
        return self._put(url='/nodes/{0}/'.format(node_id), json=data).json()

    @logwrap
    def get_network_template(self, cluster_id):
        return self._get(
            url='/clusters/{}/network_configuration/template'.format(
                cluster_id),
        ).json()

    @logwrap
    def upload_network_template(self, cluster_id, network_template):
        return self._put(
            '/clusters/{}/network_configuration/template'.format(cluster_id),
            json=network_template).json()

    @logwrap
    def delete_network_template(self, cluster_id):
        return self._delete(
            url='/clusters/{}/network_configuration/template'.format(
                cluster_id),
        ).json()

    @logwrap
    def get_network_groups(self):
        return self._get(url='/networks/').json()

    @logwrap
    def get_network_group(self, network_id):
        return self._get(url='/networks/{0}/'.format(network_id)).json()

    @logwrap
    def add_network_group(self, network_data):
        return self._post(url='/networks/', json=network_data).json()

    @logwrap
    def del_network_group(self, network_id):
        # Returns the raw response object (no JSON body expected).
        return self._delete(url='/networks/{0}/'.format(network_id))

    @logwrap
    def update_network_group(self, network_id, network_data):
        return self._put(url='/networks/{0}/'.format(network_id),
                         json=network_data).json()

    @logwrap
    def create_vm_nodes(self, node_id, data):
        """Upload virtual-machine definitions to a virt node."""
        logger.info("Uploading VMs configuration to node {0}: {1}".
                    format(node_id, data))
        url = "/nodes/{0}/vms_conf/".format(node_id)
        return self._put(url, json={'vms_conf': data}).json()

    @logwrap
    def spawn_vms(self, cluster_id):
        url = '/clusters/{0}/spawn_vms/'.format(cluster_id)
        return self._put(url).json()

    @logwrap
    def upload_configuration(self, config, cluster_id, role=None,
                             node_id=None, node_ids=None):
        """Upload configuration.

        :param config: a dictionary of configuration to upload.
        :param cluster_id: An integer number of cluster id.
        :param role: a string of role name.
        :param node_id: An integer number of node id.
        :param node_ids: a list of node ids
        :return: a decoded JSON response.
        """
        data = {'cluster_id': cluster_id, 'configuration': config}
        if role is not None:
            data['node_role'] = role
        if node_id is not None:
            data['node_id'] = node_id
        if node_ids is not None:
            data['node_ids'] = node_ids
        url = '/openstack-config/'
        return self._post(url, json=data).json()

    @logwrap
    def get_configuration(self, configuration_id):
        """Get uploaded configuration by id.

        :param configuration_id: An integer number of configuration id.
        :return: a decoded JSON response.
        """
        return self._get(
            url='/openstack-config/{0}'.format(configuration_id),
        ).json()

    @logwrap
    def list_configuration(self, cluster_id, role=None, node_id=None):
        """Get filtered list of configurations.

        :param cluster_id: An integer number of cluster id.
        :param role: a string of role name.
        :param node_id: An integer number of node id.
        :return: a decoded JSON response.
        """
        url = '/openstack-config/?cluster_id={0}'.format(cluster_id)
        if role is not None:
            url += '&node_role={0}'.format(role)
        if node_id is not None:
            url += '&node_id={0}'.format(node_id)
        return self._get(url=url).json()

    @logwrap
    def delete_configuration(self, configuration_id):
        """Delete configuration by id.

        :param configuration_id: An integer number of configuration id.
        :return: urllib2's object of response.
        """
        url = '/openstack-config/{0}'.format(configuration_id)
        return self._delete(url=url)

    @logwrap
    def apply_configuration(self, cluster_id, role=None, node_id=None):
        """Apply configuration.

        :param cluster_id: An integer number of cluster id.
        :param role: a string of role name.
        :param node_id: An integer number of node id.
        :return: a decoded JSON response.
        """
        data = {'cluster_id': cluster_id}
        if role is not None:
            data['node_role'] = role
        if node_id is not None:
            data['node_id'] = node_id
        url = '/openstack-config/execute/'
        return self._put(url, json=data).json()

    @logwrap
    def update_vip_ip(self, cluster_id, data):
        return self._post(
            "/clusters/{0}/network_configuration/ips/vips".format(cluster_id),
            json=data).json()

    @logwrap
    def upload_node_attributes(self, attributes, node_id):
        """Upload node attributes for specified node.

        :param attributes: a dictionary of attributes to upload.
        :param node_id: an integer number of node id.
        :return: a decoded JSON response.
        """
        url = '/nodes/{}/attributes/'.format(node_id)
        return self._put(url, json=attributes).json()

    @logwrap
    def get_node_attributes(self, node_id):
        """Get attributes for specified node.

        :param node_id: an integer number of node id.
        :return: a decoded JSON response.
        """
        return self._get(url='/nodes/{}/attributes/'.format(node_id)).json()

    @logwrap
    def get_deployed_cluster_attributes(self, cluster_id):
        url = '/clusters/{}/attributes/deployed/'.format(cluster_id)
        return self._get(url).json()

    @logwrap
    def get_deployed_network_configuration(self, cluster_id):
        url = '/clusters/{}/network_configuration/deployed'.format(
            cluster_id)
        return self._get(url).json()

    @logwrap
    def get_default_cluster_settings(self, cluster_id):
        url = '/clusters/{}/attributes/defaults'.format(cluster_id)
        return self._get(url).json()

    @logwrap
    def get_all_tasks_list(self):
        return self._get(url='/transactions/').json()

    @logwrap
    def get_deployment_task_hist(self, task_id):
        url = '/transactions/{task_id}/deployment_history'.format(
            task_id=task_id)
        return self._get(
            url=url,
        ).json()

    @logwrap
    def redeploy_cluster_changes(self, cluster_id, data=None):
        """Deploy the changes of cluster settings

        :param cluster_id: int, target cluster ID
        :param data: dict, updated cluster attributes (if empty, the already
                     uploaded attributes will be (re)applied)
        :return: a decoded JSON response
        """
        if data is None:
            data = {}
        return self._put(
            "/clusters/{}/changes/redeploy".format(cluster_id),
            json=data).json()

    @logwrap
    def assign_ip_address_before_deploy_start(self, cluster_id):
        # Returns the raw response object (caller does not need the body).
        return self._get(
            url='/clusters/{}/orchestrator/deployment/defaults/'.format(
                cluster_id)
        )

    @logwrap
    def get_deployment_info_for_task(self, task_id):
        return self._get(
            url='/transactions/{}/deployment_info'.format(task_id),
        ).json()

    @logwrap
    def get_cluster_settings_for_deployment_task(self, task_id):
        return self._get(
            url='/transactions/{}/settings'.format(task_id),
        ).json()

    @logwrap
    def get_network_configuration_for_deployment_task(self, task_id):
        return self._get(
            url='/transactions/{}/network_configuration/'.format(task_id),
        ).json()

    # ConfigDB Extension

    @logwrap
    def get_components(self, comp_id=None):
        """Get all existing components

        :param comp_id: component id
        :return: components data
        """
        endpoint = '/config/components'
        endpoint = '{path}/{component_id}'.format(
            path=endpoint, component_id=comp_id) if comp_id else endpoint
        return self._get(endpoint).json()

    @logwrap
    def create_component(self, data):
        """ Create component with specified data

        :param data:
        :return:
        """
        return self._post('/config/components', json=data).json()

    @logwrap
    def get_environments(self, env_id=None):
        """Get all existing environments

        :param env_id: environment id
        :return: env data
        """
        endpoint = '/config/environments'
        endpoint = '{path}/{env_id}'.format(
            env_id=env_id, path=endpoint) if env_id else endpoint
        return self._get(endpoint).json()

    @logwrap
    def create_environment(self, data):
        """ Create env with specified data

        :param data:
        :return:
        """
        return self._post('/config/environments', json=data).json()

    @logwrap
    def get_global_resource_id_value(self, env_id, resource_id,
                                     effective=False):
        """ Get global resource value for specified env and resource

        :param env_id:  str or int
        :param resource_id: int
        :param effective: true or false
        :return: global resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_id)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def get_global_resource_name_value(self, env_id, resource_name,
                                       effective=False):
        """ Get global resource value for specified env and resource

        :param env_id:  str or int
        :param resource_name: str or int
        :param effective: true or false
        :return: global resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_name)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def put_global_resource_value(self, env_id, resource, data):
        """Put global resource value

        :param env_id: str or int
        :param resource: name or id
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource)
        return self._put(endpoint, json=data)

    @logwrap
    def put_global_resource_override(self, env_id, resource, data):
        """Put global resource override value

        :param env_id: str or int
        :param resource: name or id
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/resources/{resource}' \
                   '/overrides'.format(env_id=env_id, resource=resource)
        return self._put(endpoint, json=data)

    @logwrap
    def get_node_resource_id_value(self, env_id, resource_id, node_id,
                                   effective=False):
        """ Get node level resource value for specified env, resource and node

        :param env_id: str or int
        :param resource_id: id
        :param node_id: str or int
        :param effective: true or false
        :return: node resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_id,
                                    node_id=node_id)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def get_node_resource_name_value(self, env_id, resource_name, node_id,
                                     effective=False):
        """ Get node level resource value for specified env, resource and node

        :param env_id: str or int
        :param resource_name: name in string format
        :param node_id: str or int
        :param effective: true or false
        :return: node resource value
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource_name,
                                    node_id=node_id)
        endpoint = endpoint + '?effective' if effective else endpoint

        return self._get(endpoint).json()

    @logwrap
    def put_node_resource_value(self, env_id, resource, node_id, data):
        """ Put node resource value

        :param env_id: str or int
        :param resource: name or id
        :param node_id: str or int
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/values'.format(env_id=env_id, resource=resource,
                                    node_id=node_id)
        return self._put(endpoint, json=data)

    @logwrap
    def put_node_resource_overrides(self, env_id, resource, node_id, data):
        """Put node resource override value

        :param env_id: str or int
        :param resource: name or id
        :param node_id: str or int
        :param data: data in dict format
        """
        endpoint = '/config/environments/' \
                   '{env_id}/nodes/{node_id}/resources/{resource}' \
                   '/overrides'.format(env_id=env_id, resource=resource,
                                       node_id=node_id)
        return self._put(endpoint, json=data)

    @logwrap
    def plugins_list(self):
        """Get list of installed plugins"""
        endpoint = '/plugins'
        return self._get(endpoint).json()
Example #5
0
class Configurator:
    """Tracks vcenter connections and derives deployment options from them."""

    # Cluster names look like "productionbb042"; group(1) captures the
    # trailing number with leading zeroes stripped.
    CLUSTER_MATCH = re.compile('^productionbb0*([1-9][0-9]*)$')
    # Matches datastore names starting with "eph" (ephemeral datastores).
    EPH_MATCH = re.compile('^eph.*$')
    # Datastore names may end in "_hga"/"_hgb" marking their HA group.
    HAGROUP_MATCH = re.compile('.*_hg(?P<hagroup>[ab])$', re.IGNORECASE)
    # Bridge networks are named "br-<physical>".
    BR_MATCH = re.compile('^br-(.*)$')

    def __init__(self, domain, global_options=None):
        """Initialize state and read the initial configuration.

        :param domain: DNS domain the managed vcenters live in
        :param global_options: optional base options copied into
            ``self.global_options``; ``None`` means empty. The caller's
            dict is never mutated.
        """
        # FIX: the default used to be a mutable ``{}``; use None to avoid
        # the shared-mutable-default pitfall. Behavior is unchanged for
        # all existing callers.
        self.global_options = (global_options or {}).copy()
        self.password = None
        self.mpw = None
        self.domain = domain
        self.os_session = None
        self.vcenters = dict()
        self.states = dict()
        self.poll_config()
        self.global_options['cells'] = {}
        self.global_options['domain'] = domain

        # Close all vcenter connections when the interpreter exits.
        atexit.register(self._disconnect_vcenters)

    def _disconnect_vcenters(self):
        """Disconnect all vcenters we are connected to"""
        for host in self.vcenters:
            service_instance = self.vcenters[host].get('service_instance')
            if not service_instance:
                continue
            try:
                Disconnect(service_instance)
            except Exception:
                # best effort disconnection
                pass

    def __call__(self, added, removed):
        """Add/remove vcenters from our managed list of vcenters"""
        for name in added:
            host = f'{name}.{self.domain}'
            try:
                self._reconnect_vcenter_if_necessary(host)
            except VcConnectionFailed:
                LOG.error('Connecting to %s failed.', host)
                continue

        if removed:
            LOG.info(f"Gone vcs {removed}")

    def _connect_vcenter(self, host):
        """Create a connection to host and add it to self.vcenters.

        Applies incremental backoff between attempts: raises
        VcConnectSkipped while still inside the backoff window, and
        VcConnectionFailed when the attempt itself did not yield a
        service instance.
        """
        # Vcenter doesn't accept / in password
        password = self.mpw.derive('long', host).replace("/", "")

        if host not in self.vcenters:
            # First contact: seed the bookkeeping entry for this host.
            self.vcenters[host] = {
                'username': self.username,
                'password': password,
                'host': host,
                'name': host.split('.', 1)[0],
                'retries': 0,
                'last_retry_time': time.time()
            }
            vc = self.vcenters[host]
        else:
            vc = self.vcenters[host]
            # remove the service_instance for reconnect so we can easily
            # detect a vcenter we are not connected to
            if 'service_instance' in vc:
                del vc['service_instance']

        retries = vc['retries']
        if retries:
            # wait a maximum of 10 minutes, a minimum of 1
            wait_time = min(retries, 10) * 60
            if time.time() < vc['last_retry_time'] + wait_time:
                LOG.debug('Ignoring reconnection attempt to %s because of '
                          'incremental backoff (retry %s).', host, retries)
                raise VcConnectSkipped()

        try:
            LOG.info(f"Connecting to {host}")

            # Count the attempt before making it, so a failure below still
            # feeds the backoff logic on the next call.
            vc['retries'] += 1
            vc['last_retry_time'] = time.time()

            service_instance = None
            if hasattr(ssl, '_create_unverified_context'):
                # NOTE(review): certificate verification is deliberately
                # disabled here -- presumably the vcenters use self-signed
                # certs; confirm before tightening.
                context = ssl._create_unverified_context()

                service_instance = SmartConnect(host=host,
                                                user=self.username,
                                                pwd=password,
                                                port=443,
                                                sslContext=context)

            if service_instance:
                vc['service_instance'] = service_instance

        except vim.fault.InvalidLogin as e:
            LOG.error("%s: %s", host, e.msg)
        except (Exception, socket_error) as e:
            # Any other failure is logged and treated as "not connected"
            # by the check below.
            LOG.error("%s: %s", host, e)

        if vc.get('service_instance') is None:
            raise VcConnectionFailed()
        # A successful connection resets the backoff counter.
        vc['retries'] = 0

    def _reconnect_vcenter_if_necessary(self, host):
        """Test a vcenter connection and reconnect if necessary"""
        needs_reconnect = \
            host not in self.vcenters or \
            'service_instance' not in self.vcenters[host]
        if not needs_reconnect:
            try:
                self.vcenters[host]['service_instance'].CurrentTime()
            except Exception as e:
                LOG.info('Trying to reconnect to %s because of %s', host, e)
                needs_reconnect = True

        if needs_reconnect:
            self._connect_vcenter(host)

    def _poll(self, host):
        """Collect per-cluster and per-datacenter option dicts from one vcenter.

        :param host: vcenter hostname; (re)connected via the reconnect helper,
            which may raise VcConnectionFailed or VcConnectSkipped
        :return: ``{'clusters': {name: options}, 'datacenters': {az: options}}``
        """
        self._reconnect_vcenter_if_necessary(host)
        vcenter_options = self.vcenters[host]
        values = {'clusters': {}, 'datacenters': {}}
        service_instance = vcenter_options['service_instance']

        # Clusters containing at least one host with an opaque (NSX-T)
        # switch are flagged as NSX-T enabled below.
        nsx_t_clusters = set()

        with filter_spec_context(service_instance,
                                 obj_type=vim.HostSystem,
                                 path_set=['name', 'parent', 'config.network.opaqueSwitch']) as filter_spec:
            for h in vcu.collect_properties(service_instance, [filter_spec]):
                if 'config.network.opaqueSwitch' not in h:
                    LOG.debug("Broken ESXi host %s detected in cluster %s",
                              h['name'], h['parent'])
                    continue
                if len(h['config.network.opaqueSwitch']) > 0:
                    LOG.debug("(Possible) NSX-T switch found on %s", h['name'])
                    nsx_t_clusters.add(h['parent'])

        with filter_spec_context(service_instance) as filter_spec:
            availability_zones = set()
            cluster_options = None

            for cluster in vcu.collect_properties(service_instance, [filter_spec]):
                cluster_name = cluster['name']
                match = self.CLUSTER_MATCH.match(cluster_name)

                if not match:
                    LOG.debug(
                        "%s: Ignoring cluster %s "
                        "not matching naming scheme", host, cluster_name)
                    continue
                # e.g. "productionbb012" -> "bb12"
                bb_name_no_zeroes = f'bb{match.group(1)}'

                nsx_t_enabled = cluster['obj'] in nsx_t_clusters
                if nsx_t_enabled:
                    LOG.debug('NSX-T enabled for %s', cluster_name)

                parent = cluster['parent']
                # NOTE(review): availability zone appears to be taken from
                # the grandparent inventory object's name -- confirm layout.
                availability_zone = parent.parent.name.lower()

                availability_zones.add(availability_zone)
                # Layer the options: globals, then this vcenter's options,
                # minus the live connection object (not serializable /
                # not an option).
                cluster_options = self.global_options.copy()
                cluster_options.update(vcenter_options)
                cluster_options.pop('service_instance', None)
                cluster_options.update(name=bb_name_no_zeroes,
                                       cluster_name=cluster_name,
                                       availability_zone=availability_zone,
                                       nsx_t_enabled=nsx_t_enabled,
                                       vcenter_name=vcenter_options['name'])

                if cluster_options.get('pbm_enabled', 'false') != 'true':
                    # Without policy-based management, derive a regex from
                    # the common prefix of the "eph*" datastore names.
                    datastores = cluster['datastore']
                    datastore_names = [datastore.name
                                       for datastore in datastores
                                       if self.EPH_MATCH.match(datastore.name)]
                    eph = commonprefix(datastore_names)
                    cluster_options.update(datastore_regex=f"^{eph}.*")
                    # Enable hagroup balancing only when both the "a" and
                    # "b" groups are present among the datastores.
                    hagroups = set()
                    for name in datastore_names:
                        m = self.HAGROUP_MATCH.match(name)
                        if not m:
                            continue
                        hagroups.add(m.group('hagroup').lower())
                    if {'a', 'b'}.issubset(hagroups):
                        LOG.debug('ephemeral datastore hagroups enabled for %s', cluster_name)
                        cluster_options.update(datastore_hagroup_regex=self.HAGROUP_MATCH.pattern)

                # Use the first "br-..." network to derive bridge/physical
                # names for this cluster.
                for network in cluster['network']:
                    try:
                        match = self.BR_MATCH.match(network.name)
                        if match:
                            cluster_options['bridge'] = match.group(0).lower()
                            cluster_options['physical'] = match.group(1).lower()
                            break
                    except vim.ManagedObjectNotFound:
                        # sometimes a portgroup might be already deleted when
                        # we try to query its name here
                        continue

                if 'bridge' not in cluster_options and not nsx_t_enabled:
                    LOG.warning("%s: Skipping cluster %s, "
                                "cannot find bridge matching naming scheme",
                                host, cluster_name)
                    continue

                values['clusters'][cluster_name] = cluster_options

            # One datacenter entry per availability zone seen above.
            for availability_zone in availability_zones:
                cluster_options = self.global_options.copy()
                cluster_options.update(vcenter_options)
                cluster_options.pop('service_instance', None)
                cluster_options.update(availability_zone=availability_zone)
                values['datacenters'][availability_zone] = cluster_options

        return values

    @property
    def _client(self):
        """Return the module-level ``client`` (presumably the kubernetes
        API module used by poll_config -- confirm)."""
        return client

    @property
    def username(self):
        return self.global_options['username']

    @property
    def namespace(self):
        return self.global_options['own_namespace']

    def poll_config(self):
        """Read the 'vcenter-operator' configmap and refresh global options.

        Values that parse as JSON are stored decoded, everything else is
        kept as the raw string. When the master password changed, the
        MasterPassword helper and the OpenStack session are rebuilt.
        """
        data = client.CoreV1Api().read_namespaced_config_map(
            namespace=self.namespace,
            name='vcenter-operator').data

        password = data.pop('password')
        for key, raw in data.items():
            try:
                parsed = json.loads(raw)
            except ValueError:
                parsed = raw
            self.global_options[key] = parsed

        if self.password != password:
            self.global_options.update(master_password=password)
            self.password = password
            self.mpw = MasterPassword(self.username, self.password)
            self.setup_os_session()

    def setup_os_session(self):
        os_username = self.global_options.get('os_username')
        if not os_username:
            return
        os_username += self.global_options.get('user_suffix', '')
        mpw = MasterPassword(os_username, self.password)
        host = "identity-3." + self.domain.split('.', 1)[1]
        password = mpw.derive('long', host)
        auth = Password(
            auth_url='https://' + host + '/v3',
            username=os_username,
            user_domain_name=self.global_options.get('os_user_domain_name'),
            project_name=self.global_options.get('os_project_name'),
            project_domain_name=self.global_options.get('os_project_domain_name'),
            password=password,
        )
        self.os_session = Session(auth=auth)

    def _poll_nova(self):
        if not self.os_session:
            return

        try:
            endpoint_filter = {'service_type': 'compute', 'interface': 'public'}
            resp = self.os_session.get('/os-cells', endpoint_filter=endpoint_filter)
            for cell in resp.json().get('cellsv2', []):
                self.global_options['cells'][cell['name']] = cell
        except (HttpError, ConnectionError) as e:
            LOG.error(f"Failed to get cells: {e}")

    def poll(self):
        self.poll_config()
        self._poll_nova()

        # If we fail to update the templates, we rather do not continue
        # to avoid rendering only half of the deployment
        if not DeploymentState.poll_templates():
            return

        for host in self.vcenters:
            try:
                values = self._poll(host)
                state = DeploymentState(
                    namespace=self.global_options['namespace'],
                    dry_run=(self.global_options.get('dry_run', 'False')
                             == 'True'))

                for options in values['clusters'].values():
                    state.render('vcenter_cluster', options)

                for options in values['datacenters'].values():
                    state.render('vcenter_datacenter', options)

                last = self.states.get(host)

                if last:
                    delta = last.delta(state)
                    delta.apply()
                else:
                    state.apply()

                self.states[host] = state
            except VcConnectionFailed:
                LOG.error(
                    "Reconnecting to %s failed. Ignoring VC for this run.", host
                )
            except VcConnectSkipped:
                LOG.info("Ignoring disconnected %s for this run.", host)
            except http.client.HTTPException as e:
                LOG.warning("%s: %r", host, e)