Example #1
def record_user_action(cse_operation,
                       status=OperationStatus.SUCCESS,
                       message=None,
                       telemetry_settings=None):
    """Record CSE user action information in telemetry server.

    No exceptions should be leaked. Catch all exceptions and log them.

    :param CseOperation cse_operation: CSE operation information
    :param OperationStatus status: SUCCESS/FAILURE of the user action
    :param str message: any information about failure or custom message
    :param dict telemetry_settings: telemetry section of CSE config->service
    """
    try:
        if not telemetry_settings:
            server_config = get_server_runtime_config()
            telemetry_settings = None if not server_config else \
                server_config.get('service', {}).get('telemetry')

        if telemetry_settings:
            if telemetry_settings.get('enable'):
                payload = get_payload_for_user_action(cse_operation, status,
                                                      message)  # noqa: E501
                _send_data_to_telemetry_server(payload, telemetry_settings)
        else:
            LOGGER.debug('No telemetry settings found.')
    except Exception as err:
        LOGGER.warning(f"Error in recording user action information:{str(err)}"
                       )  # noqa: E501
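A minimal call sketch for the handler above. The operation member, status and settings shown are illustrative; only the 'enable' key of telemetry_settings is actually inspected by this function.

# Hypothetical call; CseOperation.CLUSTER_CREATE and OperationStatus.FAILURE
# are assumed member names, and the settings dict is trimmed to the one key
# read above.
record_user_action(cse_operation=CseOperation.CLUSTER_CREATE,
                   status=OperationStatus.FAILURE,
                   message="cluster creation failed",
                   telemetry_settings={'enable': True})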
Example #2
def template_list(request_data, tenant_auth_token):
    """Request handler for template list operation.

    :return: List of dictionaries with template info.
    """
    config = utils.get_server_runtime_config()
    templates = []
    for t in config['broker']['templates']:
        templates.append({
            'name': t[LocalTemplateKey.NAME],
            'revision': t[LocalTemplateKey.REVISION],
            'is_default':
                t[LocalTemplateKey.NAME] == config['broker']['default_template_name']  # noqa: E501
                and str(t[LocalTemplateKey.REVISION]) == str(config['broker']['default_template_revision']),  # noqa: E501
            'catalog': config['broker']['catalog'],
            'catalog_item': t[LocalTemplateKey.CATALOG_ITEM_NAME],
            'description': t[LocalTemplateKey.DESCRIPTION].replace("\\n", ", ")
        })
    return templates
def template_list(request_data, tenant_auth_token, is_jwt_token):
    """Request handler for template list operation.

    :return: List of dictionaries with template info.
    """
    config = utils.get_server_runtime_config()
    templates = []
    for t in config['broker']['templates']:
        template_name = t[LocalTemplateKey.NAME]
        template_revision = str(t[LocalTemplateKey.REVISION])
        default_template_name = config['broker']['default_template_name']
        default_template_revision = str(
            config['broker']['default_template_revision'])  # noqa: E501
        is_default = (template_name, template_revision) == (
            default_template_name, default_template_revision)  # noqa: E501

        templates.append({
            'name': template_name,
            'revision': template_revision,
            'is_default': 'Yes' if is_default else 'No',
            'catalog': config['broker']['catalog'],
            'catalog_item': t[LocalTemplateKey.CATALOG_ITEM_NAME],
            'description': t[LocalTemplateKey.DESCRIPTION].replace("\\n", ", ")
        })

    return sorted(templates,
                  key=lambda i: (i['name'], i['revision']),
                  reverse=True)  # noqa: E501
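For reference, a sketch of the config['broker'] shape both variants of this handler assume, using only the keys read above. Values are illustrative, and LocalTemplateKey is assumed to be a string-valued enum so its members can index the template dictionaries.

broker_config_sketch = {
    'catalog': 'cse-catalog',                 # illustrative
    'default_template_name': 'template-a',    # illustrative
    'default_template_revision': 1,
    'templates': [
        {
            LocalTemplateKey.NAME: 'template-a',
            LocalTemplateKey.REVISION: 1,
            LocalTemplateKey.CATALOG_ITEM_NAME: 'template-a_rev1',
            LocalTemplateKey.DESCRIPTION: 'Ubuntu\\nDocker\\nKubernetes',
        },
    ],
}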
def get_ovdc(operation_context: ctx.OperationContext, ovdc_id: str) -> dict:
    """Get ovdc info for a particular ovdc.

    :param ctx.OperationContext operation_context: context for the request
    :param str ovdc_id: ID of the ovdc
    :return: dictionary containing the ovdc information
    :rtype: dict
    """
    # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the config is set
    # to false, prevent showing information about TKG+ by skipping TKG+
    # entries in the result.
    cse_params = {
        RequestKey.OVDC_ID: ovdc_id
    }
    telemetry_handler.record_user_action_details(cse_operation=CseOperation.OVDC_INFO, # noqa: E501
                                                 cse_params=cse_params)
    config = utils.get_server_runtime_config()
    log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
    result = asdict(get_ovdc_k8s_runtime_details(operation_context.sysadmin_client, # noqa: E501
                                                 ovdc_id=ovdc_id,
                                                 log_wire=log_wire))
    # TODO: Find a better way to avoid sending remove_cp_from_vms_on_disable
    # flag
    if ClusterEntityKind.TKG_PLUS.value in result['k8s_runtime'] \
            and not utils.is_tkg_plus_enabled():
        result['k8s_runtime'].remove(ClusterEntityKind.TKG_PLUS.value)
    del result['remove_cp_from_vms_on_disable']
    return result
Example #5
def get_sys_admin_client():
    server_config = get_server_runtime_config()
    if not server_config['vcd']['verify']:
        SERVER_LOGGER.warning("InsecureRequestWarning: Unverified HTTPS "
                              "request is being made. Adding certificate "
                              "verification is strongly advised.")
        requests.packages.urllib3.disable_warnings()
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
    client = vcd_client.Client(
        uri=server_config['vcd']['host'],
        api_version=server_config['vcd']['api_version'],
        verify_ssl_certs=server_config['vcd']['verify'],
        log_file=log_filename,
        log_requests=log_wire,
        log_headers=log_wire,
        log_bodies=log_wire)
    credentials = vcd_client.BasicLoginCredentials(
        server_config['vcd']['username'],
        SYSTEM_ORG_NAME,
        server_config['vcd']['password'])
    client.set_credentials(credentials)
    return client
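A usage sketch for the factory above: callers are expected to log the client out when they are done, mirroring the authorization decorator later in this document that calls sys_admin_client.logout().

client = get_sys_admin_client()
try:
    org_list = client.get_org_list()  # any call on the sys admin client
finally:
    client.logout()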
def list_ovdc(operation_context: ctx.OperationContext) -> List[dict]:
    """List all ovdc and their k8s runtimes.

    :param ctx.OperationContext operation_context: context for the request
    :return: list of dictionary containing details about the ovdc
    :rtype: List[dict]
    """
    # NOTE: For CSE 3.0, if the `enable_tkg_plus` flag in the config is set
    # to false, prevent showing information about TKG+ by skipping TKG+
    # entries in the result.
    # Record telemetry
    telemetry_handler.record_user_action_details(cse_operation=CseOperation.OVDC_LIST, # noqa: E501
                                                 cse_params={})

    ovdcs = []
    org_vdcs = vcd_utils.get_all_ovdcs(operation_context.client)
    for ovdc in org_vdcs:
        ovdc_name = ovdc.get('name')
        config = utils.get_server_runtime_config()
        log_wire = utils.str_to_bool(config.get('service', {}).get('log_wire'))
        ovdc_id = vcd_utils.extract_id(ovdc.get('id'))
        ovdc_details = asdict(
            get_ovdc_k8s_runtime_details(operation_context.sysadmin_client,
                                         ovdc_id=ovdc_id,
                                         ovdc_name=ovdc_name,
                                         log_wire=log_wire))
        if ClusterEntityKind.TKG_PLUS.value in ovdc_details['k8s_runtime'] \
                and not utils.is_tkg_plus_enabled():  # noqa: E501
            ovdc_details['k8s_runtime'].remove(ClusterEntityKind.TKG_PLUS.value)  # noqa: E501
        # TODO: Find a better way to remove remove_cp_from_vms_on_disable
        del ovdc_details['remove_cp_from_vms_on_disable']
        ovdcs.append(ovdc_details)
    return ovdcs
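The TKG+ filtering step shared by get_ovdc and list_ovdc, isolated as a small sketch; 'native' is an illustrative value for another k8s runtime.

k8s_runtime = ['native', ClusterEntityKind.TKG_PLUS.value]
if ClusterEntityKind.TKG_PLUS.value in k8s_runtime \
        and not utils.is_tkg_plus_enabled():
    # hide TKG+ when the `enable_tkg_plus` config flag is off
    k8s_runtime.remove(ClusterEntityKind.TKG_PLUS.value)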
 def delete_nodes_thread(self):
     LOGGER.debug(f"About to delete nodes from cluster with name: "
                  f"{self.cluster_name}")
     try:
         vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
         template = self._get_template()
         self._update_task(
             TaskStatus.RUNNING,
             message=f"Deleting "
                     f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                     f" node(s) from "
                     f"{self.cluster_name}({self.cluster_id})")
         try:
             server_config = get_server_runtime_config()
             delete_nodes_from_cluster(
                 server_config,
                 vapp,
                 template,
                 self.req_spec.get(RequestKey.NODE_NAMES_LIST),
                 self.req_spec.get(RequestKey.FORCE_DELETE))
         except Exception:
             LOGGER.error(f"Couldn't delete node "
                          f"{self.req_spec.get(RequestKey.NODE_NAMES_LIST)}"
                          f" from cluster:{self.cluster_name}")
         self._update_task(
             TaskStatus.RUNNING,
             message=f"Undeploying "
                     f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                     f" node(s) for {self.cluster_name}({self.cluster_id})")
         for vm_name in self.req_spec.get(RequestKey.NODE_NAMES_LIST):
             vm = VM(self.tenant_client, resource=vapp.get_vm(vm_name))
             try:
                 task = vm.undeploy()
                 self.tenant_client.get_task_monitor().wait_for_status(task)
             except Exception:
                 LOGGER.warning(f"Couldn't undeploy VM {vm_name}")
         self._update_task(
             TaskStatus.RUNNING,
             message=f"Deleting "
                     f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                     f" VM(s) for {self.cluster_name}({self.cluster_id})")
         task = vapp.delete_vms(self.req_spec.get(RequestKey.NODE_NAMES_LIST)) # noqa: E501
         self.tenant_client.get_task_monitor().wait_for_status(task)
         self._update_task(
             TaskStatus.SUCCESS,
             message=f"Deleted "
                     f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))}"
                     f" node(s) to cluster "
                     f"{self.cluster_name}({self.cluster_id})")
     except Exception as e:
         LOGGER.error(traceback.format_exc())
         error_obj = error_to_json(e)
         stack_trace = \
             ''.join(error_obj[ERROR_MESSAGE_KEY][ERROR_STACKTRACE_KEY])
         self._update_task(
             TaskStatus.ERROR,
             error_message=error_obj[ERROR_MESSAGE_KEY][ERROR_DESCRIPTION_KEY],  # noqa: E501
             stack_trace=stack_trace)
     finally:
         self._disconnect_sys_admin()
    def _get_nfs_exports(self, ip, vapp, node):
        """Get the exports from remote NFS server (helper method).

        :param str ip: IP address of the NFS server
        :param pyvcloud.vcd.vapp.VApp vapp: the vApp or cluster to which the
            node belongs
        :param lxml.objectify.StringElement node: object representing the vm
            resource

        :return: list of exports
        :rtype: list
        """
        # TODO(right template) find a right way to retrieve
        # the template from which nfs node was created.
        server_config = get_server_runtime_config()
        template = server_config['broker']['templates'][0]
        script = f"#!/usr/bin/env bash\nshowmount -e {ip}"
        result = execute_script_in_nodes(
            server_config, vapp, template['admin_password'],
            script, nodes=[node], check_tools=False)
        lines = result[0][1].content.decode().split('\n')
        exports = []
        for index in range(1, len(lines) - 1):
            export = lines[index].strip().split()[0]
            exports.append(export)
        return exports
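Why the loop above skips the first and last lines: `showmount -e` prints a header line followed by one export per line, and splitting on '\n' leaves a trailing empty string. A self-contained illustration with made-up output:

sample = "Export list for 10.150.200.22:\n/export1 *\n/export2 10.150.0.0/16\n"
lines = sample.split('\n')
exports = [lines[i].strip().split()[0] for i in range(1, len(lines) - 1)]
# exports == ['/export1', '/export2']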
    def node_rollback(self, node_list):
        """Rollback for node creation failure.

        :param list node_list: faulty nodes to be deleted
        """
        LOGGER.info(f"About to rollback nodes from cluster with name: "
                    "{self.cluster_name}")
        LOGGER.info(f"Node list to be deleted:{node_list}")
        vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
        template = self._get_template()
        try:
            server_config = get_server_runtime_config()
            delete_nodes_from_cluster(server_config, vapp, template,
                                      node_list, force=True)
        except Exception:
            LOGGER.warning("Couldn't delete node {node_list} from cluster:"
                           "{self.cluster_name}")
        for vm_name in node_list:
            vm = VM(self.tenant_client, resource=vapp.get_vm(vm_name))
            try:
                vm.undeploy()
            except Exception:
                LOGGER.warning(f"Couldn't undeploy VM {vm_name}")
        vapp.delete_vms(node_list)
        LOGGER.info(f"Successfully deleted nodes: {node_list}")
Example #10
        def decorator_wrapper(*args, **kwargs):
            server_config = utils.get_server_runtime_config()

            if (server_config['service']['enforce_authorization']
                    and required_rights is not None
                    and len(required_rights) > 0):
                class_instance: abstract_broker.AbstractBroker = args[0]
                user_rights = class_instance.context.user.rights

                missing_rights = []
                for right_name in required_rights:
                    namespaced_name = f'{{{CSE_SERVICE_NAMESPACE}}}' \
                                      f':{right_name}'
                    if namespaced_name not in user_rights:
                        missing_rights.append(namespaced_name)

                if len(missing_rights) > 0:
                    LOGGER.debug(f"Authorization failed for user "
                                 f"'{class_instance.context.user.name}'. "
                                 f"Missing required rights: "
                                 f"{missing_rights}")
                    raise Exception(f'Access forbidden. Missing required '
                                    f'rights: {missing_rights}')

            return func(*args, **kwargs)
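A hypothetical application of the wrapper above. Only decorator_wrapper is shown here, so the outer decorator factory (called `secure` below) and the right name are assumptions.

@secure(required_rights=['CSE NATIVE DEPLOY RIGHT'])  # illustrative right name
def resize_cluster(self, *args, **kwargs):
    ...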
def ovdc_compute_policy_list(request_data, op_ctx: ctx.OperationContext):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: List of dictionaries with compute policy details.
    """
    required = [RequestKey.OVDC_ID]
    req_utils.validate_payload(request_data, required)

    config = utils.get_server_runtime_config()
    cpm = compute_policy_manager.ComputePolicyManager(
        op_ctx.sysadmin_client,
        log_wire=utils.str_to_bool(config['service'].get('log_wire')))
    compute_policies = []
    for cp in compute_policy_manager.list_cse_sizing_policies_on_vdc(
            cpm, request_data[RequestKey.OVDC_ID]):
        policy = {
            'name': cp['display_name'],
            'id': cp['id'],
            'href': cp['href']
        }
        compute_policies.append(policy)
    return compute_policies
 def _get_template(self, name=None):
     server_config = get_server_runtime_config()
     name = name or \
         self.req_spec.get(RequestKey.TEMPLATE_NAME) or \
         server_config['broker']['default_template']
     for template in server_config['broker']['templates']:
         if template['name'] == name:
             return template
     raise Exception(f"Template {name} not found.")
Example #13
 def create_nodes_thread(self):
     LOGGER.debug(f"About to add nodes to cluster with name: "
                  f"{self.cluster_name}")
     try:
         server_config = get_server_runtime_config()
         org_resource = self.tenant_client.get_org()
         org = Org(self.tenant_client, resource=org_resource)
         vdc = VDC(self.tenant_client, href=self.cluster['vdc_href'])
         vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
         template = self.get_template()
         self.update_task(
             TaskStatus.RUNNING,
             message=f"Creating {self.req_spec['node_count']} node(s) for "
             f"{self.cluster_name}({self.cluster_id})")
         new_nodes = add_nodes(self.req_spec['node_count'], template,
                               self.req_spec['node_type'], server_config,
                               self.tenant_client, org, vdc, vapp,
                               self.req_spec)
         if self.req_spec['node_type'] == TYPE_NFS:
             self.update_task(
                 TaskStatus.SUCCESS,
                 message=f"Created {self.req_spec['node_count']} node(s) "
                 f"for {self.cluster_name}({self.cluster_id})")
         elif self.req_spec['node_type'] == TYPE_NODE:
             self.update_task(
                 TaskStatus.RUNNING,
                 message=f"Adding {self.req_spec['node_count']} node(s) to "
                 f"cluster {self.cluster_name}({self.cluster_id})")
             target_nodes = []
             for spec in new_nodes['specs']:
                 target_nodes.append(spec['target_vm_name'])
             vapp.reload()
             join_cluster(server_config, vapp, template, target_nodes)
             self.update_task(
                 TaskStatus.SUCCESS,
                 message=f"Added {self.req_spec['node_count']} node(s) to "
                 f"cluster {self.cluster_name}({self.cluster_id})")
     except NodeCreationError as e:
         error_obj = error_to_json(e)
         LOGGER.error(traceback.format_exc())
         stack_trace = ''.join(error_obj[ERROR_MESSAGE][ERROR_STACKTRACE])
         self.update_task(
             TaskStatus.ERROR,
             error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION],
             stack_trace=stack_trace)
         raise
     except Exception as e:
         error_obj = error_to_json(e)
         LOGGER.error(traceback.format_exc())
         stack_trace = ''.join(error_obj[ERROR_MESSAGE][ERROR_STACKTRACE])
         self.update_task(
             TaskStatus.ERROR,
             error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION],
             stack_trace=stack_trace)
     finally:
         self._disconnect_sys_admin()
Example #14
    def __init__(self, pks_ctx, op_ctx: ctx.OperationContext):
        """Initialize PKS broker.

        :param dict pks_ctx: a dictionary that must contain at least the keys
            'username', 'secret', 'host', 'port' and 'uaac_port'; 'proxy' and
            'pks_compute_profile_name' are optional keys. Currently all
            callers of this method use the ovdc cache (subject to change) to
            initialize the PKS broker.
        """
        self.context: ctx.OperationContext = None
        # populates above attributes
        super().__init__(op_ctx)

        if not pks_ctx:
            raise ValueError(
                "PKS context is required to establish connection to PKS")

        self.username = pks_ctx['username']
        self.secret = pks_ctx['secret']
        self.pks_host_uri = f"https://{pks_ctx['host']}:{pks_ctx['port']}"
        self.uaac_uri = f"https://{pks_ctx['host']}:{pks_ctx['uaac_port']}"
        self.proxy_uri = None
        if pks_ctx.get('proxy'):
            self.proxy_uri = f"http://{pks_ctx['proxy']}:80"
        self.compute_profile = pks_ctx.get(PKS_COMPUTE_PROFILE_KEY, None)
        self.nsxt_server = \
            utils.get_pks_cache().get_nsxt_info(pks_ctx.get('vc'))
        self.nsxt_client = None
        self.pks_wire_logger = NULL_LOGGER
        nsxt_wire_logger = NULL_LOGGER
        config = utils.get_server_runtime_config()
        if utils.str_to_bool(config['service'].get('log_wire')):
            nsxt_wire_logger = SERVER_NSXT_WIRE_LOGGER
            self.pks_wire_logger = SERVER_PKS_WIRE_LOGGER
        if self.nsxt_server:
            self.nsxt_client = NSXTClient(
                host=self.nsxt_server.get('host'),
                username=self.nsxt_server.get('username'),
                password=self.nsxt_server.get('password'),
                logger_debug=SERVER_LOGGER,
                logger_wire=nsxt_wire_logger,
                http_proxy=self.nsxt_server.get('proxy'),
                https_proxy=self.nsxt_server.get('proxy'),
                verify_ssl=self.nsxt_server.get('verify'))
        # TODO() Add support in pyvcloud to send metadata values with their
        # types intact.
        verify_ssl = pks_ctx.get('verify')
        self.verify = True
        if isinstance(verify_ssl, bool):
            self.verify = verify_ssl
        elif isinstance(verify_ssl, str):
            self.verify = utils.str_to_bool(verify_ssl)

        self.pks_client = self._get_pks_client(self._get_token())
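For reference, the pks_ctx shape the constructor above reads; the keys are taken from the code, the values are illustrative.

pks_ctx_sketch = {
    'username': 'pks-admin',
    'secret': '********',
    'host': 'pks.example.com',
    'port': '9021',
    'uaac_port': '8443',
    'proxy': 'proxy.example.com',     # optional
    'vc': 'vc-1',                     # used to look up NSX-T info
    'verify': 'true',                 # bool or string, normalized above
    # PKS_COMPUTE_PROFILE_KEY: ...    # optional compute profile name
}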
Example #15
 def get_cluster_config(self, cluster_name):
     self._connect_tenant()
     clusters = load_from_metadata(self.tenant_client, name=cluster_name)
     if len(clusters) != 1:
         raise CseServerError(f"Cluster '{cluster_name}' not found")
     vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
     template = self.get_template(name=clusters[0]['template'])
     server_config = get_server_runtime_config()
     result = get_cluster_config(server_config, vapp,
                                 template['admin_password'])
     return result
 def get_template(self, name=None):
     server_config = get_server_runtime_config()
     if name is None:
         if 'template' in self.body and self.body['template'] is not None:
             name = self.body['template']
         else:
             name = server_config['broker']['default_template']
     for template in server_config['broker']['templates']:
         if template['name'] == name:
             return template
     raise Exception('Template %s not found' % name)
 def cloudapi_client(self):
     if self._cloudapi_client is None:
         log_wire = utils.get_server_runtime_config() \
                         .get('service', {}).get('log_wire', False)
         logger_wire = logger.NULL_LOGGER
         if log_wire:
             logger_wire = logger.SERVER_CLOUDAPI_WIRE_LOGGER
         self._cloudapi_client = \
             vcd_utils.get_cloudapi_client_from_vcd_client(self.client,
                                                           logger.SERVER_LOGGER, # noqa: E501
                                                           logger_wire)
     return self._cloudapi_client
Example #18
 def __init__(self, request_headers, request_query_params, request_spec):
     self.req_headers = request_headers
     self.req_qparams = request_query_params
     self.req_spec = request_spec
     self.pks_cache = get_pks_cache()
     self.ovdc_cache = OvdcCache(get_vcd_sys_admin_client())
     self.is_ovdc_present_in_request = False
     config = get_server_runtime_config()
     self.vcd_client, self.session = connect_vcd_user_via_token(
         vcd_uri=config['vcd']['host'],
         headers=self.req_headers,
         verify_ssl_certs=config['vcd']['verify'])
Example #19
 def get_template(self, name=None):
     server_config = get_server_runtime_config()
     if name is None:
         if 'template' in self.req_spec and \
                 self.req_spec['template'] is not None:
             name = self.req_spec['template']
         else:
             name = server_config['broker']['default_template']
     for template in server_config['broker']['templates']:
         if template['name'] == name:
             return template
     raise Exception(f"Template {name} not found.")
 def get_cluster_config(self, cluster_name):
     result = {}
     self._connect_tenant()
     clusters = load_from_metadata(self.tenant_client, name=cluster_name)
     if len(clusters) != 1:
         raise CseServerError('Cluster \'%s\' not found' % cluster_name)
     vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
     template = self.get_template(name=clusters[0]['template'])
     server_config = get_server_runtime_config()
     result['body'] = get_cluster_config(server_config, vapp,
                                         template['admin_password'])
     result['status_code'] = OK
     return result
Example #21
def template_list(request_data, op_ctx):
    """Request handler for template list operation.

    :return: List of dictionaries with template info.
    """
    config = utils.get_server_runtime_config()
    templates = []
    default_template_name = config['broker']['default_template_name']
    default_template_revision = str(
        config['broker']['default_template_revision'])  # noqa: E501

    for t in config['broker']['templates']:
        template_name = t[LocalTemplateKey.NAME]
        template_revision = str(t[LocalTemplateKey.REVISION])
        is_default = (template_name, template_revision) == (
            default_template_name, default_template_revision)  # noqa: E501

        templates.append({
            'catalog': config['broker']['catalog'],
            'catalog_item': t[LocalTemplateKey.CATALOG_ITEM_NAME],
            'cni': t[LocalTemplateKey.CNI],
            'cni_version': t[LocalTemplateKey.CNI_VERSION],
            'deprecated': t[LocalTemplateKey.DEPRECATED],
            'description': t[LocalTemplateKey.DESCRIPTION].replace("\\n", ", "),  # noqa: E501
            'docker_version': t[LocalTemplateKey.DOCKER_VERSION],
            'is_default': 'Yes' if is_default else 'No',
            'kind': t[LocalTemplateKey.KIND],
            'kubernetes': t[LocalTemplateKey.KUBERNETES],
            'kubernetes_version': t[LocalTemplateKey.KUBERNETES_VERSION],
            'name': template_name,
            'os': t[LocalTemplateKey.OS],
            'revision': template_revision
        })

    return sorted(templates,
                  key=lambda i: (i['name'], i['revision']),
                  reverse=True)  # noqa: E501
def get_template(name=None, revision=None):
    if (name is None and revision is not None) or \
            (name is not None and revision is None):
        raise ValueError("If template revision is specified, then template "
                         "name must also be specified (and vice versa).")
    server_config = utils.get_server_runtime_config()
    name = name or server_config['broker']['default_template_name']
    revision = revision or server_config['broker']['default_template_revision']
    for template in server_config['broker']['templates']:
        if (template[LocalTemplateKey.NAME] == name
                and str(template[LocalTemplateKey.REVISION]) == str(revision)):  # noqa: E501
            return template
    raise Exception(f"Template '{name}' at revision {revision} not found.")
def connect_vcd_user_via_token(tenant_auth_token):
    server_config = get_server_runtime_config()
    vcd_uri = server_config['vcd']['host']
    version = server_config['vcd']['api_version']
    verify_ssl_certs = server_config['vcd']['verify']
    client_tenant = Client(uri=vcd_uri,
                           api_version=version,
                           verify_ssl_certs=verify_ssl_certs,
                           log_file=SERVER_DEBUG_WIRELOG_FILEPATH,
                           log_requests=True,
                           log_headers=True,
                           log_bodies=True)
    session = client_tenant.rehydrate_from_token(tenant_auth_token)
    return (client_tenant, session)
Example #24
 def delete_nodes_thread(self):
     LOGGER.debug(f"About to delete nodes from cluster with name: "
                  f"{self.cluster_name}")
     try:
         vapp = VApp(self.tenant_client, href=self.cluster['vapp_href'])
         template = self.get_template()
         self.update_task(
             TaskStatus.RUNNING,
             message=f"Deleting {len(self.req_spec['nodes'])} node(s) from "
             f"{self.cluster_name}({self.cluster_id})")
         try:
             server_config = get_server_runtime_config()
             delete_nodes_from_cluster(server_config, vapp, template,
                                       self.req_spec['nodes'],
                                       self.req_spec['force'])
         except Exception:
             LOGGER.error(f"Couldn't delete node {self.req_spec['nodes']} "
                          f"from cluster:{self.cluster_name}")
         self.update_task(
             TaskStatus.RUNNING,
             message=f"Undeploying {len(self.req_spec['nodes'])} node(s) "
             f"for {self.cluster_name}({self.cluster_id})")
         for vm_name in self.req_spec['nodes']:
             vm = VM(self.tenant_client, resource=vapp.get_vm(vm_name))
             try:
                 task = vm.undeploy()
                 self.tenant_client.get_task_monitor().wait_for_status(task)
             except Exception:
                 LOGGER.warning(f"Couldn't undeploy VM {vm_name}")
         self.update_task(
             TaskStatus.RUNNING,
             message=f"Deleting {len(self.req_spec['nodes'])} VM(s) for "
             f"{self.cluster_name}({self.cluster_id})")
         task = vapp.delete_vms(self.req_spec['nodes'])
         self.tenant_client.get_task_monitor().wait_for_status(task)
         self.update_task(
             TaskStatus.SUCCESS,
             message=f"Deleted {len(self.req_spec['nodes'])} node(s) to "
             f"cluster {self.cluster_name}({self.cluster_id})")
     except Exception as e:
         LOGGER.error(traceback.format_exc())
         error_obj = error_to_json(e)
         stack_trace = ''.join(error_obj[ERROR_MESSAGE][ERROR_STACKTRACE])
         self.update_task(
             TaskStatus.ERROR,
             error_message=error_obj[ERROR_MESSAGE][ERROR_DESCRIPTION],
             stack_trace=stack_trace)
     finally:
         self._disconnect_sys_admin()
Example #25
 def _connect_tenant(self):
     server_config = get_server_runtime_config()
     host = server_config['vcd']['host']
     verify = server_config['vcd']['verify']
     self.tenant_client, self.client_session = connect_vcd_user_via_token(
         vcd_uri=host,
         headers=self.req_headers,
         verify_ssl_certs=verify)
     self.tenant_info = {
         'user_name': self.client_session.get('user'),
         'user_id': self.client_session.get('userId'),
         'org_name': self.client_session.get('org'),
         'org_href': self.tenant_client._get_wk_endpoint(
             _WellKnownEndpoint.LOGGED_IN_ORG)
     }
Example #26
def ovdc_compute_policy_list(request_data,
                             request_context: ctx.RequestContext):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: List of compute policies assigned to the ovdc.
    """
    required = [RequestKey.OVDC_ID]
    req_utils.validate_payload(request_data, required)

    config = utils.get_server_runtime_config()
    cpm = compute_policy_manager.ComputePolicyManager(
        request_context.sysadmin_client,
        log_wire=utils.str_to_bool(config['service'].get('log_wire')))
    return cpm.list_compute_policies_on_vdc(request_data[RequestKey.OVDC_ID])
def connect_vcd_user_via_token(tenant_auth_token):
    server_config = get_server_runtime_config()
    vcd_uri = server_config['vcd']['host']
    version = server_config['vcd']['api_version']
    verify_ssl_certs = server_config['vcd']['verify']
    log_filename = None
    log_wire = str_to_bool(server_config['service'].get('log_wire'))
    if log_wire:
        log_filename = SERVER_DEBUG_WIRELOG_FILEPATH
    client_tenant = Client(uri=vcd_uri,
                           api_version=version,
                           verify_ssl_certs=verify_ssl_certs,
                           log_file=log_filename,
                           log_requests=log_wire,
                           log_headers=log_wire,
                           log_bodies=log_wire)
    session = client_tenant.rehydrate_from_token(tenant_auth_token)
    return (client_tenant, session)
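A sketch of how a request handler might consume the helper above. The token would normally come from the incoming request's authorization header (an assumption here), and the returned session exposes fields such as 'user' and 'org' as seen in _connect_tenant earlier.

client_tenant, session = connect_vcd_user_via_token(tenant_auth_token)
user_name = session.get('user')
org_name = session.get('org')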
    def get_cluster_config(self, cluster_name):
        self._connect_tenant()
        clusters = load_from_metadata(
            self.tenant_client,
            name=cluster_name,
            org_name=self.req_spec.get(RequestKey.ORG_NAME),
            vdc_name=self.req_spec.get(RequestKey.OVDC_NAME))
        if len(clusters) > 1:
            raise CseDuplicateClusterError(f"Multiple clusters of name"
                                           f" '{cluster_name}' detected.")
        if len(clusters) == 0:
            raise ClusterNotFoundError(f"Cluster '{cluster_name}' not found.")

        vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
        template = self._get_template(name=clusters[0]['template'])
        server_config = get_server_runtime_config()
        result = get_cluster_config(server_config, vapp,
                                    template['admin_password'])
        return result
 def decorator_wrapper(*args, **kwargs):
     sys_admin_client = None
     try:
         is_authorized = True
         server_config = get_server_runtime_config()
         if server_config['service']['enforce_authorization']:
             sys_admin_client = get_vcd_sys_admin_client()
             broker_instance = args[0]  # self
             user_session = broker_instance.get_tenant_client_session()
             is_authorized = _is_authorized(sys_admin_client,
                                            user_session,
                                            required_rights)
         if is_authorized:
             return func(*args, **kwargs)
         else:
             raise Exception(
                 'Access Forbidden. Missing required rights.')
     finally:
         if sys_admin_client is not None:
             sys_admin_client.logout()
Example #30
def record_user_action_details(cse_operation,
                               cse_params,
                               telemetry_settings=None):
    """Record CSE user operation details in telemetry server.

    No exception should be leaked. Catch all exceptions and log them.

    :param CseOperation cse_operation: CSE operation information
    :param dict cse_params: CSE operation parameters
    :param dict telemetry_settings: telemetry section of config->service
    """
    try:
        if not telemetry_settings:
            telemetry_settings = get_server_runtime_config()['service'][
                'telemetry']  # noqa: E501

        if telemetry_settings['enable']:
            payload = OPERATION_TO_PAYLOAD_GENERATOR[cse_operation](cse_params)
            _send_data_to_telemetry_server(payload, telemetry_settings)
    except Exception as err:
        LOGGER.warning(f"Error in recording CSE operation details :{str(err)}"
                       )  # noqa: E501
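A minimal usage sketch mirroring the call sites earlier in this document (see the OVDC_INFO handler); the ovdc id value is illustrative.

record_user_action_details(
    cse_operation=CseOperation.OVDC_INFO,
    cse_params={RequestKey.OVDC_ID: 'illustrative-ovdc-id'})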