Example No. 1
 def setup_postgresql(self, replicas, node_selector=None):
     # Node selector for postgresql
     pgsql_sset = templating.load_yaml(constants.PGSQL_STATEFULSET_YAML)
     if node_selector is not None:
         pgsql_sset['spec']['template']['spec'][
             'nodeSelector'] = node_selector
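     # On external (independent) mode clusters, point the PVC template at the
     # external RBD storage class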
     if helpers.storagecluster_independent_check():
         pgsql_sset['spec']['volumeClaimTemplates'][0][
             'metadata']['annotations'][
             'volume.beta.kubernetes.io/storage-class'] = \
             constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
     Postgresql.setup_postgresql(self, replicas=replicas)
Example No. 2
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     obc_data = templating.load_yaml(constants.MCG_OBC_YAML)
     if self.name is None:
         self.name = create_unique_resource_name('oc', 'obc')
     obc_data['metadata']['name'] = self.name
     obc_data['spec']['bucketName'] = self.name
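     # Choose the RGW storage class that matches the deployment mode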
     if storagecluster_independent_check():
         obc_data['spec'][
             'storageClassName'] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW
     else:
         obc_data['spec'][
             'storageClassName'] = constants.DEFAULT_STORAGECLASS_RGW
     obc_data['metadata']['namespace'] = self.namespace
     create_resource(**obc_data)
Example No. 3
def create_instance_in_clusterlogging():
    """
    Creation of instance for clusterlogging that creates PVCs,
    ElasticSearch, curator fluentd and kibana pods and checks for all
    the pods and PVCs

    Args:
        sc_name (str): Storage class name to create PVCs

    Returns:
        dict: Contains all detailed information of the
            instance such as pods that got created, its resources and limits
            values, storage class and size details etc.

    """

    nodes_in_cluster = len(get_all_nodes())
    inst_data = templating.load_yaml(constants.CL_INSTANCE_YAML)
    es_node_count = inst_data['spec']['logStore']['elasticsearch']['nodeCount']
    if helpers.storagecluster_independent_check():
        inst_data['spec']['logStore']['elasticsearch']['storage'][
            'storageClassName'] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
    helpers.create_resource(wait=False, **inst_data)
    oc = ocp.OCP('v1', 'ClusterLogging', 'openshift-logging')
    logging_instance = oc.get(resource_name='instance', out_yaml_format=True)
    if logging_instance:
        logger.info("Successfully created instance for cluster-logging")
        logger.debug(logging_instance)
    else:
        logger.error("Instance for clusterlogging is not created properly")

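    # One Fluentd pod per cluster node and one pod per Elasticsearch node are
    # expected, plus two more logging pods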
    pod_obj = ocp.OCP(kind=constants.POD, namespace='openshift-logging')
    pod_status = pod_obj.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_count=2 + es_node_count +
                                           nodes_in_cluster,
                                           timeout=500,
                                           sleep=2)
    assert pod_status, "Pods are not in Running state."
    logger.info("All pods are in Running state")
    pvc_obj = ocp.OCP(kind=constants.PVC, namespace='openshift-logging')
    pvc_status = pvc_obj.wait_for_resource(condition=constants.STATUS_BOUND,
                                           resource_count=es_node_count,
                                           timeout=150,
                                           sleep=5)
    assert pvc_status, "PVCs are not in bound state."
    logger.info("PVCs are Bound")
    return logging_instance
Example No. 4
    def __init__(self, namespace=None):
        self.namespace = namespace if namespace else config.ENV_DATA['cluster_namespace']

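        # External mode clusters expose RGW through the external-mode
        # storage class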
        if storagecluster_independent_check():
            sc_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW
        else:
            sc_name = constants.DEFAULT_STORAGECLASS_RGW

        self.storageclass = OCP(
            kind='storageclass', namespace=namespace,
            resource_name=sc_name
        )
        self.s3_internal_endpoint = self.storageclass.get().get('parameters').get('endpoint')
        self.region = self.storageclass.get().get('parameters').get('region')
        # Todo: Implement retrieval in cases where CephObjectStoreUser is available
        self.key_id = None
        self.secret_key = None
        self.s3_resource = None
Example No. 5
def test_monitoring_enabled():
    """
    OCS Monitoring is enabled after OCS installation (which is why this test
    has a post deployment marker) by asking for values of one ceph and one
    noobaa related metrics.
    """
    prometheus = PrometheusAPI()

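    # Ceph metrics are not available in external mode before OCS 4.6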
    if (storagecluster_independent_check()
            and float(config.ENV_DATA['ocs_version']) < 4.6):
        logger.info(
            f"Skipping Ceph metrics because they are not enabled for external "
            f"mode in OCS {float(config.ENV_DATA['ocs_version'])}")

    else:
        # ask for values of ceph_pool_stored metric
        logger.info("Checking that ceph data are provided in OCS monitoring")
        result = prometheus.query('ceph_pool_stored')
        msg = "check that we actually received some values for a ceph query"
        assert len(result) > 0, msg
        for metric in result:
            _, value = metric['value']
            assert_msg = "number of bytes in a pool isn't a positive integer or zero"
            assert int(value) >= 0, assert_msg
        # additional check that the values make at least some sense
        logger.info(
            "Checking that size of ceph_pool_stored result matches number of pools"
        )
        ct_pod = pod.get_ceph_tools_pod()
        ceph_pools = ct_pod.exec_ceph_cmd("ceph osd pool ls")
        assert len(result) == len(ceph_pools)

    # again for a noobaa metric
    logger.info("Checking that MCG/NooBaa data are provided in OCS monitoring")
    result = prometheus.query('NooBaa_bucket_status')
    msg = "check that we actually received some values for a MCG/NooBaa query"
    assert len(result) > 0, msg
    for metric in result:
        _, value = metric['value']
        assert_msg = "bucket status isn't a positive integer or zero"
        assert int(value) >= 0, assert_msg
Example No. 6
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA['cluster_namespace']
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])

        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation since
        the process is so light and quick, that the time required for the redundant
        copy is neglible in comparison to the time a hash comparison will take.
        """
        retrieve_default_ingress_crt()

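        # Read the NooBaa CR to discover the S3/management endpoints and the
        # admin credentials secret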
        get_noobaa = OCP(kind='noobaa', namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get('items')[0].get('status').get(
            'services').get('serviceS3').get('externalDNS')[0])
        self.s3_internal_endpoint = (get_noobaa.get('items')[0].get(
            'status').get('services').get('serviceS3').get('internalDNS')[0])
        self.mgmt_endpoint = (get_noobaa.get('items')[0].get('status').get(
            'services').get('serviceMgmt').get('externalDNS')[0]) + '/rpc'
        self.region = config.ENV_DATA['region']

        creds_secret_name = (get_noobaa.get('items')[0].get('status').get(
            'accounts').get('admin').get('secretRef').get('name'))
        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

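        # Decode the admin S3 credentials and NooBaa login details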
        self.access_key_id = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_ACCESS_KEY_ID')).decode(
                'utf-8')
        self.access_key = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_SECRET_ACCESS_KEY')).decode(
                'utf-8')

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get('data').get('email')).decode('utf-8')
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get('data').get('password')).decode('utf-8')

        self.noobaa_token = self.send_rpc_query(
            'auth_api',
            'create_auth',
            params={
                'role': 'admin',
                'system': 'noobaa',
                'email': self.noobaa_user,
                'password': self.noobaa_password
            }).json().get('reply').get('token')

        self.s3_resource = boto3.resource(
            's3',
            verify=constants.DEFAULT_INGRESS_CRT_LOCAL_PATH,
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key)

        self.s3_client = self.s3_resource.meta.client

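        # On AWS, optionally request dedicated AWS credentials and build a
        # native S3 resource with them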
        if (config.ENV_DATA['platform'].lower() == 'aws'
                and kwargs.get('create_aws_creds')):
            (self.cred_req_obj, self.aws_access_key_id,
             self.aws_access_key) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                's3',
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key)

        if (config.ENV_DATA['platform'].lower() in constants.CLOUD_PLATFORMS
                or storagecluster_independent_check()):
            logger.info(
                'Checking that RGW pods are not present on this platform')
            pods = pod.get_pods_having_label(label=constants.RGW_APP_LABEL,
                                             namespace=self.namespace)
            assert not pods, 'RGW pods should not exist in the current platform/cluster'

        elif config.ENV_DATA.get('platform') in constants.ON_PREM_PLATFORMS:
            rgw_count = 2 if float(
                config.ENV_DATA['ocs_version']) >= 4.5 else 1
            logger.info(
                f'Checking for RGW pod/s on {config.ENV_DATA.get("platform")} platform'
            )
            rgw_pod = OCP(kind=constants.POD, namespace=self.namespace)
            assert rgw_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.RGW_APP_LABEL,
                resource_count=rgw_count,
                timeout=60)