Example #1
    def __init__(self,
                 mcg,
                 name,
                 email,
                 buckets,
                 admin_access=False,
                 s3_access=True,
                 full_bucket_access=True,
                 backingstore_name=constants.DEFAULT_NOOBAA_BACKINGSTORE):
        """
        Initializer function

        Args:
            mcg (obj): Multi cloud gateway object
            name (str): Name of the NooBaa account
            email (str): Email ID to assign to the NooBaa account
            buckets (list): List of bucket names the account is granted permission to
            admin_access (bool): True for admin privileges, otherwise False. Default (False)
            s3_access (bool): True for S3 access, otherwise False. Default (True)
            full_bucket_access (bool): True to also grant access to buckets
                created in the future, otherwise False. Default (True)
            backingstore_name (str): Backingstore on which buckets created
                using this account are placed by default. Default ("noobaa-default-backing-store")
        """
        self.account_name = name
        self.email_id = email
        response = mcg.send_rpc_query(api="account_api",
                                      method="create_account",
                                      params={
                                          "email": email,
                                          "name": name,
                                          "has_login": admin_access,
                                          "s3_access": s3_access,
                                          "default_pool": backingstore_name,
                                          "allowed_buckets": {
                                              "full_permission":
                                              full_bucket_access,
                                              "permission_list": buckets
                                          }
                                      }).json()
        self.access_key_id = response['reply']['access_keys'][0]['access_key']
        self.access_key = response['reply']['access_keys'][0]['secret_key']
        self.s3_endpoint = mcg.s3_endpoint
        self.token = response['reply']['token']

        self.s3_resource = boto3.resource(
            's3',
            verify=retrieve_verification_mode(),
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key)

        self.s3_client = boto3.client('s3',
                                      verify=retrieve_verification_mode(),
                                      endpoint_url=self.s3_endpoint,
                                      aws_access_key_id=self.access_key_id,
                                      aws_secret_access_key=self.access_key)
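
Usage note: a minimal sketch of how this initializer is typically consumed, assuming it belongs to a class such as NoobaaAccount (the class name is not shown above, so treat it as hypothetical) and that an MCG object is already available:

    # NoobaaAccount and mcg_obj are assumed names, not shown in the example
    account = NoobaaAccount(
        mcg=mcg_obj,
        name='test-account',
        email='test-account@email.com',
        buckets=['first-bucket'],
    )
    # The instance exposes ready-made boto3 handles
    print(account.s3_client.list_buckets())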
Example #2
    def send_rpc_query(self, api, method, params=None):
        """
        Templates and sends an RPC query to the MCG mgmt endpoint

        Args:
            api (str): The name of the API to use
            method (str): The method to use inside the API
            params (dict): A dictionary containing the command payload

        Returns:
            requests.Response: The server's response

        """
        logger.info(f"Sending MCG RPC query:\n{api} {method} {params}")
        payload = {
            "api": api,
            "method": method,
            "params": params,
            "auth_token": self.noobaa_token,
        }
        return requests.post(
            url=self.mgmt_endpoint,
            data=json.dumps(payload),
            verify=retrieve_verification_mode(),
        )
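
For illustration, a hedged sketch of a call to this method. The api and method names mirror the account_api usage in Example #1; list_accounts and the shape of the reply are assumptions about the NooBaa RPC schema:

    # mcg_obj is assumed to be an instantiated MCG object
    response = mcg_obj.send_rpc_query(
        api="account_api",
        method="list_accounts",
        params={},
    )
    # The "reply" wrapping matches the responses parsed in Example #1
    accounts = response.json()["reply"]["accounts"]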
Example #3
    def __init__(self, obc_name):
        """
        Initializer function

        Args:
            obc_name (str): Name of the Object Bucket Claim
        """
        self.obc_name = obc_name
        self.namespace = config.ENV_DATA['cluster_namespace']
        obc_resource = OCP(namespace=self.namespace, kind='ObjectBucketClaim', resource_name=self.obc_name).get()
        self.ob_name = obc_resource.get('spec').get('ObjectBucketName')
        self.bucket_name = obc_resource.get('spec').get('bucketName')
        ob_obj = OCP(namespace=self.namespace, kind='ObjectBucket', resource_name=self.ob_name).get()
        self.obc_account = ob_obj.get('spec').get('additionalState').get('account')
        secret_obc_obj = OCP(kind='secret', namespace=self.namespace, resource_name=self.obc_name).get()

        obc_configmap = OCP(namespace=self.namespace, kind='ConfigMap', resource_name=self.obc_name).get()
        obc_configmap_data = obc_configmap.get('data')

        obc_provisioner = obc_resource.get('metadata').get('labels').get('bucket-provisioner')

        self.region = obc_configmap_data.get('BUCKET_REGION')

        self.access_key_id = base64.b64decode(
            secret_obc_obj.get('data').get('AWS_ACCESS_KEY_ID')
        ).decode('utf-8')
        self.access_key = base64.b64decode(
            secret_obc_obj.get('data').get('AWS_SECRET_ACCESS_KEY')
        ).decode('utf-8')

        if 'noobaa' in obc_provisioner:
            get_noobaa = OCP(kind='noobaa', namespace=self.namespace).get()
            self.s3_internal_endpoint = (
                get_noobaa.get('items')[0].get('status').get('services')
                .get('serviceS3').get('internalDNS')[0]
            )
            self.s3_external_endpoint = (
                get_noobaa.get('items')[0].get('status').get('services')
                .get('serviceS3').get('externalDNS')[0]
            )
            self.s3_resource = boto3.resource(
                's3', verify=retrieve_verification_mode(),
                endpoint_url=self.s3_external_endpoint,
                aws_access_key_id=self.access_key_id,
                aws_secret_access_key=self.access_key
            )
            self.s3_client = self.s3_resource.meta.client

        elif 'rook' in obc_provisioner:
            # TODO: implement network forwarding to access the internal address
            self.s3_internal_endpoint = (
                'http://' + obc_configmap_data.get('BUCKET_HOST') + ':'
                + obc_configmap_data.get('BUCKET_PORT')
            )
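
A minimal usage sketch of this class (instantiated as OBC elsewhere in these examples, e.g. Example #7): wrap an existing Object Bucket Claim and upload an object through the boto3 resource the initializer builds for NooBaa-provisioned claims. The claim name is illustrative:

    obc_obj = OBC('my-obc')  # name of an existing ObjectBucketClaim
    obc_obj.s3_resource.Bucket(obc_obj.bucket_name).put_object(
        Key='hello.txt',
        Body='hello world',
    )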
Example #4
    def send_rpc_query(self, api, method, params=None):
        """
        Templates and sends an RPC query to the MCG mgmt endpoint

        Args:
            api (str): The name of the API to use
            method (str): The method to use inside the API
            params (dict): A dictionary containing the command payload

        Returns:
            requests.Response: The server's response

        """
        payload = {
            'api': api,
            'method': method,
            'params': params,
            'auth_token': self.noobaa_token
        }
        return requests.post(url=self.mgmt_endpoint,
                             data=json.dumps(payload),
                             verify=retrieve_verification_mode())
Example #5
    def __init__(self, obc_name):
        """
        Initializer function

        Args:
            obc_name (str): Name of the Object Bucket Claim
        """
        self.obc_name = obc_name
        self.namespace = config.ENV_DATA["cluster_namespace"]
        obc_resource = OCP(
            namespace=self.namespace,
            kind="ObjectBucketClaim",
            resource_name=self.obc_name,
        ).get()
        self.ob_name = obc_resource.get("spec").get("ObjectBucketName")
        self.bucket_name = obc_resource.get("spec").get("bucketName")
        ob_obj = OCP(namespace=self.namespace,
                     kind="ObjectBucket",
                     resource_name=self.ob_name).get()
        self.obc_account = ob_obj.get("spec").get("additionalState").get(
            "account")
        secret_obc_obj = OCP(kind="secret",
                             namespace=self.namespace,
                             resource_name=self.obc_name).get()

        obc_configmap = OCP(namespace=self.namespace,
                            kind="ConfigMap",
                            resource_name=self.obc_name).get()
        obc_configmap_data = obc_configmap.get("data")

        obc_provisioner = (obc_resource.get("metadata").get("labels").get(
            "bucket-provisioner"))

        self.region = obc_configmap_data.get("BUCKET_REGION")

        self.access_key_id = base64.b64decode(
            secret_obc_obj.get("data").get("AWS_ACCESS_KEY_ID")).decode(
                "utf-8")
        self.access_key = base64.b64decode(
            secret_obc_obj.get("data").get("AWS_SECRET_ACCESS_KEY")).decode(
                "utf-8")

        if "noobaa" in obc_provisioner:
            get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()
            self.s3_internal_endpoint = (
                get_noobaa.get("items")[0].get("status").get("services").get(
                    "serviceS3").get("internalDNS")[0])
            self.s3_external_endpoint = (
                get_noobaa.get("items")[0].get("status").get("services").get(
                    "serviceS3").get("externalDNS")[0])
            self.s3_resource = boto3.resource(
                "s3",
                verify=retrieve_verification_mode(),
                endpoint_url=self.s3_external_endpoint,
                aws_access_key_id=self.access_key_id,
                aws_secret_access_key=self.access_key,
            )
            self.s3_client = self.s3_resource.meta.client

        elif "rook" in obc_provisioner:
            # TODO: implement network forwarding to access the internal address
            self.s3_internal_endpoint = (
                "http://" + obc_configmap_data.get("BUCKET_HOST") + ":" +
                obc_configmap_data.get("BUCKET_PORT"))
Example #6
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA["cluster_namespace"]
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])

        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation since
        the process is so light and quick that the time required for the redundant
        copy is negligible in comparison to the time a hash comparison would take.
        """
        retrieve_default_ingress_crt()

        get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceS3").get("externalDNS")[0])
        self.s3_internal_endpoint = (get_noobaa.get("items")[0].get(
            "status").get("services").get("serviceS3").get("internalDNS")[0])
        self.mgmt_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceMgmt").get("externalDNS")[0]) + "/rpc"
        self.region = config.ENV_DATA["region"]

        creds_secret_name = (get_noobaa.get("items")[0].get("status").get(
            "accounts").get("admin").get("secretRef").get("name"))
        secret_ocp_obj = OCP(kind="secret", namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_ACCESS_KEY_ID")).decode(
                "utf-8")
        self.access_key = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_SECRET_ACCESS_KEY")).decode(
                "utf-8")

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get("data").get("email")).decode("utf-8")
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get("data").get("password")).decode("utf-8")

        self.noobaa_token = self.retrieve_nb_token()

        self.s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key,
        )

        self.s3_client = self.s3_resource.meta.client

        if config.ENV_DATA["platform"].lower() == "aws" and kwargs.get(
                "create_aws_creds"):
            (
                self.cred_req_obj,
                self.aws_access_key_id,
                self.aws_access_key,
            ) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                "s3",
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key,
            )

        if (config.ENV_DATA["platform"].lower() in constants.CLOUD_PLATFORMS
                or storagecluster_independent_check()):
            if not config.ENV_DATA["platform"] == constants.AZURE_PLATFORM and (
                    float(config.ENV_DATA["ocs_version"]) > 4.5):
                logger.info("Checking whether RGW pod is not present")
                pods = pod.get_pods_having_label(label=constants.RGW_APP_LABEL,
                                                 namespace=self.namespace)
                assert (
                    not pods
                ), "RGW pods should not exist in the current platform/cluster"

        elif config.ENV_DATA.get("platform") in constants.ON_PREM_PLATFORMS:
            rgw_count = get_rgw_count(config.ENV_DATA["ocs_version"],
                                      check_if_cluster_was_upgraded(), None)
            logger.info(
                f'Checking for RGW pod/s on {config.ENV_DATA.get("platform")} platform'
            )
            rgw_pod = OCP(kind=constants.POD, namespace=self.namespace)
            assert rgw_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.RGW_APP_LABEL,
                resource_count=rgw_count,
                timeout=60,
            )
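
A short sketch of how the constructed object is consumed by the other examples here. The system_api/read_system RPC pair is an assumption about the NooBaa management API:

    mcg_obj = MCG()
    # S3 access with the admin credentials wired up in the constructor
    mcg_obj.s3_client.list_buckets()
    # Management RPC access with the retrieved auth token
    mcg_obj.send_rpc_query(api="system_api", method="read_system", params={})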
Example #7
    def test_rgw_kafka_notifications(self, bucket_factory):
        """
        Test to verify rgw kafka notifications

        """
        # Get sc
        sc = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)

        # Deploy amq cluster
        self.amq.setup_amq_cluster(sc.name)

        # Create topic
        self.kafka_topic = self.amq.create_kafka_topic()

        # Create Kafkadrop pod
        (
            self.kafkadrop_pod,
            self.kafkadrop_svc,
            self.kafkadrop_route,
        ) = self.amq.create_kafkadrop()

        # Get the kafkadrop route
        kafkadrop_host = self.kafkadrop_route.get().get("spec").get("host")

        # Create bucket
        bucketname = bucket_factory(amount=1, interface="RGW-OC")[0].name

        # Get RGW credentials
        rgw_obj = RGW()
        rgw_endpoint, access_key, secret_key = rgw_obj.get_credentials()

        # Clone notify repo
        notify_path = clone_notify()

        # Initialise to put objects
        data = "A random string data to write on created rgw bucket"
        obc_obj = OBC(bucketname)
        s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=rgw_endpoint,
            aws_access_key_id=obc_obj.access_key_id,
            aws_secret_access_key=obc_obj.access_key,
        )
        s3_client = s3_resource.meta.client

        # Initialize notify command to run
        notify_cmd = (
            f"python {notify_path} -e {rgw_endpoint} -a {obc_obj.access_key_id} "
            f"-s {obc_obj.access_key} -b {bucketname} -ke {constants.KAFKA_ENDPOINT} -t {self.kafka_topic.name}"
        )
        log.info(f"Running cmd {notify_cmd}")

        # Put objects to bucket
        assert s3_client.put_object(Bucket=bucketname, Key="key-1",
                                    Body=data), "Failed: Put object: key-1"
        exec_cmd(notify_cmd)

        # Validate from the RGW logs that notifications were sent
        # and that no errors occurred
        pattern = "ERROR: failed to create push endpoint"
        rgw_pod_obj = get_rgw_pods()
        rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw")
        assert re.search(pattern=pattern, string=rgw_log) is None, (
            f"Error: '{pattern}' found in the RGW logs. "
            f"Check the RGW logs and verify that the bucket "
            f"notification is working correctly")
        assert s3_client.put_object(Bucket=bucketname, Key="key-2",
                                    Body=data), "Failed: Put object: key-2"
        exec_cmd(notify_cmd)

        # Validate that messages were received on the Kafka side using curl
        # (a temporary check from the Kafka side; UI validation is still needed)
        curl_command = (
            f"curl -X GET {kafkadrop_host}/topic/{self.kafka_topic.name} "
            "-H 'content-type: application/vnd.kafka.json.v2+json'")
        json_output = run_cmd(cmd=curl_command)
        new_string = json_output.split()
        messages = new_string[new_string.index("messages</td>") + 1]
        if messages.find("1") == -1:
            raise Exception(
                "Error: Messages are not recieved from Kafka side."
                "RGW bucket notification is not working as expected.")

        # Validate the timestamp events
        ocs_version = config.ENV_DATA["ocs_version"]
        if Version.coerce(ocs_version) >= Version.coerce("4.8"):
            cmd = (
                f"bin/kafka-console-consumer.sh --bootstrap-server {constants.KAFKA_ENDPOINT} "
                f"--topic {self.kafka_topic.name} --from-beginning --timeout-ms 20000"
            )
            pod_list = get_pod_name_by_pattern(
                pattern="my-cluster-zookeeper",
                namespace=constants.AMQ_NAMESPACE)
            zookeeper_obj = get_pod_obj(name=pod_list[0],
                                        namespace=constants.AMQ_NAMESPACE)
            event_obj = zookeeper_obj.exec_cmd_on_pod(command=cmd)
            log.info(f"Event obj: {event_obj}")
            event_time = event_obj.get("Records")[0].get("eventTime")
            format_string = "%Y-%m-%dT%H:%M:%S.%fZ"
            try:
                datetime.strptime(event_time, format_string)
            except ValueError as ef:
                log.error(
                    f"Timestamp event {event_time} doesn't match the pattern {format_string}"
                )
                raise ef

            log.info(
                f"Timestamp event {event_time} matches the pattern {format_string}"
            )
Example #8
    def test_rgw_kafka_notifications(self, bucket_factory):
        """
        Test to verify rgw kafka notifications

        """
        # Get sc
        sc = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)

        # Deploy amq cluster
        self.amq.setup_amq_cluster(sc.name)

        # Create topic
        self.kafka_topic = self.amq.create_kafka_topic()

        # Create Kafkadrop pod
        (
            self.kafkadrop_pod,
            self.kafkadrop_svc,
            self.kafkadrop_route,
        ) = self.amq.create_kafkadrop()

        # Get the kafkadrop route
        kafkadrop_host = self.kafkadrop_route.get().get("spec").get("host")

        # Create bucket
        bucketname = bucket_factory(amount=1, interface="RGW-OC")[0].name

        # Get RGW credentials
        rgw_obj = RGW()
        rgw_endpoint, access_key, secret_key = rgw_obj.get_credentials()

        # Clone notify repo
        notify_path = clone_notify()

        # Initialise to put objects
        data = "A random string data to write on created rgw bucket"
        obc_obj = OBC(bucketname)
        s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=rgw_endpoint,
            aws_access_key_id=obc_obj.access_key_id,
            aws_secret_access_key=obc_obj.access_key,
        )
        s3_client = s3_resource.meta.client

        # Initialize notify command to run
        notify_cmd = (
            f"python {notify_path} -e {rgw_endpoint} -a {obc_obj.access_key_id} "
            f"-s {obc_obj.access_key} -b {bucketname} -ke {constants.KAFKA_ENDPOINT} -t {self.kafka_topic.name}"
        )
        log.info(f"Running cmd {notify_cmd}")

        # Put objects to bucket
        assert s3_client.put_object(Bucket=bucketname, Key="key-1",
                                    Body=data), "Failed: Put object: key-1"
        exec_cmd(notify_cmd)

        # Validate from the RGW logs that notifications were sent
        # and that no errors occurred
        pattern = "ERROR: failed to create push endpoint"
        rgw_pod_obj = get_rgw_pods()
        rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw")
        assert re.search(pattern=pattern, string=rgw_log) is None, (
            f"Error: '{pattern}' found in the RGW logs. "
            f"Check the RGW logs and verify that the bucket "
            f"notification is working correctly")
        assert s3_client.put_object(Bucket=bucketname, Key="key-2",
                                    Body=data), "Failed: Put object: key-2"
        exec_cmd(notify_cmd)

        # Validate that messages were received on the Kafka side using curl
        # (a temporary check from the Kafka side; UI validation is still needed)
        curl_command = (
            f"curl -X GET {kafkadrop_host}/topic/{self.kafka_topic.name} "
            "-H 'content-type: application/vnd.kafka.json.v2+json'")
        json_output = run_cmd(cmd=curl_command)
        new_string = json_output.split()
        messages = new_string[new_string.index("messages</td>") + 1]
        if messages.find("1") == -1:
            raise Exception(
                "Error: Messages are not recieved from Kafka side."
                "RGW bucket notification is not working as expected.")
Example #9
    def __init__(self, obc_name):
        """
        Initializer function

        Args:
            obc_name (str): Name of the Object Bucket Claim
        """
        self.obc_name = obc_name
        self.namespace = config.ENV_DATA["cluster_namespace"]
        obc_resource = OCP(
            namespace=self.namespace,
            kind="ObjectBucketClaim",
            resource_name=self.obc_name,
        ).get()
        obn_str = (
            constants.OBJECTBUCKETNAME_46ANDBELOW
            if version.get_semantic_ocs_version_from_config() < version.VERSION_4_7
            else constants.OBJECTBUCKETNAME_47ANDABOVE
        )
        self.ob_name = obc_resource.get("spec").get(obn_str)
        self.bucket_name = obc_resource.get("spec").get("bucketName")
        ob_obj = OCP(namespace=self.namespace,
                     kind="ObjectBucket",
                     resource_name=self.ob_name).get()
        self.obc_account = ob_obj.get("spec").get("additionalState").get(
            "account")
        secret_obc_obj = OCP(kind="secret",
                             namespace=self.namespace,
                             resource_name=self.obc_name).get()

        obc_configmap = OCP(namespace=self.namespace,
                            kind="ConfigMap",
                            resource_name=self.obc_name).get()
        obc_configmap_data = obc_configmap.get("data")

        obc_provisioner = (obc_resource.get("metadata").get("labels").get(
            "bucket-provisioner"))

        self.region = obc_configmap_data.get("BUCKET_REGION")

        self.access_key_id = base64.b64decode(
            secret_obc_obj.get("data").get("AWS_ACCESS_KEY_ID")).decode(
                "utf-8")
        self.access_key = base64.b64decode(
            secret_obc_obj.get("data").get("AWS_SECRET_ACCESS_KEY")).decode(
                "utf-8")

        if "noobaa" in obc_provisioner:
            get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()
            self.s3_internal_endpoint = (
                get_noobaa.get("items")[0].get("status").get("services").get(
                    "serviceS3").get("internalDNS")[0])
            self.s3_external_endpoint = (
                get_noobaa.get("items")[0].get("status").get("services").get(
                    "serviceS3").get("externalDNS")[0])
            self.s3_resource = boto3.resource(
                "s3",
                verify=retrieve_verification_mode(),
                endpoint_url=self.s3_external_endpoint,
                aws_access_key_id=self.access_key_id,
                aws_secret_access_key=self.access_key,
            )
            self.s3_client = self.s3_resource.meta.client

        elif "rook" in obc_provisioner:
            scheme = ("https" if obc_configmap_data.get("BUCKET_PORT") == "443"
                      else "http")
            host = obc_configmap_data.get("BUCKET_HOST")
            port = obc_configmap_data.get("BUCKET_PORT")
            self.s3_internal_endpoint = f"{scheme}://{host}:{port}"
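
The version gate at the top of this example switches which spec key is used to look up the Object Bucket name. A hedged sketch of the two constants it selects between; the values are assumptions inferred from the field casing shown in Examples #3 and #5:

    # Assumed values; the test framework defines these in its constants module
    OBJECTBUCKETNAME_46ANDBELOW = 'ObjectBucketName'   # OCS <= 4.6
    OBJECTBUCKETNAME_47ANDABOVE = 'objectBucketName'   # OCS >= 4.7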
Example #10
    def test_ns_bucket_unsigned_access(self, mcg_obj, bucket_factory,
                                       namespace_store_factory):
        """
        Test that anonymous (unsigned) S3 operations are denied on a namespace bucket.
        """
        sample_data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)

        # Create the namespace bucket
        nss_tup = ("oc", {"aws": [(1, self.DEFAULT_REGION)]})
        ns_store = namespace_store_factory(*nss_tup)[0]
        bucketclass_dict = {
            "interface": "OC",
            "namespace_policy_dict": {
                "type": "Single",
                "namespacestores": [ns_store],
            },
        }
        ns_bucket = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0].name

        # Put and Get object operations done with s3 credentials
        logger.info(f"Put and Get object operations on {ns_bucket}")
        assert bucket_utils.s3_put_object(
            s3_obj=mcg_obj,
            bucketname=ns_bucket,
            object_key=object_key,
            data=sample_data,
        ), "Failed: PutObject"
        assert bucket_utils.s3_get_object(
            s3_obj=mcg_obj, bucketname=ns_bucket,
            object_key=object_key), "Failed: GetObject"

        # Boto3 client with signing disabled
        anon_s3_client = boto3.client(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=mcg_obj.s3_endpoint,
            config=Config(signature_version=UNSIGNED),
        )

        logger.info(
            f"Verifying anonymous access is blocked on namespace bucket: {ns_bucket}"
        )
        try:
            anon_s3_client.get_object(Bucket=ns_bucket, Key=object_key)
        except boto3exception.ClientError as e:
            response = HttpResponseParser(e.response)
            assert (response.error["Code"] == "AccessDenied"
                    ), f"Invalid error code:{response.error['Code']}"
            assert (response.status_code == 403
                    ), f"Invalid status code:{response.status_code}"
            assert (response.error["Message"] == "Access Denied"
                    ), f"Invalid error message:{response.error['Message']}"
        else:
            assert (
                False
            ), "GetObject was granted access when it should have been blocked"
Example #11
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA["cluster_namespace"]
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])
        wait_for_resource_state(resource=self.operator_pod,
                                state=constants.STATUS_RUNNING,
                                timeout=300)
        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation since
        the process is so light and quick that the time required for the redundant
        copy is negligible in comparison to the time a hash comparison would take.
        """
        retrieve_default_ingress_crt()

        get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceS3").get("externalDNS")[0])
        self.s3_internal_endpoint = (get_noobaa.get("items")[0].get(
            "status").get("services").get("serviceS3").get("internalDNS")[0])
        self.mgmt_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceMgmt").get("externalDNS")[0]) + "/rpc"
        self.region = config.ENV_DATA["region"]

        creds_secret_name = (get_noobaa.get("items")[0].get("status").get(
            "accounts").get("admin").get("secretRef").get("name"))
        secret_ocp_obj = OCP(kind="secret", namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_ACCESS_KEY_ID")).decode(
                "utf-8")
        self.access_key = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_SECRET_ACCESS_KEY")).decode(
                "utf-8")

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get("data").get("email")).decode("utf-8")
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get("data").get("password")).decode("utf-8")

        self.noobaa_token = self.retrieve_nb_token()

        self.s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key,
        )

        self.s3_client = self.s3_resource.meta.client

        if config.ENV_DATA["platform"].lower() == "aws" and kwargs.get(
                "create_aws_creds"):
            (
                self.cred_req_obj,
                self.aws_access_key_id,
                self.aws_access_key,
            ) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                "s3",
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key,
            )
Example #12
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA['cluster_namespace']
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])

        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation since
        the process is so light and quick that the time required for the redundant
        copy is negligible in comparison to the time a hash comparison would take.
        """
        retrieve_default_ingress_crt()

        get_noobaa = OCP(kind='noobaa', namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get('items')[0].get('status').get(
            'services').get('serviceS3').get('externalDNS')[0])
        self.s3_internal_endpoint = (get_noobaa.get('items')[0].get(
            'status').get('services').get('serviceS3').get('internalDNS')[0])
        self.mgmt_endpoint = (get_noobaa.get('items')[0].get('status').get(
            'services').get('serviceMgmt').get('externalDNS')[0]) + '/rpc'
        self.region = config.ENV_DATA['region']

        creds_secret_name = (get_noobaa.get('items')[0].get('status').get(
            'accounts').get('admin').get('secretRef').get('name'))
        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_ACCESS_KEY_ID')).decode(
                'utf-8')
        self.access_key = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_SECRET_ACCESS_KEY')).decode(
                'utf-8')

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get('data').get('email')).decode('utf-8')
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get('data').get('password')).decode('utf-8')

        self.noobaa_token = self.send_rpc_query(
            'auth_api',
            'create_auth',
            params={
                'role': 'admin',
                'system': 'noobaa',
                'email': self.noobaa_user,
                'password': self.noobaa_password
            }).json().get('reply').get('token')

        self.s3_resource = boto3.resource(
            's3',
            verify=retrieve_verification_mode(),
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key)

        self.s3_client = self.s3_resource.meta.client

        if (config.ENV_DATA['platform'].lower() == 'aws'
                and kwargs.get('create_aws_creds')):
            (self.cred_req_obj, self.aws_access_key_id,
             self.aws_access_key) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                's3',
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key)

        if (config.ENV_DATA['platform'].lower() in constants.CLOUD_PLATFORMS
                or storagecluster_independent_check()):
            if not config.ENV_DATA['platform'] == constants.AZURE_PLATFORM and (
                    float(config.ENV_DATA['ocs_version']) > 4.5):
                logger.info('Verifying that no RGW pods are present')
                pods = pod.get_pods_having_label(label=constants.RGW_APP_LABEL,
                                                 namespace=self.namespace)
                assert not pods, 'RGW pods should not exist in the current platform/cluster'

        elif (config.ENV_DATA.get('platform') in constants.ON_PREM_PLATFORMS
                or config.ENV_DATA.get('platform') == constants.AZURE_PLATFORM):
            rgw_count = (2 if float(config.ENV_DATA['ocs_version']) >= 4.5
                         and not check_if_cluster_was_upgraded() else 1)

            # With 4.4 OCS cluster deployed over Azure, RGW is the default backingstore
            if (float(config.ENV_DATA['ocs_version']) == 4.4
                    and config.ENV_DATA.get('platform')
                    == constants.AZURE_PLATFORM):
                rgw_count = 1
            if (float(config.ENV_DATA['ocs_version']) == 4.5
                    and config.ENV_DATA.get('platform')
                    == constants.AZURE_PLATFORM
                    and check_if_cluster_was_upgraded()):
                rgw_count = 1
            logger.info(
                f'Checking for RGW pod/s on {config.ENV_DATA.get("platform")} platform'
            )
            rgw_pod = OCP(kind=constants.POD, namespace=self.namespace)
            assert rgw_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.RGW_APP_LABEL,
                resource_count=rgw_count,
                timeout=60)