def create_namespace_store(self, nss_name, region, cld_mgr, cloud_uls_factory, platform):
    """
    Create a new namespace store backed by a freshly provisioned cloud ULS.

    Args:
        nss_name (str): The name to be given to the new namespace store
        region (str): The region name to be used
        cld_mgr: A cloud manager instance
        cloud_uls_factory: The cloud uls factory
        platform (str): The platform resource name

    Returns:
        str: The name of the created target_bucket_name (cloud uls)
    """
    # Provision the underlying target bucket (ULS) on the requested platform
    created_uls = cloud_uls_factory({platform: [(1, region)]})
    target_bucket_name = next(iter(created_uls[platform]))

    # Start from the namespacestore template and fill in the metadata
    nss_data = templating.load_yaml(constants.MCG_NAMESPACESTORE_YAML)
    nss_data["metadata"]["name"] = nss_name
    nss_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]

    # Platform-specific spec bodies; credentials are pulled from the
    # corresponding cloud manager client via get_attr_chain
    spec_per_platform = {
        constants.AWS_PLATFORM: {
            "type": "aws-s3",
            "awsS3": {
                "targetBucket": target_bucket_name,
                "secret": {
                    "name": get_attr_chain(cld_mgr, "aws_client.secret.name")
                },
            },
        },
        constants.AZURE_PLATFORM: {
            "type": "azure-blob",
            "azureBlob": {
                "targetBlobContainer": target_bucket_name,
                "secret": {
                    "name": get_attr_chain(cld_mgr, "azure_client.secret.name")
                },
            },
        },
        constants.RGW_PLATFORM: {
            "type": "s3-compatible",
            "s3Compatible": {
                "targetBucket": target_bucket_name,
                "endpoint": get_attr_chain(cld_mgr, "rgw_client.endpoint"),
                "signatureVersion": "v2",
                "secret": {
                    "name": get_attr_chain(cld_mgr, "rgw_client.secret.name")
                },
            },
        },
    }
    nss_data["spec"] = spec_per_platform[platform]
    create_resource(**nss_data)
    return target_bucket_name
def oc_create_namespacestore(
    nss_name,
    platform,
    mcg_obj,
    uls_name=None,
    cld_mgr=None,
    nss_tup=None,
    nsfs_pvc_name=None,
):
    """
    Create a namespacestore by applying an OC resource definition

    Args:
        nss_name (str): Name of the namespacestore
        platform (str): Platform to create the namespacestore on
        mcg_obj (MCG): A redundant MCG object, used for uniformity
            between OC and CLI calls
        uls_name (str): Name of the ULS bucket (or PVC, for NSFS) to use
            for the namespacestore
        cld_mgr (CloudManager): CloudManager object used for supplying the needed connection credentials
        nss_tup (tuple): A tuple containing the NSFS namespacestore details, in this order:
            pvc_name (str): Name of the PVC that will host the namespace filesystem
            pvc_size (int): Size in Gi of the PVC that will host the namespace filesystem
            sub_path (str): The path to a sub directory inside the PVC FS which
            the NSS will use as the root directory
            fs_backend (str): The file system backend type - CEPH_FS | GPFS | NFSv4. Defaults to None.
        nsfs_pvc_name (str): Currently unused; kept for interface compatibility

    """
    nss_data = templating.load_yaml(constants.MCG_NAMESPACESTORE_YAML)
    nss_data["metadata"]["name"] = nss_name
    nss_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
    # Each entry is a callable so that credentials are only fetched from the
    # cloud manager for the platform that is actually requested
    NSS_MAPPING = {
        constants.AWS_PLATFORM: lambda: {
            "type": "aws-s3",
            "awsS3": {
                "targetBucket": uls_name,
                "secret": {
                    "name": get_attr_chain(cld_mgr, "aws_client.secret.name")
                },
            },
        },
        constants.AZURE_PLATFORM: lambda: {
            "type": "azure-blob",
            "azureBlob": {
                "targetBlobContainer": uls_name,
                "secret": {
                    "name": get_attr_chain(cld_mgr, "azure_client.secret.name")
                },
            },
        },
        constants.RGW_PLATFORM: lambda: {
            "type": "s3-compatible",
            "s3Compatible": {
                "targetBucket": uls_name,
                "endpoint": get_attr_chain(cld_mgr, "rgw_client.endpoint"),
                "signatureVersion": "v2",
                "secret": {
                    "name": get_attr_chain(cld_mgr, "rgw_client.secret.name")
                },
            },
        },
        constants.NAMESPACE_FILESYSTEM: lambda: {
            "type": "nsfs",
            "nsfs": {
                "pvcName": uls_name,
                "subPath": nss_tup[2] if nss_tup[2] else "",
            },
        },
    }
    # Build the spec dict first - the mapping values are lambdas, so the
    # previous in-place NSS_MAPPING[platform]["nsfs"] indexing raised a
    # TypeError (a function object is not subscriptable)
    spec = NSS_MAPPING[platform.lower()]()
    if (
        platform.lower() == constants.NAMESPACE_FILESYSTEM
        and nss_tup
        and len(nss_tup) == 4
        and nss_tup[3]
    ):
        spec["nsfs"]["fsBackend"] = nss_tup[3]
    nss_data["spec"] = spec
    create_resource(**nss_data)
def create_connection(self, cld_mgr, platform, conn_name=None):
    """
    Creates a new NooBaa connection to a cloud backend (AWS, Azure or RGW)

    Args:
        cld_mgr (obj): A cloud manager instance
        platform (str): Platform to use for new connection
        conn_name (str): The connection name to be used
            If None provided then the name will be generated

    Returns:
        bool: True if the connection was created successfully

    Raises:
        UnsupportedPlatformError: If the given platform is not supported
        AssertionError: If the connection could not be created within the timeout

    """
    if conn_name is None:
        conn_name = create_unique_resource_name(f"{platform}-connection", "mcgconn")

    # Per-platform RPC parameters; credentials are pulled from the
    # corresponding cloud manager client
    if platform == constants.AWS_PLATFORM:
        params = {
            "auth_method": "AWS_V4",
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "endpoint_type": "AWS",
            "identity": get_attr_chain(cld_mgr, "aws_client.access_key"),
            "name": conn_name,
            "secret": get_attr_chain(cld_mgr, "aws_client.secret_key"),
        }
    elif platform == constants.AZURE_PLATFORM:
        params = {
            "endpoint": constants.MCG_NS_AZURE_ENDPOINT,
            "endpoint_type": "AZURE",
            "identity": get_attr_chain(cld_mgr, "azure_client.account_name"),
            "name": conn_name,
            "secret": get_attr_chain(cld_mgr, "azure_client.credential"),
        }
    elif platform == constants.RGW_PLATFORM:
        params = {
            "auth_method": "AWS_V4",
            "endpoint": get_attr_chain(cld_mgr, "rgw_client.endpoint"),
            "endpoint_type": "S3_COMPATIBLE",
            "identity": get_attr_chain(cld_mgr, "rgw_client.access_key"),
            "name": conn_name,
            "secret": get_attr_chain(cld_mgr, "rgw_client.secret_key"),
        }
    else:
        raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")

    try:
        # Retry for up to 30 seconds (every 3s) - freshly created IAM
        # credentials may take a short while to propagate on the backend
        for resp in TimeoutSampler(
            30,
            3,
            self.send_rpc_query,
            "account_api",
            "add_external_connection",
            params,
        ):
            if "error" not in resp.text:
                logger.info(f"Connection {conn_name} created successfully")
                return True
            else:
                logger.info(
                    f"{platform} IAM {conn_name} did not yet propagate: {resp.text}"
                )
    except TimeoutExpiredError as err:
        logger.error(f"Could not create connection {conn_name}")
        # Raise explicitly instead of `assert False` - asserts are stripped
        # under `python -O`, which would have made this silently return None.
        # AssertionError is kept so existing callers see the same exception type.
        raise AssertionError(f"Could not create connection {conn_name}") from err
def test_get_empty_attr():
    # An empty attribute chain should resolve to None
    result = utils.get_attr_chain(A(1), "")
    assert result is None
def test_get_none_obj_attr():
    # Resolving any attribute on a None object should yield None
    result = utils.get_attr_chain(None, "attribute")
    assert result is None
def test_get_nonexistent_attr_chain(chain_length):
    # Build a chain one link longer than the object actually provides,
    # so the final lookup falls off the end and resolves to None
    too_long_chain = ".".join(["sub_attr"] * (chain_length + 1))
    assert utils.get_attr_chain(A(chain_length), too_long_chain) is None
def test_get_attr_chain(chain_length):
    # Follow exactly as many links as the object provides; the innermost
    # object is expected to carry num == 0
    full_chain = ".".join(["sub_attr"] * chain_length)
    innermost = utils.get_attr_chain(A(chain_length), full_chain)
    assert innermost.num == 0