Example #1
0
 def __init__(self):
     """Initialize the vSphere IPI deployment and load its auth details."""
     # Zero-arg super() — Python 3 idiom, consistent with the other
     # deployment __init__ methods in this codebase.
     super().__init__()
     # "vmware_ipi" section of the auth config carries the IPI credentials
     self.ipi_details = load_auth_config()["vmware_ipi"]
Example #2
0
# the app labels specified
ignore_leftover_label = pytest.mark.ignore_leftover_label

# testing marker this is just for testing purpose if you want to run some test
# under development, you can mark it with @run_this and run pytest -m run_this
run_this = pytest.mark.run_this

# Skip marks
skip_inconsistent = pytest.mark.skip(
    reason="Currently the reduction is too inconsistent leading to inconsistent test results"
)

# Skipif marks
# Skip when AWS credentials are in neither the local auth.yaml, nor the
# environment, nor fetchable from the cloud (update_config_from_s3).
skipif_aws_creds_are_missing = pytest.mark.skipif(
    (
        load_auth_config().get("AUTH", {}).get("AWS", {}).get("AWS_ACCESS_KEY_ID")
        is None
        and "AWS_ACCESS_KEY_ID" not in os.environ
        and update_config_from_s3() is None
    ),
    reason=(
        "AWS credentials weren't found in the local auth.yaml "
        "and couldn't be fetched from the cloud"
    ),
)

# Skip when the Google API secret file configured in RUN does not exist
google_api_required = pytest.mark.skipif(
    not os.path.exists(os.path.expanduser(config.RUN["google_api_secret"])),
    reason="Google API credentials don't exist",
)

# Skip on any platform other than AWS
aws_platform_required = pytest.mark.skipif(
    config.ENV_DATA["platform"].lower() != "aws",
    reason="Test runs ONLY on AWS deployed cluster",
)
Example #3
0
 def __init__(self):
     """Enable flexy mode, then wire up the PSI instance and helpers."""
     # Set before the parent initializer runs (original statement order kept)
     self.flexy_deployment = True
     super().__init__()
     self.flexy_instance = FlexyBaremetalPSI()
     psi_conf = load_auth_config()['psi']
     self.psi_conf = psi_conf
     self.utils = psiutils.PSIUtils(psi_conf)
Example #4
0
# the app labels specified
ignore_leftover_label = pytest.mark.ignore_leftover_label

# testing marker this is just for testing purpose if you want to run some test
# under development, you can mark it with @run_this and run pytest -m run_this
run_this = pytest.mark.run_this

# Skip marks
skip_inconsistent = pytest.mark.skip(
    reason="Currently the reduction is too inconsistent leading to inconsistent test results"
)

# Skipif marks
# Skip when AWS credentials are in neither the local auth.yaml, nor the
# environment, nor fetchable from the cloud (update_config_from_s3).
skipif_aws_creds_are_missing = pytest.mark.skipif(
    (
        load_auth_config().get("AUTH", {}).get("AWS", {}).get("AWS_ACCESS_KEY_ID")
        is None
        and "AWS_ACCESS_KEY_ID" not in os.environ
        and update_config_from_s3() is None
    ),
    reason=(
        "AWS credentials weren't found in the local auth.yaml "
        "and couldn't be fetched from the cloud"
    ),
)

# Skip when the Google API secret file configured in RUN does not exist
google_api_required = pytest.mark.skipif(
    not os.path.exists(os.path.expanduser(config.RUN["google_api_secret"])),
    reason="Google API credentials don't exist",
)

# Skip on any platform other than AWS
aws_platform_required = pytest.mark.skipif(
    config.ENV_DATA["platform"].lower() != "aws",
    reason="Test runs ONLY on AWS deployed cluster",
)
Example #5
0
 def __init__(self):
     """Load baremetal helper-node and IPMI details, and create an AWS handle."""
     super().__init__()
     # Load the auth config once and reuse it for both sections instead of
     # calling load_auth_config() twice.
     auth_config = load_auth_config()
     self.helper_node_details = auth_config["baremetal"]
     self.mgmt_details = auth_config["ipmi"]
     self.aws = aws.AWS()
Example #6
0
 def __init__(self):
     """Load baremetal helper-node and IPMI management details."""
     super().__init__()
     # Load the auth config once and reuse it for both sections instead of
     # calling load_auth_config() twice.
     auth_config = load_auth_config()
     self.helper_node_details = auth_config['baremetal']
     self.mgmt_details = auth_config['ipmi']
Example #7
0
# Ignore leftovers of resources that carry the specified app labels
ignore_leftover_label = pytest.mark.ignore_leftover_label

# Development-only marker: tag work-in-progress tests with @run_this and
# select them with `pytest -m run_this`
run_this = pytest.mark.run_this

# Skip marks
skip_inconsistent = pytest.mark.skip(
    reason=(
        "Currently the reduction is too inconsistent leading to inconsistent test results"
    )
)

# Skipif marks
# Credentials are "missing" only when all three sources come up empty:
# local auth.yaml, the process environment, and the cloud fallback.
skipif_aws_creds_are_missing = pytest.mark.skipif(
    load_auth_config().get("AUTH", {}).get("AWS", {}).get("AWS_ACCESS_KEY_ID") is None
    and "AWS_ACCESS_KEY_ID" not in os.environ
    and update_config_from_s3() is None,
    reason=(
        "AWS credentials weren't found in the local auth.yaml "
        "and couldn't be fetched from the cloud"
    ),
)

# Not applicable to MCG-only deployments
skipif_mcg_only = pytest.mark.skipif(
    config.ENV_DATA["mcg_only_deployment"],
    reason="This test cannot run on MCG-Only deployments",
)
Example #8
0
    def __init__(self):
        """Read the IPMI management details from the auth configuration."""
        auth_config = load_auth_config()
        self.mgmt_details = auth_config["ipmi"]
Example #9
0
                           order_post_ocs_upgrade)

# Apply to a test class to skip the leftover check entirely
ignore_leftovers = pytest.mark.ignore_leftovers

# Apply to a test class to ignore leftovers of resources that carry
# the specified app labels
ignore_leftover_label = pytest.mark.ignore_leftover_label

# Development-only marker: tag work-in-progress tests with @run_this and
# select them with `pytest -m run_this`
run_this = pytest.mark.run_this

# Skipif marks
# Credentials are "missing" only when all three sources come up empty:
# local auth.yaml, the process environment, and the cloud fallback.
skipif_aws_creds_are_missing = pytest.mark.skipif(
    load_auth_config().get('AUTH', {}).get('AWS', {}).get('AWS_ACCESS_KEY_ID') is None
    and 'AWS_ACCESS_KEY_ID' not in os.environ
    and update_config_from_s3() is None,
    reason=(
        "AWS credentials weren't found in the local auth.yaml "
        "and couldn't be fetched from the cloud"
    ),
)

# Requires the Google API secret file configured in RUN to exist locally
google_api_required = pytest.mark.skipif(
    not os.path.exists(os.path.expanduser(config.RUN['google_api_secret'])),
    reason="Google API credentials don't exist",
)

# Runs only against clusters deployed on AWS
aws_platform_required = pytest.mark.skipif(
    config.ENV_DATA['platform'].lower() != 'aws',
    reason="Test runs ONLY on AWS deployed cluster",
)

azure_platform_required = pytest.mark.skipif(
    config.ENV_DATA['platform'].lower() != 'azure',
Example #10
0
# the app labels specified
ignore_leftover_label = pytest.mark.ignore_leftover_label

# testing marker this is just for testing purpose if you want to run some test
# under development, you can mark it with @run_this and run pytest -m run_this
run_this = pytest.mark.run_this

# Skip marks
skip_inconsistent = pytest.mark.skip(
    reason="Currently the reduction is too inconsistent leading to inconsistent test results"
)

# Skipif marks
# Skip when AWS credentials are in neither the local auth.yaml, nor the
# environment, nor fetchable from the cloud (update_config_from_s3).
skipif_aws_creds_are_missing = pytest.mark.skipif(
    (
        load_auth_config().get("AUTH", {}).get("AWS", {}).get("AWS_ACCESS_KEY_ID")
        is None
        and "AWS_ACCESS_KEY_ID" not in os.environ
        and update_config_from_s3() is None
    ),
    reason=(
        "AWS credentials weren't found in the local auth.yaml "
        "and couldn't be fetched from the cloud"
    ),
)

# Not applicable to MCG-only deployments
skipif_mcg_only = pytest.mark.skipif(
    config.ENV_DATA["mcg_only_deployment"],
    reason="This test cannot run on MCG-Only deployments",
)

# Skip when the Google API secret file configured in RUN does not exist
google_api_required = pytest.mark.skipif(
    not os.path.exists(os.path.expanduser(config.RUN["google_api_secret"])),
    reason="Google API credentials don't exist",
)
Example #11
0
    def test_noobaa_secret_deletion_method2(self, teardown_factory, mcg_obj, cleanup):
        """
        Verify the lifecycle of the secret shared by CLI-created backingstores.

        Objectives of this test are:
            1) create first backingstore using CLI passing credentials, which creates secret as well
            2) create second backingstore using CLI passing credentials, which recognizes the duplicates
               and uses the secret created above
            3) Modify the existing secret credentials see if the owned BS/NS is getting reconciled
            4) delete the first backingstore and make sure secret is not deleted
            5) check for the ownerReference see if its removed for the above backingstore deletion
            6) delete the second backingstore and make sure secret is now deleted

        """

        # create ULS
        # Resolve AWS credentials: try the shared ocs-ci-data copy first and
        # fall back to the local auth.yaml when that lookup fails.
        try:
            logger.info(
                "Trying to load credentials from ocs-ci-data. "
                "This flow is only relevant when running under OCS-QE environments."
            )
            # update_config_from_s3() may return None (AttributeError on .get)
            # or raise EndpointConnectionError — both are handled below.
            secret_dict = update_config_from_s3().get("AUTH")
        except (AttributeError, EndpointConnectionError):
            logger.warning(
                "Failed to load credentials from ocs-ci-data.\n"
                "Your local AWS credentials might be misconfigured.\n"
                "Trying to load credentials from local auth.yaml instead"
            )
            secret_dict = load_auth_config().get("AUTH", {})
        access_key = secret_dict["AWS"]["AWS_ACCESS_KEY_ID"]
        secret_key = secret_dict["AWS"]["AWS_SECRET_ACCESS_KEY"]
        first_uls_name = create_unique_resource_name(
            resource_description="uls", resource_type="aws"
        )
        # Create the first underlying AWS bucket directly via boto3
        client = boto3.resource(
            "s3",
            verify=True,
            endpoint_url="https://s3.amazonaws.com",
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
        )
        client.create_bucket(
            Bucket=first_uls_name,
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        first_bs_name = create_unique_resource_name(
            resource_description="backingstore", resource_type="aws"
        )
        # First CLI-created backingstore — per objective 1 this also creates
        # the linked secret.
        create_aws_bs_using_cli(
            mcg_obj=mcg_obj,
            backingstore_name=first_bs_name,
            access_key=access_key,
            secret_key=secret_key,
            uls_name=first_uls_name,
            region="eu-central-1",
        )
        mcg_obj.check_backingstore_state(
            backingstore_name=first_bs_name, desired_state=constants.BS_OPTIMAL
        )
        first_bs_obj = BackingStore(
            name=first_bs_name,
            method="cli",
            type="cloud",
            uls_name=first_uls_name,
            mcg_obj=mcg_obj,
        )
        # NOTE(review): cleanup appears to register the backingstore for
        # teardown — fixture defined elsewhere, confirm.
        cleanup(first_bs_obj)

        # create second backingstore using CLI and pass the secret credentials
        second_uls_name = create_unique_resource_name(
            resource_description="uls", resource_type="aws"
        )
        client.create_bucket(
            Bucket=second_uls_name,
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        second_bs_name = create_unique_resource_name(
            resource_description="backingstore", resource_type="aws"
        )
        create_aws_bs_using_cli(
            mcg_obj=mcg_obj,
            backingstore_name=second_bs_name,
            access_key=access_key,
            secret_key=secret_key,
            uls_name=second_uls_name,
            region="eu-central-1",
        )
        mcg_obj.check_backingstore_state(
            backingstore_name=second_bs_name, desired_state=constants.BS_OPTIMAL
        )
        second_bs_obj = BackingStore(
            name=second_bs_name,
            method="cli",
            type="cloud",
            uls_name=second_uls_name,
            mcg_obj=mcg_obj,
        )
        cleanup(second_bs_obj)

        # Modify the secret credentials to wrong one and see if the backingstores get rejected
        # The secret name is read from the second backingstore's spec; per
        # objective 2 both backingstores share this one secret.
        secret_name = OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
        ).get(resource_name=second_bs_name)["spec"]["awsS3"]["secret"]["name"]

        # "d3JvbmdhY2Nlc3NrZXk=" is base64 for "wrongaccesskey"
        wrong_access_key_patch = {
            "data": {"AWS_ACCESS_KEY_ID": "d3JvbmdhY2Nlc3NrZXk="}
        }  # Invalid Access Key
        OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="secret").patch(
            resource_name=secret_name,
            params=json.dumps(wrong_access_key_patch),
            format_type="merge",
        )
        logger.info("Patched wrong access key!")
        # A backingstore transitioning back to the "Creating" phase shows the
        # secret change is being reconciled (see assertion message).
        assert OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="backingstore"
        ).wait_for_resource(
            resource_name=second_bs_name,
            condition="Creating",
            column="PHASE",
        ), "Backingstores are not getting reconciled after changing linked secret credentials!"
        logger.info("Backingstores getting reconciled!")

        # delete first backingstore
        first_bs_obj.delete()
        logger.info(f"First backingstore {first_bs_name} deleted!")
        # The secret must survive — the second backingstore still uses it.
        assert (
            OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="secret").get(
                resource_name=secret_name, dont_raise=True
            )
            is not None
        ), "[Not expected] Secret got deleted along when first backingstore deleted!!"
        logger.info("Secret exists after the first backingstore deletion!")

        # check for the owner reference
        # The deleted backingstore must no longer appear among the secret's
        # ownerReferences.
        secret_owner_ref = OCP(
            namespace=config.ENV_DATA["cluster_namespace"], kind="secret"
        ).get(resource_name=secret_name)["metadata"]["ownerReferences"]
        for owner in secret_owner_ref:
            assert owner["name"] != first_bs_name, (
                f"Owner reference for {first_bs_name} still exists in the secret {secret_name} "
                f"even after backingstore {first_bs_name} got deleted!"
            )
        logger.info(
            f"Owner reference for first backingstore {first_bs_name} is deleted in {secret_name} !!"
        )

        # delete second backingstore
        second_bs_obj.delete()
        logger.info(f"Second backingstore {second_bs_name} deleted!")
        # With no backingstore left referencing it, the secret should now be gone.
        assert (
            OCP(namespace=config.ENV_DATA["cluster_namespace"], kind="secret").get(
                resource_name=secret_name, dont_raise=True
            )
            is None
        ), "[Not expected] Secret still exists even after all backingstores linked are deleted!"
        logger.info(
            "Secret got deleted after the all the linked backingstores are deleted!"
        )