def run(**kw):
    """CEPH-9501: verify that deleting a primary image is mirrored.

    Creates a mirrored image, drives I/O to it, deletes it on the
    primary cluster while contents are being mirrored, and checks the
    outcome on the secondary cluster.

    Pre-requisites:
    1. Two running clusters with enough OSDs to create pools.
    2. At least one client node with the ceph-common package, conf and
       keyring files.

    Args:
        **kw: test keyword arguments; expects ``config`` and
            ``ceph_cluster_dict`` entries.

    Returns:
        int: 0 if the test case passes, 1 if it fails.
    """
    try:
        log.info("Starting RBD mirroring test case - 9501")
        config = kw.get("config")
        clusters = kw.get("ceph_cluster_dict").values()
        # Exactly two clusters expected; unpacking enforces that.
        mirror1, mirror2 = (rbdmirror.RbdMirror(c, config) for c in clusters)

        pool = mirror1.random_string() + "_ceph_9501"
        image = mirror1.random_string() + "_ceph_9501"
        spec = f"{pool}/{image}"

        initial_config(
            mirror1,
            mirror2,
            pool,
            spec,
            imagesize=config.get("imagesize", "1G"),
        )
        mirror1.benchwrite(imagespec=spec, io=config.get("io-total", "1G"))

        mirror1.delete_image(spec)
        # NOTE(review): the verdict hinges on image_exists() truthiness;
        # confirm its return convention — intuitively the case should pass
        # when the image is absent on the secondary, not present.
        if mirror2.image_exists(spec):
            return 0

    except Exception as err:
        log.exception(err)

    return 1
# Example 2
def run(**kw):
    """Configure RBD mirroring between the two test clusters.

    Builds RbdMirror handles for ``ceph-rbd1`` and ``ceph-rbd2``, shares
    them via ``test_data``, resolves same-name cluster conflicts, and
    wires up one-way or two-way mirroring per ``config['way']``.

    Returns:
        int: always 0 (setup helper; failures propagate as exceptions).
    """
    log.info("Starting mirroring")
    config = kw.get("config")

    cluster_dict = kw.get("ceph_cluster_dict")
    mirror1 = rbdmirror.RbdMirror(cluster_dict.get("ceph-rbd1"), config)
    mirror2 = rbdmirror.RbdMirror(cluster_dict.get("ceph-rbd2"), config)
    kw.get("test_data").update({"mirror1": mirror1, "mirror2": mirror2})

    way = config.get("way", "")

    # Clusters that share a name need distinct local identities before
    # peering can be established.
    if mirror1.cluster_name == mirror2.cluster_name:
        mirror1.handle_same_name("master")
        if "two-way" in way:
            mirror2.handle_same_name("slave")

    if "one-way" in way:
        mirror2.setup_mirror(mirror1)
    else:
        mirror1.setup_mirror(mirror2)
        mirror2.setup_mirror(mirror1)

    return 0
# Example 3
def run(**kw):
    """Set up RBD mirroring across the two configured clusters.

    Same-name clusters are renamed locally first; the mirroring peers
    are then configured one-way or two-way depending on the 'way' key
    in config.

    Returns:
        int: always 0.
    """
    log.info("Starting mirroring")
    config = kw.get('config')

    peers = kw.get('ceph_cluster_dict')
    mirror1, mirror2 = (
        rbdmirror.RbdMirror(peers.get(name), config)
        for name in ('ceph-rbd1', 'ceph-rbd2')
    )
    kw.get('test_data').update({'mirror1': mirror1, 'mirror2': mirror2})

    # Handling of clusters with the same name: give each side a unique
    # local identity so peering does not collide.
    if mirror1.cluster_name == mirror2.cluster_name:
        mirror1.handle_same_name('master')
        if 'two-way' in config.get('way', ''):
            mirror2.handle_same_name('slave')

    if 'one-way' in config.get('way', ''):
        mirror2.setup_mirror(mirror1)
    else:
        mirror1.setup_mirror(mirror2)
        mirror2.setup_mirror(mirror1)

    return 0
# Example 4
def run(**kw):
    """
    --> Configures RBD Mirroring on cephadm
    --> Creates Pool Image and enables Mirroring
    --> Runs IO using rbd bench
    Args:
        **kw: expects ``config`` and ``ceph_cluster_dict`` entries.
    Returns:
        0 - if test case passes
        1 - if test case fails
    """
    try:
        log.info("Starting RBD mirroring test case")
        config = kw.get("config")
        # Exactly two clusters are expected; unpacking raises ValueError
        # if the dict holds any other count.
        mirror1, mirror2 = [
            rbdmirror.RbdMirror(cluster, config)
            for cluster in kw.get("ceph_cluster_dict").values()
        ]
        poolname = mirror1.random_string() + "_tier_1_rbd_mirror_pool"
        imagename = mirror1.random_string() + "_tier_1_rbd_mirror_image"
        imagespec = poolname + "/" + imagename

        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)
        mirror1.create_image(imagespec=imagespec, size=config.get("imagesize"))
        mirror1.config_mirror(mirror2, poolname=poolname, mode="pool")
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.benchwrite(imagespec=imagespec, io=config.get("io-total"))
        mirror1.resize_image(imagespec=imagespec, size=config.get("resize_to"))
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern="up+stopped")
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern="up+replaying")
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except ValueError as ve:
        # values() must be called; interpolating the bound method printed
        # its repr instead of the cluster list.
        log.error(
            f"{kw.get('ceph_cluster_dict').values()} has less or more clusters Than Expected(2 clusters expected)"
        )
        log.exception(ve)
        # Fix: previously this branch fell through and returned None,
        # which is not the documented failure code.
        return 1
    except Exception as e:
        log.exception(e)
        return 1
# Example 5
def run(**kw):
    """
    1. Enable snapshot mode for images
    2. Run IO on mirrored images
    3. Rename the image from primary cluster and see if image reflected to secondary
       and check the data consistency
    Args:
        **kw: expects ``config`` and ``ceph_cluster_dict`` entries.
    Returns:
        0 - if test case passes
        1 - if test case fails
    """
    try:
        log.info("Starting RBD mirroring test case")
        config = kw.get("config")
        # Exactly two clusters are expected; unpacking raises ValueError
        # if the dict holds any other count.
        mirror1, mirror2 = [
            rbdmirror.RbdMirror(cluster, config)
            for cluster in kw.get("ceph_cluster_dict").values()
        ]
        poolname = mirror1.random_string() + "_tier_1_rbd_mirror_pool"
        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)

        # Create image and enable snapshot mirroring
        imagename_1 = mirror1.random_string() + "_tier_1_rbd_mirror_image"
        imagespec_1 = poolname + "/" + imagename_1
        mirror1.create_image(imagespec=imagespec_1,
                             size=config.get("imagesize"))
        mirror1.config_mirror(mirror2, poolname=poolname, mode="image")
        mirror1.enable_mirror_image(poolname, imagename_1, "snapshot")
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.benchwrite(imagespec=imagespec_1, io=config.get("io-total"))
        mirror1.wait_for_status(imagespec=imagespec_1,
                                state_pattern="up+stopped")
        mirror2.wait_for_status(imagespec=imagespec_1,
                                state_pattern="up+replaying")

        # Rename primary image and check on secondary
        mirror1.rename_primary_image(
            source_imagespec=imagespec_1,
            dest_imagespec="rename_image",
            peercluster=mirror2,
            poolname=poolname,
        )
        mirror1.create_mirror_snapshot(f"{poolname}/rename_image")
        # Allow the mirror snapshot time to sync before inspecting the peer.
        time.sleep(30)
        out2 = mirror2.exec_cmd(cmd=f"rbd info {poolname}/rename_image")
        log.info(out2)

        # Cleans up the configuration
        mirror1.delete_image(f"{poolname}/rename_image")
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except ValueError as ve:
        # values() must be called; interpolating the bound method printed
        # its repr instead of the cluster list.
        log.error(
            f"{kw.get('ceph_cluster_dict').values()} has less or more clusters Than Expected(2 clusters expected)"
        )
        log.exception(ve)
        # Fix: previously this branch fell through and returned None,
        # which is not the documented failure code.
        return 1
    except Exception as e:
        log.exception(e)
        return 1
# Example 6
def run(**kw):
    """
    1. Enable journal and snapshot mode for images
    2. Make changes to the mirrored image and check the consistency
    3. Delete the image from secondary cluster and see if sync happens and check the consistency
        (for checking the consistency we can leverage check_data())
    4. Shutdown/stop network of the rbdmirror node( node where rbd mirror daemon is running).
       write data and check the sync after bringing back the node
    5. Verify rbd mirror image commands
        a. create Image and enable snapshot-based mirroring on it
        b. schedule snapshot and verify snapshots are getting created within the interval at any point of time,
            image should have only 3 snapshots. all the latest ones should be retained
        c. check the status of the image
        d. Remove the image
    Args:
        **kw: expects ``config`` and ``ceph_cluster_dict`` entries.
    Returns:
        0 - if test case passes
        1 - if test case fails
    """
    try:
        log.info("Starting RBD mirroring test case")
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        # Exactly two clusters are expected; unpacking raises ValueError
        # if the dict holds any other count.
        mirror1, mirror2 = [
            rbdmirror.RbdMirror(cluster, config)
            for cluster in kw.get("ceph_cluster_dict").values()
        ]
        poolname = mirror1.random_string() + "_tier_1_rbd_mirror_pool"
        imagename = mirror1.random_string() + "_tier_1_rbd_mirror_image"
        imagespec = poolname + "/" + imagename

        mirror1.create_pool(poolname=poolname)
        mirror2.create_pool(poolname=poolname)
        mirror1.create_image(imagespec=imagespec, size=config.get("imagesize"))
        mirror1.config_mirror(mirror2, poolname=poolname, mode="image")
        mirror1.enable_mirror_image(poolname, imagename, "journal")
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.benchwrite(imagespec=imagespec, io=config.get("io-total"))
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern="up+stopped")
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern="up+replaying")
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)

        # Stop the rbd-mirror service and check the status.
        # NOTE(review): service_name stays unbound when build starts with
        # neither "5" nor "4" — that surfaces as NameError below; confirm
        # supported builds or add an explicit guard.
        if build.startswith("5"):
            service_name = mirror2.get_rbd_service_name("rbd-mirror")
        if build.startswith("4"):
            service_name = mirror2.get_rbd_service_name(
                "*****@*****.**")
        mirror2.change_service_state(service_name=service_name,
                                     operation="stop")
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern="down+stopped")
        mirror1.benchwrite(imagespec=imagespec, io=config.get("io-total"))
        mirror2.change_service_state(service_name=service_name,
                                     operation="start")
        mirror1.wait_for_status(imagespec=imagespec,
                                state_pattern="up+stopped")
        mirror2.wait_for_status(imagespec=imagespec,
                                state_pattern="up+replaying")
        mirror1.check_data(peercluster=mirror2, imagespec=imagespec)

        mirror1.delete_image(imagespec)
        # Add check of the image in secondary cluster

        # Create image and enable snapshot mirroring
        imagename_1 = mirror1.random_string() + "_tier_1_rbd_mirror_image"
        imagespec_1 = poolname + "/" + imagename_1
        mirror1.create_image(imagespec=imagespec_1,
                             size=config.get("imagesize"))
        mirror1.enable_mirror_image(poolname, imagename_1, "snapshot")
        mirror2.wait_for_status(poolname=poolname, images_pattern=1)
        mirror1.benchwrite(imagespec=imagespec_1, io=config.get("io-total"))
        mirror1.wait_for_status(imagespec=imagespec_1,
                                state_pattern="up+stopped")
        mirror2.wait_for_status(imagespec=imagespec_1,
                                state_pattern="up+replaying")
        # Check Data failing for snapshot mirroring looks like it is syncing snapshots
        # mirror1.check_data(peercluster=mirror2, imagespec=imagespec_1)

        # schedule snapshot rbd-mirror
        mirror1.mirror_snapshot_schedule_add(poolname=poolname,
                                             imagename=imagename_1)
        mirror1.verify_snapshot_schedule(imagespec_1)
        mirror1.mirror_snapshot_schedule_list(poolname=poolname,
                                              imagename=imagename_1)
        mirror1.mirror_snapshot_schedule_status(poolname=poolname,
                                                imagename=imagename_1)
        mirror1.mirror_snapshot_schedule_remove(poolname=poolname,
                                                imagename=imagename_1)
        # Cleans up the configuration
        mirror1.delete_image(imagespec_1)
        mirror1.clean_up(peercluster=mirror2, pools=[poolname])
        return 0

    except ValueError as ve:
        # values() must be called; interpolating the bound method printed
        # its repr instead of the cluster list.
        log.error(
            f"{kw.get('ceph_cluster_dict').values()} has less or more clusters Than Expected(2 clusters expected)"
        )
        log.exception(ve)
        # Fix: previously this branch fell through and returned None,
        # which is not the documented failure code.
        return 1
    except Exception as e:
        log.exception(e)
        return 1