@pytest.fixture
def init():
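    """Fixture: start the deployer and create two pools, each with a distinct
    label plus a shared `openebs.io/created-by` label; tear down on exit."""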
    Deployer.start(num_mayastors=NUM_MAYASTORS)
    ApiClient.pools_api().put_node_pool(
        NODE_1_NAME,
        POOL_1_UUID,
        CreatePoolBody(
            ["malloc:///disk?size_mb=50"],
            labels={
                "pool1-specific-key": "pool1-specific-value",
                "openebs.io/created-by": "msp-operator",
            },
        ),
    )
    ApiClient.pools_api().put_node_pool(
        NODE_2_NAME,
        POOL_2_UUID,
        CreatePoolBody(
            ["malloc:///disk?size_mb=50"],
            labels={
                "pool2-specific-key": "pool2-specific-value",
                "openebs.io/created-by": "msp-operator",
            },
        ),
    )
    yield
    Deployer.stop()
def the_volume_should_be_deleted():
    """the volume should be deleted."""
    try:
        ApiClient.volumes_api().get_volume(VOLUME_UUID)
    except Exception as e:
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["not_found"]
    else:
        raise AssertionError("the volume still exists; expected a 404 not_found")
def pool_labels_must_not_contain_the_volume_topology_labels():
    """pool labels must not contain the volume topology labels."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    assert (common_labels(
        volume["spec"]["topology"]["pool_topology"]["labelled"]["inclusion"],
        ApiClient.pools_api().get_pool(POOL_2_UUID),
    ) == 0)
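# These steps rely on a `common_labels` helper defined elsewhere in the test
# module. A minimal sketch of what it presumably does, counting the key/value
# pairs shared between a volume's topology labels and a pool's spec labels
# (the exact signature is an assumption inferred from the calls above):
def common_labels(topology_labels, pool):
    pool_labels = pool["spec"]["labels"]
    return len([key for key, value in topology_labels.items()
                if pool_labels.get(key) == value])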
@pytest.fixture
def init():
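    """Fixture: start a single node with one pool and an unpublished volume."""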
    Deployer.start(1)
    ApiClient.pools_api().put_node_pool(
        NODE_NAME, POOL_UUID, CreatePoolBody(["malloc:///disk?size_mb=50"]))
    ApiClient.volumes_api().put_volume(
        VOLUME_UUID, CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE))
    yield
    Deployer.stop()
def unpublishing_the_volume_should_return_an_already_unpublished_error():
    """unpublishing the volume should return an already unpublished error."""
    try:
        ApiClient.volumes_api().del_volume_target(VOLUME_UUID)
    except Exception as e:
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes[
            "precondition_failed"]
        assert "NotPublished" in exception_info["body"]
    else:
        raise AssertionError("the unpublish request unexpectedly succeeded")
def publishing_the_volume_should_return_an_already_published_error():
    """publishing the volume should return an already published error."""
    try:
        ApiClient.volumes_api().put_volume_target(VOLUME_UUID, NODE_NAME,
                                                  Protocol("nvmf"))
    except Exception as e:
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes[
            "precondition_failed"]
        assert "AlreadyPublished" in exception_info["body"]
    else:
        raise AssertionError("the publish request unexpectedly succeeded")
def a_published_selfhealing_volume():
    """a published self-healing volume."""
    request = CreateVolumeBody(
        VolumePolicy(True),
        NUM_VOLUME_REPLICAS,
        VOLUME_SIZE,
        topology=Topology(pool_topology=PoolTopology(labelled=LabelledTopology(
            exclusion={}, inclusion={"node": MAYASTOR_2}))),
    )
    ApiClient.volumes_api().put_volume(VOLUME_UUID, request)
    ApiClient.volumes_api().put_volume_target(VOLUME_UUID, MAYASTOR_1,
                                              Protocol("nvmf"))
def an_existing_published_volume_without_pool_topology():
    """an existing published volume without pool topology"""
    ApiClient.volumes_api().put_volume(
        VOLUME_UUID,
        CreateVolumeBody(
            VolumePolicy(False),
            1,
            VOLUME_SIZE,
        ),
    )
    # Publish volume so that there is a nexus to add a replica to.
    ApiClient.volumes_api().put_volume_target(VOLUME_UUID, NODE_1_NAME,
                                              Protocol("nvmf"))
def pool_labels_must_contain_all_the_volume_request_topology_labels(
        create_request):
    """pool labels must contain all the volume request topology labels."""
    inclusion = create_request[CREATE_REQUEST_KEY]["topology"]["pool_topology"][
        "labelled"]["inclusion"]
    # At least one of the pools must share every requested topology label.
    assert (common_labels(
        inclusion,
        ApiClient.pools_api().get_pool(POOL_1_UUID),
    ) == len(inclusion) or common_labels(
        inclusion,
        ApiClient.pools_api().get_pool(POOL_2_UUID),
    ) == len(inclusion))
@pytest.fixture
def init(create_pool_disk_images):
    # Shorten the reconcile periods and cache period to speed up the tests.
    Deployer.start_with_args(
        [
            "-j",
            "-m=2",
            "-w=10s",
            "--reconcile-idle-period=500ms",
            "--reconcile-period=500ms",
            "--cache-period=1s",
        ]
    )

    # Create pools
    ApiClient.pools_api().put_node_pool(
        MAYASTOR_1,
        POOL1_UUID,
        CreatePoolBody(["aio:///host/tmp/{}".format(POOL_DISK1)]),
    )
    ApiClient.pools_api().put_node_pool(
        MAYASTOR_2,
        POOL2_UUID,
        CreatePoolBody(["aio:///host/tmp/{}".format(POOL_DISK2)]),
    )

    # Create and publish a volume on node 1
    request = CreateVolumeBody(VolumePolicy(False), NUM_VOLUME_REPLICAS, VOLUME_SIZE)
    ApiClient.volumes_api().put_volume(VOLUME_UUID, request)
    ApiClient.volumes_api().put_volume_target(VOLUME_UUID, MAYASTOR_1, Protocol("nvmf"))

    yield
    Deployer.stop()
def volume_creation_should_fail_with_an_insufficient_storage_error(
        create_request):
    """volume creation should fail with an insufficient storage error."""
    request = create_request[CREATE_REQUEST_KEY]
    try:
        ApiClient.volumes_api().put_volume(VOLUME_UUID, request)
    except Exception as e:
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes[
            "insufficient_storage"]

    # Check that the volume wasn't created.
    volumes = ApiClient.volumes_api().get_volumes()
    assert len(volumes) == 0
def no_of_suitable_pools(volume_pool_topology_labels):
    """Return the number of pools whose labels contain all the given topology labels."""
    pool_labels = [
        ApiClient.pools_api().get_pool(POOL_1_UUID)["spec"]["labels"],
        ApiClient.pools_api().get_pool(POOL_2_UUID)["spec"]["labels"],
    ]
    return sum(1 for labels in pool_labels
               if all(labels.get(key) == value
                      for key, value in volume_pool_topology_labels.items()))
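# Hypothetical usage, mirroring the steps further below:
# inclusion = volume["spec"]["topology"]["pool_topology"]["labelled"]["inclusion"]
# assert no_of_suitable_pools(inclusion) > 0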
def check_get_node_capacity(get_nodes_capacity):
    pool_api = ApiClient.pools_api()

    for i, p in enumerate([POOL1_UUID, POOL2_UUID]):
        pool = pool_api.get_pool(p)
        assert (pool.state.capacity == get_nodes_capacity[i]
                ), "Node pool size does not match reported node capacity"
def an_additional_replica_should_be_added_to_the_volume(replica_ctx):
    """an additional replica should be added to the volume."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    assert hasattr(volume.state, "target")
    nexus = volume.state.target
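    # Each nexus child backs one replica, so the child count should equal the
    # replica count recorded in the test context.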
    assert replica_ctx[REPLICA_CONTEXT_KEY] == len(nexus["children"])
    assert REPLICA_ERROR not in replica_ctx
def check_volume_status_published():
    vol = ApiClient.volumes_api().get_volume(VOLUME1_UUID)
    assert str(
        vol.spec.target.protocol) == "nvmf", "Volume protocol mismatch"
    assert vol.state.target["protocol"] == "nvmf", "Volume protocol mismatch"
    assert vol.state.target["deviceUri"].startswith(
        "nvmf://"), "Volume share URI mismatch"
def volume_creation_should_succeed_with_a_returned_volume_object(
        create_request):
    """volume creation should succeed with a returned volume object."""
    expected_spec = VolumeSpec(
        1,
        VOLUME_SIZE,
        SpecStatus("Created"),
        VOLUME_UUID,
        VolumePolicy(False),
    )

    # Check the volume object returned is as expected
    request = create_request[CREATE_REQUEST_KEY]
    volume = ApiClient.volumes_api().put_volume(VOLUME_UUID, request)
    assert str(volume.spec) == str(expected_spec)
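    # Comparing string forms is a loose but convenient equality check across the
    # generated OpenAPI model objects.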

    # The key for the replica topology is the replica UUID. This is assigned at replica creation
    # time, so get the replica UUID from the returned volume object, and use this as the key of
    # the expected replica topology.
    expected_replica_topology = {}
    for key in volume.state.replica_topology:
        expected_replica_topology[key] = ReplicaTopology(ReplicaState("Online"),
                                                         node="mayastor-1",
                                                         pool=POOL_UUID)
    expected_state = VolumeState(
        VOLUME_SIZE,
        VolumeStatus("Online"),
        VOLUME_UUID,
        expected_replica_topology,
    )
    )
    assert str(volume.state) == str(expected_state)
def check_nexus_removed():
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    # The nexus should be gone, or it may have been recreated if we lost the "race".
    assert (not hasattr(volume.state, "target")
            or NexusState(volume.state.target["state"]) == NexusState("Online")
            or NexusState(volume.state.target["state"]) == NexusState("Degraded"))
def publish_to_node_2(background):
    volume = background
    device_uri = volume.state["target"]["deviceUri"]

    try:
        ApiClient.volumes_api().del_volume_target(VOLUME_UUID)
    except ApiException as e:
        # The unpublish may time out, or fail a precondition, if the target node
        # is not online.
        assert (e.status == http.HTTPStatus.REQUEST_TIMEOUT
                or e.status == http.HTTPStatus.PRECONDITION_FAILED)

    ApiClient.volumes_api().del_volume_target(VOLUME_UUID, force="true")
    volume_updated = ApiClient.volumes_api().put_volume_target(
        VOLUME_UUID, TARGET_NODE_2, Protocol("nvmf"))
    device_uri_2 = volume_updated.state["target"]["deviceUri"]
    assert device_uri != device_uri_2
    return device_uri_2
def a_pool_which_does_not_contain_the_volume_topology_label():
    """a pool which does not contain the volume topology label."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    assert (
        # Of the suitable pools, one is already occupied by the existing replica,
        # so reduce the count by 1. In this scenario the only pool carrying the
        # topology labels is in use, leaving 0 suitable pools.
        no_of_suitable_pools(volume["spec"]["topology"]["pool_topology"]
                             ["labelled"]["inclusion"]) - 1 == 0)
def an_existing_published_volume_with_a_topology_not_matching_pool_labels():
    """an existing published volume with a topology not matching pool labels"""
    ApiClient.volumes_api().put_volume(
        VOLUME_UUID,
        CreateVolumeBody(
            VolumePolicy(False),
            1,
            VOLUME_SIZE,
            topology=Topology(pool_topology=PoolTopology(
                labelled=LabelledTopology(
                    exclusion={},
                    inclusion={"pool1-specific-key": "pool1-specific-value"},
                ))),
        ),
    )
    # Publish volume so that there is a nexus to add a replica to.
    ApiClient.volumes_api().put_volume_target(VOLUME_UUID, NODE_1_NAME,
                                              Protocol("nvmf"))
@pytest.fixture
def background():
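    # Start a three-node deployment with ANA (Asymmetric Namespace Access) and
    # NVMe reservations enabled so the volume target can be moved between nodes.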
    Deployer.start_with_args([
        "-j",
        "-m=3",
        "-w=10s",
        "--cache-period=1s",
        "--mayastor-env=NEXUS_NVMF_ANA_ENABLE=1,NEXUS_NVMF_RESV_ENABLE=1",
        "--agents-env=TEST_NEXUS_NVMF_ANA_ENABLE=1",
    ])
    ApiClient.pools_api().put_node_pool(
        POOL_NODE, POOL_UUID, CreatePoolBody(["malloc:///disk?size_mb=100"]))
    ApiClient.volumes_api().put_volume(
        VOLUME_UUID, CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE))
    volume = ApiClient.volumes_api().put_volume_target(VOLUME_UUID,
                                                       TARGET_NODE_1,
                                                       Protocol("nvmf"))
    yield volume
    Deployer.stop()
def check_replicas_online():
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    online_replicas = [
        uuid for uuid, replica in volume.state.replica_topology.items()
        if str(replica.state) == "Online"
    ]
    assert len(online_replicas) > 0
def publishing_the_volume_should_succeed_with_a_returned_volume_object_containing_the_share_uri(
):
    """publishing the volume should succeed with a returned volume object containing the share URI."""
    volume = ApiClient.volumes_api().put_volume_target(VOLUME_UUID, NODE_NAME,
                                                       Protocol("nvmf"))
    assert hasattr(volume.spec, "target")
    assert str(volume.spec.target.protocol) == str(Protocol("nvmf"))
    assert hasattr(volume.state, "target")
    assert "nvmf://" in volume.state.target["deviceUri"]
def the_number_of_suitable_pools_is_less_than_the_number_of_desired_volume_replicas(
    create_request, ):
    """the number of suitable pools is less than the number of desired volume replicas."""
    # Delete a pool so that there aren't enough pools for the desired replica count.
    pools_api = ApiClient.pools_api()
    pools_api.del_pool(POOL_UUID)
    num_pools = len(pools_api.get_pools())
    num_volume_replicas = create_request[CREATE_REQUEST_KEY]["replicas"]
    assert num_pools < num_volume_replicas
def setting_the_number_of_replicas_to_zero_should_fail_with_a_suitable_error():
    """the replica removal should fail with a suitable error."""
    volumes_api = ApiClient.volumes_api()
    volume = volumes_api.get_volume(VOLUME_UUID)
    assert hasattr(volume.state, "target")
    try:
        volumes_api.put_volume_replica_count(VOLUME_UUID, 0)
    except Exception as e:
        # TODO: Return a proper error rather than asserting for a substring
        assert "ApiValueError" in str(type(e))
    else:
        raise AssertionError("setting the replica count to 0 unexpectedly succeeded")
def no_available_pools_for_replacement_replicas():
    """no available pools for replacement replicas."""
    pool_api = ApiClient.pools_api()
    pools = pool_api.get_pools()
    assert len(pools) == 3

    # Delete the additional pool so that a replacement replica cannot be created.
    pool_api.del_pool(POOL_3_UUID)
    pools = pool_api.get_pools()
    assert len(pools) == 2
def check_orphaned_replica():
    # There should only be one replica remaining - the one on the node that is inaccessible.
    replicas = ApiClient.specs_api().get_specs()["replicas"]
    assert len(replicas) == 1

    # Check that the replica is an orphan (i.e. it is managed but does not have any owners).
    replica = replicas[0]
    assert replica["managed"]
    assert len(replica["owners"]["nexuses"]) == 0
    assert "volume" not in replica["owners"]
def a_replica_which_is_managed_but_does_not_have_any_owners():
    """a replica which is managed but does not have any owners."""

    # Kill the Mayastor instance which does not host the nexus.
    Docker.kill_container(MAYASTOR_2)

    # Attempt to delete the volume. This will leave a replica behind on the node that is
    # inaccessible.
    try:
        ApiClient.volumes_api().del_volume(VOLUME_UUID)
    except Exception as e:
        # A Mayastor node is inaccessible, so deleting the volume will fail because the replica
        # on this node cannot be destroyed. Attempting to do so results in a timeout. This is
        # expected and results in a replica being orphaned.
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["request_timeout"]

    check_orphaned_replica()
def the_replica_on_the_inaccessible_node_should_become_orphaned():
    """the replica on the inaccessible node should become orphaned."""
    replicas = ApiClient.specs_api().get_specs()["replicas"]
    assert len(replicas) == 1

    # The replica is orphaned if it doesn't have any owners.
    replica = replicas[0]
    assert replica["managed"]
    assert len(replica["owners"]["nexuses"]) == 0
    assert "volume" not in replica["owners"]
@pytest.fixture
def populate_published_2_replica_volume(_create_2_replica_nvmf_volume):
    do_publish_volume(VOLUME4_UUID, NODE1)

    # Make sure volume is published.
    volume = ApiClient.volumes_api().get_volume(VOLUME4_UUID)
    assert (str(volume.spec.target.protocol) == "nvmf"
            ), "Protocol mismatch for published volume"
    assert (volume.state.target["protocol"] == "nvmf"
            ), "Protocol mismatch for published volume"
    return volume
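# `do_publish_volume` is a helper from the same test suite; a plausible sketch
# based on how it is called above (an assumption, not the actual implementation):
def do_publish_volume(volume_uuid, node):
    return ApiClient.volumes_api().put_volume_target(volume_uuid, node,
                                                     Protocol("nvmf"))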