def the_volume_should_be_deleted():
    """the volume should be deleted."""
    try:
        ApiClient.volumes_api().get_volume(VOLUME_UUID)
    except Exception as e:
        # Looking up a deleted volume must fail with 404 Not Found.
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["not_found"]
    else:
        # Fixed: a successful lookup previously passed the test silently
        # even though the volume still existed.
        raise AssertionError("volume still exists; expected get_volume to 404")
def init(create_pool_disk_images):
    """Fixture: bring up a 2-node deployment with one pool per node and a
    volume published on node 1; tear the deployment down on exit."""
    # Shorten the reconcile periods and cache period to speed up the tests.
    Deployer.start_with_args(
        [
            "-j",
            "-m=2",
            "-w=10s",
            "--reconcile-idle-period=500ms",
            "--reconcile-period=500ms",
            "--cache-period=1s",
        ]
    )

    # Create one pool on each io-engine node.
    pools_api = ApiClient.pools_api()
    for node, pool_uuid, disk in (
        (MAYASTOR_1, POOL1_UUID, POOL_DISK1),
        (MAYASTOR_2, POOL2_UUID, POOL_DISK2),
    ):
        pools_api.put_node_pool(
            node,
            pool_uuid,
            CreatePoolBody(["aio:///host/tmp/{}".format(disk)]),
        )

    # Create a volume and publish it on node 1.
    volume_body = CreateVolumeBody(VolumePolicy(False), NUM_VOLUME_REPLICAS, VOLUME_SIZE)
    volumes_api = ApiClient.volumes_api()
    volumes_api.put_volume(VOLUME_UUID, volume_body)
    volumes_api.put_volume_target(VOLUME_UUID, MAYASTOR_1, Protocol("nvmf"))
    yield
    Deployer.stop()
def init():
    """Fixture: single-node deployment with one malloc pool and one
    unpublished volume; stop the deployer on teardown."""
    Deployer.start(1)
    pool_body = CreatePoolBody(["malloc:///disk?size_mb=50"])
    ApiClient.pools_api().put_node_pool(NODE_NAME, POOL_UUID, pool_body)
    volume_body = CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE)
    ApiClient.volumes_api().put_volume(VOLUME_UUID, volume_body)
    yield
    Deployer.stop()
def unpublishing_the_volume_should_return_an_already_unpublished_error():
    """unpublishing the volume should return an already unpublished error."""
    try:
        ApiClient.volumes_api().del_volume_target(VOLUME_UUID)
    except Exception as e:
        # Expect 412 Precondition Failed with a NotPublished error body.
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["precondition_failed"]
        assert "NotPublished" in exception_info["body"]
    else:
        # Fixed: a successful unpublish previously passed the test silently.
        raise AssertionError("expected unpublishing an unpublished volume to fail")
def publishing_the_volume_should_return_an_already_published_error():
    """publishing the volume should return an already published error."""
    try:
        ApiClient.volumes_api().put_volume_target(VOLUME_UUID, NODE_NAME,
                                                  Protocol("nvmf"))
    except Exception as e:
        # Expect 412 Precondition Failed with an AlreadyPublished error body.
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["precondition_failed"]
        assert "AlreadyPublished" in exception_info["body"]
    else:
        # Fixed: a successful publish previously passed the test silently.
        raise AssertionError("expected publishing an already published volume to fail")
def a_published_selfhealing_volume():
    """a published self-healing volume."""
    # Constrain replica placement to pools labelled node=MAYASTOR_2 and
    # enable the self-healing policy.
    topology = Topology(
        pool_topology=PoolTopology(
            labelled=LabelledTopology(exclusion={}, inclusion={"node": MAYASTOR_2})
        )
    )
    volume_body = CreateVolumeBody(
        VolumePolicy(True),
        NUM_VOLUME_REPLICAS,
        VOLUME_SIZE,
        topology=topology,
    )
    volumes_api = ApiClient.volumes_api()
    volumes_api.put_volume(VOLUME_UUID, volume_body)
    volumes_api.put_volume_target(VOLUME_UUID, MAYASTOR_1, Protocol("nvmf"))
def init():
    """Fixture: 2-node deployment, one malloc pool per node, and one
    2-replica volume; stop the deployer on teardown."""
    Deployer.start(2)
    pools_api = ApiClient.pools_api()
    for node, pool_uuid in ((NODE1_NAME, POOL1_UUID), (NODE2_NAME, POOL2_UUID)):
        pools_api.put_node_pool(
            node, pool_uuid, CreatePoolBody(["malloc:///disk?size_mb=50"])
        )
    # NOTE(review): 10485761 = 10MiB + 1 byte — presumably a deliberately
    # unaligned size; confirm against the scenario this fixture serves.
    ApiClient.volumes_api().put_volume(
        VOLUME_UUID, CreateVolumeBody(VolumePolicy(False), 2, 10485761)
    )
    yield
    Deployer.stop()
def an_existing_published_volume_without_pool_topology():
    """an existing published volume without pool topology"""
    volumes_api = ApiClient.volumes_api()
    volumes_api.put_volume(
        VOLUME_UUID,
        CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE),
    )
    # Publish volume so that there is a nexus to add a replica to.
    volumes_api.put_volume_target(VOLUME_UUID, NODE_1_NAME, Protocol("nvmf"))
def volume_creation_should_fail_with_an_insufficient_storage_error(
        create_request):
    """volume creation should fail with an insufficient storage error."""
    request = create_request[CREATE_REQUEST_KEY]
    try:
        ApiClient.volumes_api().put_volume(VOLUME_UUID, request)
    except Exception as e:
        # Expect 507 Insufficient Storage.
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["insufficient_storage"]
    else:
        # Fixed: a successful creation previously went unnoticed and the test
        # only relied on the volume count check below.
        raise AssertionError("expected volume creation to fail")

    # Check that the volume wasn't created.
    volumes = ApiClient.volumes_api().get_volumes()
    assert len(volumes) == 0
def check_nexus_removed():
    """Check the volume no longer has a target nexus; tolerate a nexus that
    was recreated because we lost the "race", as long as it is healthy."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    if hasattr(volume.state, "target"):
        # The nexus might have been recreated if we lost the "race"; it must
        # then be either Online or Degraded.
        nexus_state = NexusState(volume.state.target["state"])
        assert nexus_state in (NexusState("Online"), NexusState("Degraded"))
def volume_creation_should_succeed_with_a_returned_volume_object(
        create_request):
    """volume creation should succeed with a returned volume object."""
    expected_spec = VolumeSpec(
        1,
        VOLUME_SIZE,
        SpecStatus("Created"),
        VOLUME_UUID,
        VolumePolicy(False),
    )

    # Check the volume object returned is as expected
    request = create_request[CREATE_REQUEST_KEY]
    volume = ApiClient.volumes_api().put_volume(VOLUME_UUID, request)
    assert str(volume.spec) == str(expected_spec)

    # The key for the replica topology is the replica UUID. This is assigned at
    # replica creation time, so get the replica UUIDs from the returned volume
    # object and use them as the keys of the expected replica topology.
    # (Fixed: the original iterated .items() while discarding the values and
    # misspelled the local name as "toplogy".)
    expected_replica_topology = {
        uuid: ReplicaTopology(ReplicaState("Online"),
                              node="mayastor-1",
                              pool=POOL_UUID)
        for uuid in volume.state.replica_topology
    }
    expected_state = VolumeState(
        VOLUME_SIZE,
        VolumeStatus("Online"),
        VOLUME_UUID,
        expected_replica_topology,
    )
    assert str(volume.state) == str(expected_state)
def check_volume_status_published():
    """Verify both spec and state record an nvmf share for the volume."""
    volume = ApiClient.volumes_api().get_volume(VOLUME1_UUID)
    assert str(volume.spec.target.protocol) == "nvmf", "Volume protocol mismatches"
    state_target = volume.state.target
    assert state_target["protocol"] == "nvmf", "Volume protocol mismatches"
    assert state_target["deviceUri"].startswith(
        "nvmf://"), "Volume share URI mismatches"
def an_additional_replica_should_be_added_to_the_volume(replica_ctx):
    """an additional replica should be added to the volume."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    assert hasattr(volume.state, "target")
    # The nexus child count must match the replica count recorded in the
    # context, and no error may have been captured on the way.
    children = volume.state.target["children"]
    assert replica_ctx[REPLICA_CONTEXT_KEY] == len(children)
    assert REPLICA_ERROR not in replica_ctx
def pool_labels_must_not_contain_the_volume_topology_labels():
    """pool labels must not contain the volume topology labels."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    inclusion_labels = volume["spec"]["topology"]["pool_topology"]["labelled"]["inclusion"]
    pool = ApiClient.pools_api().get_pool(POOL_2_UUID)
    # No label may be shared between the volume's inclusion labels and the pool.
    assert common_labels(inclusion_labels, pool) == 0
def publish_to_node_2(background):
    """Move the volume target to TARGET_NODE_2 and return the new device URI,
    asserting it differs from the previous one."""
    volume = background
    previous_uri = volume.state["target"]["deviceUri"]
    try:
        ApiClient.volumes_api().del_volume_target(VOLUME_UUID)
    except ApiException as e:
        # Timeout or node not online: retry the unpublish with force.
        assert e.status in (
            http.HTTPStatus.REQUEST_TIMEOUT,
            http.HTTPStatus.PRECONDITION_FAILED,
        )
        ApiClient.volumes_api().del_volume_target(VOLUME_UUID, force="true")
    republished = ApiClient.volumes_api().put_volume_target(
        VOLUME_UUID, TARGET_NODE_2, Protocol("nvmf"))
    new_uri = republished.state["target"]["deviceUri"]
    assert previous_uri != new_uri
    return new_uri
def publishing_the_volume_should_succeed_with_a_returned_volume_object_containing_the_share_uri(
):
    """publishing the volume should succeed with a returned volume object containing the share URI."""
    volume = ApiClient.volumes_api().put_volume_target(
        VOLUME_UUID, NODE_NAME, Protocol("nvmf"))
    # Both the spec and the state must now carry the nvmf target.
    assert hasattr(volume.spec, "target")
    assert str(volume.spec.target.protocol) == str(Protocol("nvmf"))
    assert hasattr(volume.state, "target")
    assert "nvmf://" in volume.state.target["deviceUri"]
def a_pool_which_does_not_contain_the_volume_topology_label():
    """a pool which does not contain the volume topology label."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    inclusion = volume["spec"]["topology"]["pool_topology"]["labelled"]["inclusion"]
    # Of the pools matching the topology labels one is already occupied, so
    # reduce the count by 1. In this scenario the only labelled pool is in
    # use, leaving zero labelled pools available.
    assert no_of_suitable_pools(inclusion) - 1 == 0
def check_replicas_online():
    """Assert the volume has at least one replica reporting Online."""
    volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
    topology = volume.state.replica_topology
    online_replicas = [
        uuid for uuid in topology if str(topology[uuid].state) == "Online"
    ]
    assert len(online_replicas) > 0
def an_existing_published_volume_with_a_topology_not_matching_pool_labels():
    """an existing published volume with a topology not matching pool labels"""
    # Request placement on pools carrying a pool1-specific label.
    topology = Topology(
        pool_topology=PoolTopology(
            labelled=LabelledTopology(
                exclusion={},
                inclusion={"pool1-specific-key": "pool1-specific-value"},
            )
        )
    )
    volumes_api = ApiClient.volumes_api()
    volumes_api.put_volume(
        VOLUME_UUID,
        CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE, topology=topology),
    )
    # Publish volume so that there is a nexus to add a replica to.
    volumes_api.put_volume_target(VOLUME_UUID, NODE_1_NAME, Protocol("nvmf"))
def background():
    """Fixture: 3-node deployment with NVMe ANA enabled, one pool, and a
    volume published on TARGET_NODE_1; yields the published volume."""
    Deployer.start_with_args([
        "-j",
        "-m=3",
        "-w=10s",
        "--cache-period=1s",
        "--mayastor-env=NEXUS_NVMF_ANA_ENABLE=1,NEXUS_NVMF_RESV_ENABLE=1",
        "--agents-env=TEST_NEXUS_NVMF_ANA_ENABLE=1",
    ])
    ApiClient.pools_api().put_node_pool(
        POOL_NODE, POOL_UUID, CreatePoolBody(["malloc:///disk?size_mb=100"]))
    volumes_api = ApiClient.volumes_api()
    volumes_api.put_volume(
        VOLUME_UUID, CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE))
    volume = volumes_api.put_volume_target(
        VOLUME_UUID, TARGET_NODE_1, Protocol("nvmf"))
    yield volume
    Deployer.stop()
def setting_the_number_of_replicas_to_zero_should_fail_with_a_suitable_error():
    """the replica removal should fail with a suitable error."""
    volumes_api = ApiClient.volumes_api()
    volume = volumes_api.get_volume(VOLUME_UUID)
    # The volume must be published (have a target) for this scenario.
    assert hasattr(volume.state, "target")
    try:
        volumes_api.put_volume_replica_count(VOLUME_UUID, 0)
    except Exception as e:
        # TODO: Return a proper error rather than asserting for a substring
        assert "ApiValueError" in str(type(e))
    else:
        # Fixed: a successful call previously passed the test silently.
        raise AssertionError("expected setting the replica count to 0 to fail")
def a_replica_which_is_managed_but_does_not_have_any_owners():
    """a replica which is managed but does not have any owners."""
    # Kill the Mayastor instance which does not host the nexus.
    Docker.kill_container(MAYASTOR_2)

    # Attempt to delete the volume. This will leave a replica behind on the
    # node that is inaccessible.
    try:
        ApiClient.volumes_api().del_volume(VOLUME_UUID)
    except Exception as e:
        # A Mayastor node is inaccessible, so deleting the volume will fail
        # because the replica on this node cannot be destroyed. Attempting to
        # do so results in a timeout. This is expected and results in a
        # replica being orphaned.
        exception_info = e.__dict__
        assert exception_info["status"] == requests.codes["request_timeout"]
    else:
        # Fixed: a successful delete previously went unnoticed (and left no
        # orphan for the follow-up check), masking a broken precondition.
        raise AssertionError("expected the volume deletion to time out")
    check_orphaned_replica()
def populate_published_2_replica_volume(_create_2_replica_nvmf_volume):
    """Publish the 2-replica volume on NODE1 and return it after verifying
    the nvmf protocol is recorded in both spec and state."""
    do_publish_volume(VOLUME4_UUID, NODE1)
    # Make sure volume is published.
    volume = ApiClient.volumes_api().get_volume(VOLUME4_UUID)
    assert (str(volume.spec.target.protocol) == "nvmf"
            ), "Protocol mismatches for published volume"
    assert (volume.state.target["protocol"] == "nvmf"
            ), "Protocol mismatches for published volume"
    return volume
def init_resources():
    """Create two pools, a self-healing volume, a third pool, then publish
    the volume on NODE_1_NAME."""
    pools_api = ApiClient.pools_api()
    volumes_api = ApiClient.volumes_api()
    pools_api.put_node_pool(
        NODE_1_NAME, POOL_1_UUID, CreatePoolBody(["malloc:///disk?size_mb=50"]))
    pools_api.put_node_pool(
        NODE_2_NAME, POOL_2_UUID, CreatePoolBody(["malloc:///disk?size_mb=50"]))
    volumes_api.put_volume(
        VOLUME_UUID,
        CreateVolumeBody(VolumePolicy(True), NUM_VOLUME_REPLICAS, VOLUME_SIZE),
    )
    # NOTE(review): the third pool is created only after the volume —
    # presumably so the replicas land on the first two pools; confirm.
    pools_api.put_node_pool(
        NODE_3_NAME, POOL_3_UUID, CreatePoolBody(["malloc:///disk?size_mb=50"]))
    # Publish volume so that there is a nexus to add a replica to.
    volume = volumes_api.put_volume_target(
        VOLUME_UUID, NODE_1_NAME, Protocol("nvmf"))
    assert hasattr(volume.spec, "target")
    assert str(volume.spec.target.protocol) == str(Protocol("nvmf"))
def a_user_attempts_to_increase_the_number_of_volume_replicas(replica_ctx):
    """a user attempts to increase the number of volume replicas."""
    volumes_api = ApiClient.volumes_api()
    current_replicas = volumes_api.get_volume(VOLUME_UUID).spec.num_replicas
    try:
        updated = volumes_api.put_volume_replica_count(
            VOLUME_UUID, current_replicas + 1)
        # Record the resulting replica count for the follow-up assertions.
        replica_ctx[REPLICA_CONTEXT_KEY] = updated.spec.num_replicas
    except Exception as e:
        # Stash the error so the "then" step can assert on it.
        replica_ctx[REPLICA_ERROR] = e
def volume_creation_should_succeed_with_a_returned_volume_object_without_pool_topology(
    create_request,
):
    """volume creation should succeed with a returned volume object without pool topology."""
    expected_spec = VolumeSpec(
        1,
        VOLUME_SIZE,
        SpecStatus("Created"),
        VOLUME_UUID,
        VolumePolicy(False),
    )
    # The returned volume must match the expected spec and report Online.
    volume = ApiClient.volumes_api().put_volume(
        VOLUME_UUID, create_request[CREATE_REQUEST_KEY])
    assert str(volume.spec) == str(expected_spec)
    assert str(volume.state["status"]) == "Online"
def volume_creation_should_succeed_with_a_returned_volume_object_with_topology(
    create_request,
):
    """volume creation should succeed with a returned volume object with topology."""
    expected_topology = Topology(
        pool_topology=PoolTopology(
            labelled=LabelledTopology(
                exclusion={},
                inclusion={"openebs.io/created-by": "msp-operator"},
            )
        )
    )
    expected_spec = VolumeSpec(
        1,
        VOLUME_SIZE,
        SpecStatus("Created"),
        VOLUME_UUID,
        VolumePolicy(False),
        topology=expected_topology,
    )
    # The returned volume must match the expected spec and report Online.
    volume = ApiClient.volumes_api().put_volume(
        VOLUME_UUID, create_request[CREATE_REQUEST_KEY])
    assert str(volume.spec) == str(expected_spec)
    assert str(volume.state["status"]) == "Online"
def start_stop_ms1():
    """Fixture: stop the io-engine container hosting the nexus, wait until the
    control plane drops the nexus from the volume target, yield, then restart
    the container."""
    docker_client = docker.from_env()
    containers = docker_client.containers.list(all=True, filters={"name": NODE1})
    # Fixed: containers.list() returns a (possibly empty) list and never
    # raises docker.errors.NotFound, so indexing [0] under that handler
    # escaped as an uncaught IndexError; check for emptiness instead.
    if not containers:
        raise Exception("No Mayastor instance found that hosts the nexus")
    node1 = containers[0]

    # Stop the nexus node and wait till nexus offline status is also reflected
    # in volume target info. Wait at most 60 seconds (12 polls x 5s).
    node1.stop()
    state_synced = False
    for _ in range(12):
        vol = ApiClient.volumes_api().get_volume(VOLUME4_UUID)
        if getattr(vol.state, "target", None) is None:
            state_synced = True
            break
        sleep(5)
    assert state_synced, "Nexus failure is not reflected in volume target info"
    yield
    node1.start()
def set_num_volume_replicas(num_replicas):
    """Set the volume's replica count and return the updated volume object."""
    return ApiClient.volumes_api().put_volume_replica_count(
        VOLUME_UUID, num_replicas)
def num_desired_volume_replicas():
    """Return the desired replica count from the volume's spec."""
    return ApiClient.volumes_api().get_volume(VOLUME_UUID).spec.num_replicas