Example 1
def _values(request):

    global kubeconfig_value, clusterconfig_value, namespace_value
    kubeconfig_value, clusterconfig_value, operator_namespace, test_namespace, _, operator_yaml = scaleop.get_cmd_values(
        request)
    namespace_value = operator_namespace
    condition = scaleop.check_ns_exists(kubeconfig_value, namespace_value)
    operator = scaleop.Scaleoperator(kubeconfig_value, namespace_value,
                                     operator_yaml)
    read_file = scaleop.read_operator_data(clusterconfig_value,
                                           namespace_value)
    ff.cred_check(read_file)
    fileset_exist = ff.fileset_exists(read_file)
    operator.create()
    operator.check()
    scaleop.check_nodes_available(read_file["pluginNodeSelector"],
                                  "pluginNodeSelector")
    scaleop.check_nodes_available(read_file["provisionerNodeSelector"],
                                  "provisionerNodeSelector")
    scaleop.check_nodes_available(read_file["attacherNodeSelector"],
                                  "attacherNodeSelector")

    yield
    operator.delete(condition)
    # Remove the fileset only if this run created it.
    if not fileset_exist and ff.fileset_exists(read_file):
        ff.delete_fileset(read_file)
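The function above is a plain setup/teardown generator: everything before the yield runs as setup, everything after it as cleanup. In the source suite such a generator would normally be wired up as a pytest fixture; a minimal sketch of that wiring is shown below (the session scope and the autouse flag are illustrative assumptions, not taken from the example). Any test collected in the same session then runs between the setup and the teardown phases.

import pytest


# Hypothetical registration of the setup/teardown generator shown above.
# scope="session" and autouse=True are assumptions; the real suite may differ.
@pytest.fixture(scope="session", autouse=True)
def _values(request):
    # Setup: create and verify the operator, as in the example above.
    yield
    # Teardown: delete the operator and any fileset created during the run.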
Example 2

def _values(request):

    global kubeconfig_value, clusterconfig_value, namespace_value
    kubeconfig_value = request.config.option.kubeconfig
    if kubeconfig_value is None:
        kubeconfig_value = "~/.kube/config"
    clusterconfig_value = request.config.option.clusterconfig
    if clusterconfig_value is None:
        clusterconfig_value = "../../operator/deploy/crds/csiscaleoperators.csi.ibm.com_cr.yaml"
    namespace_value = request.config.option.namespace
    if namespace_value is None:
        namespace_value = "ibm-spectrum-scale-csi-driver"
    operator = Scaleoperator(kubeconfig_value)
    read_file = read_scale_config_file(clusterconfig_value, namespace_value)
    operator.create(namespace_value, read_file)
    operator.check()
    check_nodes_available(read_file["pluginNodeSelector"],
                          "pluginNodeSelector")
    check_nodes_available(read_file["provisionerNodeSelector"],
                          "provisionerNodeSelector")
    check_nodes_available(read_file["attacherNodeSelector"],
                          "attacherNodeSelector")

    yield
    operator.delete()
    if ff.fileset_exists(read_file):
        ff.delete_fileset(read_file)
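This variant reads --kubeconfig, --clusterconfig and --namespace directly from request.config.option, which only works if those options have been registered with pytest. A minimal sketch of the matching pytest_addoption hook follows, assuming it lives in the suite's conftest.py (the option names are inferred from the attribute accesses above; the help strings are illustrative).

# conftest.py (illustrative sketch)
def pytest_addoption(parser):
    # Register the command-line options read via request.config.option above.
    parser.addoption("--kubeconfig", action="store", default=None,
                     help="path to the kubeconfig file")
    parser.addoption("--clusterconfig", action="store", default=None,
                     help="path to the CSI Scale operator custom resource YAML")
    parser.addoption("--namespace", action="store", default=None,
                     help="namespace to deploy the operator into")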
Example 3

def values(request):
    global data, remote_data, snapshot_object, kubeconfig_value  # required in every test case
    kubeconfig_value, clusterconfig_value, namespace_value, runslow_val = scaleop.get_cmd_values(request)

    data = scaleop.read_driver_data(clusterconfig_value, namespace_value)
    operator_data = scaleop.read_operator_data(clusterconfig_value, namespace_value)
    keep_objects = data["keepobjects"]
    if "remote" not in data:
        LOGGER.error("remote data is not provided in the CR file")
        assert False
    test_namespace = namespace_value

    remote_data = get_remote_data(data)
    ff.cred_check(data)
    ff.cred_check(remote_data)
    ff.set_data(remote_data)

    operator = scaleop.Scaleoperator(kubeconfig_value, namespace_value)
    operator_object = scaleop.Scaleoperatorobject(operator_data, kubeconfig_value)
    condition = scaleop.check_ns_exists(kubeconfig_value, namespace_value)
    if condition is True:
        if not operator_object.check():
            LOGGER.error("Operator custom object is not deployed successfully")
            assert False
    else:
        operator.create()
        operator.check()
        scaleop.check_nodes_available(operator_data["pluginNodeSelector"], "pluginNodeSelector")
        scaleop.check_nodes_available(
            operator_data["provisionerNodeSelector"], "provisionerNodeSelector")
        scaleop.check_nodes_available(
            operator_data["attacherNodeSelector"], "attacherNodeSelector")
        operator_object.create()
        val = operator_object.check()
        if val is True:
            LOGGER.info("Operator custom object is deployed successfully")
        else:
            LOGGER.error("Operator custom object is not deployed successfully")
            assert False
    if runslow_val:
        value_pvc = [{"access_modes": "ReadWriteMany", "storage": "1Gi"},
                     {"access_modes": "ReadWriteOnce", "storage": "1Gi"}]
    else:
        value_pvc = [{"access_modes": "ReadWriteMany", "storage": "1Gi"}]
    value_vs_class = {"deletionPolicy": "Delete"}
    number_of_snapshots = 1
    snapshot_object = scaleop.Snapshot(
        kubeconfig_value, test_namespace, keep_objects, value_pvc, value_vs_class,
        number_of_snapshots, data["image_name"], remote_data["id"])
    ff.create_dir(remote_data["volDirBasePath"])
    yield
    # Clean up only if this run deployed the operator and objects should not be kept.
    if condition is False and not keep_objects:
        operator_object.delete()
        operator.delete()
        if ff.fileset_exists(data):
            ff.delete_fileset(data)
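Because this fixture publishes data, remote_data and snapshot_object as module-level globals and only yields after the operator deployment has been verified, tests can rely on that state directly. Below is a minimal, hypothetical test illustrating that, assuming values is registered as a pytest fixture and the test sits in the same module so the globals are in scope; the asserted keys are the ones the fixture itself uses when building the Snapshot object and creating the volume directory.

import pytest


@pytest.mark.usefixtures("values")
def test_remote_cluster_data_is_usable():
    # Runs after values() has populated the globals and verified the operator.
    assert "id" in remote_data
    assert "volDirBasePath" in remote_data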