Example #1
def __init__(self, logger, **kwargs):
    """
    :param **kwargs:
        arbitrary keyword arguments;
        keys should only include: token, host
    :type **kwargs: dict
    """
    self.api_client = ApiClient(**kwargs)
    self.cluster_client = ClusterApi(self.api_client)
    self.libraries_client = LibrariesApi(self.api_client)
    self.logger = logger
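For context, a client like this is typically constructed with the workspace host and a personal access token (ApiClient accepts host and token keyword arguments). A minimal usage sketch, assuming the method belongs to a wrapper class such as the ClusterManagement class in Example #18 below; the host and token values are placeholders:

import logging

# Hypothetical usage; ClusterManagement is defined in Example #18 below.
manager = ClusterManagement(
    logging.getLogger(__name__),
    host="https://example.cloud.databricks.com",  # placeholder workspace URL
    token="dapi-XXXXXXXX",                        # placeholder access token
)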
Example #2
def get_cli(api_client, cluster_id, cluster_name):
    """
    Retrieves metadata about a cluster.
    """
    if cluster_id:
        cluster = ClusterApi(api_client).get_cluster(cluster_id)
    elif cluster_name:
        cluster = ClusterApi(api_client).get_cluster_by_name(cluster_name)
    else:
        raise RuntimeError('Either cluster_id or cluster_name must be provided.')

    click.echo(pretty_format(cluster))
Example #3
class SdkClient:
    def __init__(self, profile=None):
        client = utils.get_api_client(profile)
        self.cluster_client = ClusterApi(client)
        self.jobs_client = JobsApi(client)

    def list_clusters(self):
        return self.cluster_client.list_clusters()

    def get_cluster(self, cluster_id):
        return self.cluster_client.get_cluster(cluster_id)

    def list_jobs(self):
        return self.jobs_client.list_jobs()
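A usage sketch for this wrapper, assuming a configured Databricks CLI profile; the profile name and printed fields are illustrative:

sdk = SdkClient(profile="DEFAULT")  # assumed profile name
for cluster in sdk.list_clusters().get("clusters", []):
    print(cluster["cluster_id"], cluster["cluster_name"], cluster["state"])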
Example #4
def restart_cli(api_client, cluster_id):
    """
    Restarts a Databricks cluster given its ID.

    If the cluster is not currently in a RUNNING state, nothing will happen
    """
    ClusterApi(api_client).restart_cluster(cluster_id)
Example #5
def list_zones_cli(api_client):
    """
    Lists zones where clusters can be created.

    The output format is specified in
    https://docs.databricks.com/api/latest/clusters.html#list-zones
    """
    click.echo(pretty_format(ClusterApi(api_client).list_zones()))
Example #6
def create_cli(api_client, json_file, json):
    """
    Creates a Databricks cluster.

    The specification for the request json can be found at
    https://docs.databricks.com/api/latest/clusters.html#create
    """
    json_cli_base(json_file, json, lambda json: ClusterApi(api_client).create_cluster(json))
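The request JSON passed to create_cluster follows the Clusters API create schema; a minimal payload might look like the sketch below. The node type and runtime version are illustrative assumptions and should be checked against list_node_types_cli and spark_versions_cli:

create_spec = {
    "cluster_name": "example-cluster",   # illustrative name
    "spark_version": "7.3.x-scala2.12",  # assumed runtime version
    "node_type_id": "i3.xlarge",         # assumed AWS node type
    "num_workers": 2,
    "autotermination_minutes": 60,
}
# ClusterApi(api_client).create_cluster(create_spec)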
Example #7
def list_node_types_cli(api_client):
    """
    Lists possible node types for a cluster.

    The output format is specified in
    https://docs.databricks.com/api/latest/clusters.html#list-node-types
    """
    click.echo(pretty_format(ClusterApi(api_client).list_node_types()))
Example #8
def spark_versions_cli(api_client):
    """
    Lists possible Databricks Runtime versions for a cluster.

    The output format is specified in
    https://docs.databricks.com/api/latest/clusters.html#spark-versions
    """
    click.echo(pretty_format(ClusterApi(api_client).spark_versions()))
Example #9
def permanent_delete_cli(api_client, cluster_id):
    """
    Permanently deletes a Spark cluster.

    If the cluster is running, it is terminated and its resources are asynchronously removed.
    If the cluster is terminated, then it is immediately removed.
    """
    ClusterApi(api_client).permanent_delete(cluster_id)
Example #10
def start_cli(api_client, cluster_id):
    """
    Starts a terminated Databricks cluster given its ID.

    If the cluster is not currently in a TERMINATED state, nothing will happen.
    """
    ClusterApi(api_client).start_cluster(cluster_id)
Example #11
def resize_cli(api_client, cluster_id, num_workers):
    """Resizes a Databricks cluster given its ID.

    Provide a `--num-workers` parameter to indicate the new cluster size.

    If the cluster is not currently in a RUNNING state, this will cause an
    error to occur.
    """
    ClusterApi(api_client).resize_cluster(cluster_id, num_workers)
Example #12
def delete_cli(api_client, cluster_id):
    """
    Removes a Databricks cluster given its ID.

    The cluster is removed asynchronously. Once the deletion has completed,
    the cluster will be in a TERMINATED state. If the cluster is already in
    a TERMINATING or TERMINATED state, nothing will happen.

    Use ``databricks clusters get --cluster-id CLUSTER_ID`` to check termination states.
    """
    ClusterApi(api_client).delete_cluster(cluster_id)
Example #13
def cluster_events_cli(api_client, cluster_id, start_time, end_time, order, event_type, offset,
                       limit, output):
    """
    Gets events for a Spark cluster.
    """
    events_json = ClusterApi(api_client).get_events(
        cluster_id=cluster_id, start_time=start_time, end_time=end_time, order=order,
        event_types=event_type, offset=offset, limit=limit)
    if OutputClickType.is_json(output):
        click.echo(pretty_format(events_json))
    else:
        click.echo(tabulate(_cluster_events_to_table(events_json), tablefmt='plain'))
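The _cluster_events_to_table helper is not shown in this example; a plausible reconstruction, assuming each event carries a timestamp, a type, and a details object (the field names follow the cluster Events API, but this exact implementation is an assumption):

def _cluster_events_to_table(events_json):
    # Flatten each event into a (timestamp, type, details) row for tabulate().
    rows = []
    for event in events_json.get('events', []):
        rows.append((event.get('timestamp'), event.get('type'),
                     event.get('details')))
    return rows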
Example #14
def edit_cli(api_client, json_file, json):
    """
    Edits a Databricks cluster.

    The specification for the request json can be found at
    https://docs.databricks.com/api/latest/clusters.html#edit
    """
    if not bool(json_file) ^ bool(json):
        raise RuntimeError('Either --json-file or --json should be provided')
    if json_file:
        with open(json_file, 'r') as f:
            json = f.read()
    deser_json = json_loads(json)
    ClusterApi(api_client).edit_cluster(deser_json)
Example #15
def get_cluster_state(profile, cluster_id):
    """Get cluster state
    
    Args:
        profile (str): Databricks CLI profile string
        cluster_id (str): Cluster ID
    
    Returns:
        dict: ClusterStatus
    """
    apiclient = connect(profile)
    try:
        client = ClusterApi(apiclient)
        cluster = client.get_cluster(cluster_id)
        state = cluster.get("state", None)
    except Exception:
        _logger.warning("DbStatusHandler cluster not reachable")
        state = "TERMINATED"

    return state
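A caller might poll this helper until the cluster leaves a transitional state, as in the sketch below; the profile, cluster ID, and 10-second interval are arbitrary placeholder choices:

import time

state = get_cluster_state("DEFAULT", "0123-456789-abcdef")  # placeholder profile/ID
while state in ("PENDING", "RESTARTING", "RESIZING", "TERMINATING"):
    time.sleep(10)
    state = get_cluster_state("DEFAULT", "0123-456789-abcdef")
print("final state:", state)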
Example #16
def list_cli(api_client, output):
    """
    Lists active and recently terminated clusters.

    Returns information about all currently active clusters, and up
    to 100 most recently terminated clusters in the past 7 days.

    By default, the output format is a human-readable table with the following fields:

      - Cluster ID

      - Cluster name

      - Cluster state
    """
    clusters_json = ClusterApi(api_client).list_clusters()
    if OutputClickType.is_json(output):
        click.echo(pretty_format(clusters_json))
    else:
        click.echo(tabulate(_clusters_to_table(clusters_json), tablefmt='plain'))
Example #17
def tgt_cluster_api(tgt_api_client: ApiClient):
    return ClusterApi(tgt_api_client)
Example #18
class ClusterManagement:
    def __init__(self, logger, **kwargs):
        """
        :param **kwargs:
            arbitrary keyword arguments;
            keys should only include: token, host
        :type **kwargs: dict
        """
        self.api_client = ApiClient(**kwargs)
        self.cluster_client = ClusterApi(self.api_client)
        self.libraries_client = LibrariesApi(self.api_client)
        self.logger = logger

    def create_cluster(self, cluster_specs):
        """function to build/edit cluster and start

        :param cluster_specs: cluster specs in clusterconf.yaml
        :type cluster_specs: dict
        """

        try:
            cluster = self.cluster_client.get_cluster_by_name(
                cluster_specs["cluster_name"])

            self.logger.info(f"cluster {cluster['cluster_name']} exists "
                             f"with id {cluster['cluster_id']}")
            self.logger.debug(cluster_specs)
            self.logger.debug(cluster)

            if not cluster_specs.items() <= cluster.items():
                self.logger.warning(
                    "cluster spec doesn't match existing cluster")

                cluster_specs['cluster_id'] = cluster['cluster_id']
                self.cluster_client.edit_cluster(cluster_specs)
            else:
                self.logger.info("cluster spec matches")
        except Exception:
            cluster = self.cluster_client.create_cluster(cluster_specs)
            self.logger.info(f"the cluster {cluster} is being created")
            time.sleep(30)

        cluster_id = cluster['cluster_id']
        status = self._cluster_status(cluster_id)

        while status['state'] in ["RESTARTING", "RESIZING", "TERMINATING"]:
            self.logger.info(
                f"waiting for the cluster. status {status['state']}")
            time.sleep(10)
            status = self._cluster_status(cluster_id)

        while status['state'] in ["TERMINATED", "PENDING"]:
            self.logger.info(f"cluster status {status['state']}")
            if status['state'] == "TERMINATED":
                self.logger.info(f"starting cluster, status {status['state']}")
                self.cluster_client.start_cluster(cluster_id)

            time.sleep(10)
            status = self._cluster_status(cluster_id)

        self.logger.info(f"cluster is up. final status: {status['state']}")

        return cluster_id

    def install_cluster_library(self, cluster_id, cluster_libraries):
        """function to install libraries on cluster

        :param cluster_id: id of cluster in Databricks to install libs on
        :type cluster_id: str
        :param cluster_libraries: clusterlib.yaml
        :type cluster_libraries: list(dict)
        """
        try:
            if not isinstance(cluster_libraries, list):
                raise ValueError(
                    f"cluster_libraries is not a list: {cluster_libraries}")

            current_libs = self.libraries_client.cluster_status(cluster_id)

            # parse the libs to match the yaml
            parsed_currentlibs = []
            if current_libs.get("library_statuses"):
                for lib in current_libs["library_statuses"]:
                    parsed_currentlibs.append(lib["library"])

            install_libs = [
                x for x in cluster_libraries if x not in parsed_currentlibs
            ]
            self.logger.info(f"install libraries: {install_libs}")
            self.libraries_client.install_libraries(cluster_id, install_libs)

            uninstall_libs = [
                x for x in parsed_currentlibs if x not in cluster_libraries
            ]
            self.logger.warning(f"uninstall libraries: {uninstall_libs}")
            self.libraries_client.uninstall_libraries(cluster_id,
                                                      uninstall_libs)

        except Exception as error:
            self.logger.error(f"install_cluster_library error: {repr(error)}")

    def _cluster_status(self, cluster_id):
        """internal method to get cluster status

        :param cluster_id: id of databricks cluster
        :type cluster_id: str
        """
        try:
            status = self.cluster_client.get_cluster(cluster_id)
            return status
        except Exception as error:
            self.logger.error(f"cluster status error: {error}")

    def delete_unmanaged_clusters(self, cluster_config):
        """function to delete clusters that are not in clusterconf.yaml

        :param cluster_config: clusterconf.yaml
        :type cluster_config: list(dict)
        """
        existing_clusters = self.cluster_client.list_clusters()
        if existing_clusters.get("clusters"):
            existing_clusters = [
                c for c in existing_clusters.get("clusters")
                if c["cluster_source"].upper() != "JOB"
            ]
        self.logger.debug(existing_clusters)

        cluster_list = [c["cluster_name"] for c in cluster_config]
        remove_cluster = [(c["cluster_name"], c["cluster_id"])
                          for c in existing_clusters
                          if c["cluster_name"] not in cluster_list]

        self.logger.warning("removing unmanaged clusters:")
        self.logger.warning(remove_cluster)

        for c in remove_cluster:
            self.logger.debug(f"deleting {c[1]}")
            self.cluster_client.permanent_delete(c[1])

        return

    def main(self, cluster_specs, cluster_libraries):
        """main method to build/edit clusters and install libs

        :param cluster_specs: cluster spec from clusterconf.yaml
        :type cluster_specs: dict
        :param cluster_libraries: clusterlib.yaml
        :type cluster_libraries: list(dict)
        """
        # self.logger.info("=======================================================")
        self.logger.info(
            f"create/update cluster: {cluster_specs['cluster_name']}")
        cluster_id = self.create_cluster(cluster_specs)

        self.logger.info("installing libraries")
        self.install_cluster_library(cluster_id, cluster_libraries)
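Driving this class end to end might look like the sketch below. The YAML file names mirror the docstrings, while the host and token values are placeholders, and PyYAML is assumed to be available:

import logging
import yaml  # assumes PyYAML is installed

logging.basicConfig(level=logging.INFO)
manager = ClusterManagement(
    logging.getLogger("clusters"),
    host="https://example.cloud.databricks.com",  # placeholder
    token="dapi-XXXXXXXX",                        # placeholder
)

# Load the cluster spec and library list referenced in the docstrings.
with open("clusterconf.yaml") as f:
    cluster_specs = yaml.safe_load(f)
with open("clusterlib.yaml") as f:
    cluster_libraries = yaml.safe_load(f)

manager.main(cluster_specs, cluster_libraries)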
Example #19
                       overwrite=args.overwrite)
    print("Created: {}".format(myegg))

    ############################
    # Interact with Databricks:
    ############################

    # first make sure you are using the correct profile and connecting to the intended workspace
    my_api_client = _get_api_client(
        ProfileConfigProvider(args.profile).get_config())

    # Create a cluster if flagged
    if args.create_cluster:
        # treat args.cluster_id as the name, because if you create a cluster, you do not know its id yet.
        DEFAULT_CLUSTER_CONFIG["cluster_name"] = args.cluster_id
        cluster_info = ClusterApi(my_api_client).create_cluster(
            DEFAULT_CLUSTER_CONFIG)
        args.cluster_id = cluster_info["cluster_id"]
        print("Creating a new cluster with name {}. New cluster_id={}".format(
            DEFAULT_CLUSTER_CONFIG["cluster_name"], args.cluster_id))

    # Upload the egg:
    upload_path = Path(args.dbfs_path, args.eggname).as_posix()

    # Check if file exists to alert user.
    print("Uploading {} to databricks at {}".format(args.eggname, upload_path))
    if dbfs_file_exists(my_api_client, upload_path):
        if args.overwrite:
            print("Overwriting file at {}".format(upload_path))
        else:
            raise IOError("""
            {} already exists on databricks cluster. 
Example #20
def configure_ssh(profile, cluster_id):
    """Configure SSH for the remote cluster

    Args:
        profile (str): Databricks CLI profile string
        cluster_id (str): cluster ID
    """
    sshkey_file = os.path.expanduser("~/.ssh/id_%s" % profile)
    if not os.path.exists(sshkey_file):
        print("\n   => ssh key '%s' does not exist" % sshkey_file)
        answer = input("   => Shall it be created (y/n)? (default = n): ")
        if answer.lower() == "y":
            print("   => Creating ssh key %s" % sshkey_file)
            result = execute(
                ["ssh-keygen", "-b", "2048", "-N", "", "-f", sshkey_file])
            if result["returncode"] == 0:
                print_ok("   => OK")
            else:
                print_error(result["stderr"])
                bye()
        else:
            bye()
    else:
        print_ok("\n   => ssh key '%s' already exists" % sshkey_file)

    with open(sshkey_file + ".pub", "r") as fd:
        sshkey = fd.read().strip()

    try:
        apiclient = connect(profile)
        client = ClusterApi(apiclient)
    except Exception as ex:  # pylint: disable=broad-except
        print_error(ex)
        return None

    try:
        response = client.get_cluster(cluster_id)
    except Exception as ex:  # pylint: disable=broad-except
        print_error(ex)
        return None

    ssh_public_keys = response.get("ssh_public_keys", [])
    if sshkey in [key.strip() for key in ssh_public_keys]:
        print_ok("   => public ssh key already configured for cluster %s" %
                 cluster_id)
        bye()

    request = {}

    for key, val in response.items():
        if key not in [
                "driver",
                "executors",
                "spark_context_id",
                "state",
                "state_message",
                "start_time",
                "terminated_time",
                "last_state_loss_time",
                "last_activity_time",
                "disk_spec",
        ]:  # omit runtime attributes
            request[key] = val

    request["ssh_public_keys"] = ssh_public_keys + [sshkey]

    print_warning("   => The ssh key will be added to the cluster." +
                  "\n   Note: The cluster will be restarted immediately!")
    answer = input(
        "   => Shall the ssh key be added and the cluster be restarted (y/n)? (default = n): "
    )
    if answer.lower() == "y":
        try:
            response = client.edit_cluster(request)
        except DatabricksApiException as ex:
            print_error(str(ex))
            return None
        print_ok("   => OK")
    else:
        print_error("   => Cancelled")
Example #21
                      getContext().apiUrl().get())

# COMMAND ----------

apiclient.perform_query("GET", "/clusters/list?state=RUNNING")

# COMMAND ----------

from databricks_cli.clusters.api import ClusterApi
import json
from pyspark.sql.types import *
from pyspark.sql.functions import *

# COMMAND ----------

clusters_api = ClusterApi(apiclient)

# COMMAND ----------

data = clusters_api.list_clusters()
rdd = sc.parallelize(data['clusters']).map(lambda x: json.dumps(x))

# COMMAND ----------

raw_clusters_df = spark.read.json(rdd)

# COMMAND ----------

parsed_clusters = raw_clusters_df.filter(col('init_scripts').isNotNull())
display(parsed_clusters)
Example #22
import sys

from databricks_cli.clusters.api import ClusterApi
from databrickslabs_jupyterlab.remote import connect

from helpers import get_profile, get_running_clusters, remove_running_clusters


def delete_cluster(client, cluster_id):
    try:
        response = client.permanent_delete(cluster_id)
        return response
    except Exception as ex:  # pylint: disable=broad-except
        print(ex)
        return None


clusters = get_running_clusters()
profile = get_profile()

try:
    apiclient = connect(profile)
    client = ClusterApi(apiclient)
except Exception as ex:  # pylint: disable=broad-except
    print(ex)
    sys.exit(1)


for name, cluster_id in clusters.items():
    print(name)
    delete_cluster(client, cluster_id)

remove_running_clusters()
Example #23
def __init__(self, profile=None):
    client = utils.get_api_client(profile)
    self.cluster_client = ClusterApi(client)
    self.jobs_client = JobsApi(client)
Example #24
def src_cluster_api(src_api_client: ApiClient):
    return ClusterApi(src_api_client)
Example #25
def get_cli(api_client, cluster_id):
    """
    Retrieves metadata about a cluster.
    """
    click.echo(pretty_format(ClusterApi(api_client).get_cluster(cluster_id)))
Example #26
def configure_ssh(profile, host, token, cluster_id):
    """Configure SSH for the remote cluster
    
    Args:
        profile (str): Databricks CLI profile string
        host (str): host from databricks cli config for given profile string
        token (str): token from databricks cli config for given profile string
        cluster_id (str): cluster ID
    """
    sshkey_file = os.path.expanduser("~/.ssh/id_%s" % profile)
    if not os.path.exists(sshkey_file):
        print("\n   => ssh key '%s' does not exist" % sshkey_file)
        answer = input("   => Shall it be created (y/n)? (default = n): ")
        if answer.lower() == "y":
            print("   => Creating ssh key %s" % sshkey_file)
            result = execute(
                ["ssh-keygen", "-b", "2048", "-N", "", "-f", sshkey_file])
            if result["returncode"] == 0:
                print_ok("   => OK")
            else:
                print_error(result["stderr"])
                bye()
        else:
            bye()
    else:
        print_ok("\n   => ssh key '%s' already exists" % sshkey_file)

    with open(sshkey_file + ".pub", "r") as fd:
        sshkey = fd.read().strip()

    try:
        apiclient = connect(profile)
        client = ClusterApi(apiclient)
    except Exception as ex:
        print_error(ex)
        return None

    try:
        response = client.get_cluster(cluster_id)
    except Exception as ex:
        print_error(ex)
        return None

    ssh_public_keys = response.get("ssh_public_keys", [])
    if sshkey in [key.strip() for key in ssh_public_keys]:
        print_ok("   => public ssh key already configured for cluster %s" %
                 cluster_id)
        bye()

    request = {}
    for key in [
            "autotermination_minutes",
            "cluster_id",
            "cluster_name",
            "cluster_source",
            "creator_user_name",
            "default_tags",
            "driver_node_type_id",
            "enable_elastic_disk",
            "init_scripts_safe_mode",
            "node_type_id",
            "spark_version",
    ]:
        request[key] = response[key]

    if response.get("spark_env_vars", None) is not None:
        request["spark_env_vars"] = response["spark_env_vars"]

    if response.get("aws_attributes", None) is not None:
        request["aws_attributes"] = response["aws_attributes"]

    if response.get("num_workers", None) is not None:
        request["num_workers"] = response["num_workers"]

    if response.get("autoscale", None) is not None:
        request["autoscale"] = response["autoscale"]

    request["ssh_public_keys"] = ssh_public_keys + [sshkey]

    print_warning(
        "   => The ssh key will be added to the cluster. \n   Note: The cluster will be restarted immediately!"
    )
    answer = input(
        "   => Shall the ssh key be added and the cluster be restarted (y/n)? (default = n): "
    )
    if answer.lower() == "y":
        try:
            response = client.edit_cluster(request)
        except DatabricksApiException as ex:
            print_error(str(ex))
            return None
        print_ok("   => OK")
    else:
        print_error("   => Cancelled")
Example #27
def get_cluster(profile, cluster_id=None, status=None):
    """Get the cluster configuration from remote
    
    Args:
        profile (str): Databricks CLI profile string
        cluster_id (str, optional): If cluster_id is given, the user will not be asked to select one.
                                    Defaults to None.
        status (Status, optional): A Status class providing set_status. Defaults to None.
    
    Returns:
        tuple: Cluster configs: cluster_id, public_ip, cluster_name and a flag whether it was started
    
    Returns:
        tuple: (cluster_id, public_ip, cluster_name, started)
    """
    with open("%s/.ssh/id_%s.pub" % (expanduser("~"), profile)) as fd:
        try:
            ssh_pub = fd.read().strip()
        except Exception:
            print_error(
                "   Error: ssh key for profile 'id_%s.pub' does not exist in %s/.ssh"
                % (profile, expanduser("~")))
            bye()

    try:
        apiclient = connect(profile)
        client = ClusterApi(apiclient)
        clusters = client.list_clusters()
    except Exception as ex:
        print_error(ex)
        return (None, None, None, None)

    clusters = clusters["clusters"]

    if cluster_id is not None:
        cluster = None
        for c in clusters:
            if c["cluster_id"] == cluster_id:
                cluster = c
                break

        if cluster is None:
            print_error(
                "   Error: A cluster with id '%s' does not exist in the workspace of profile '%s'"
                % (cluster_id, profile))
            return (None, None, None, None)

        if ssh_pub not in [
                c.strip() for c in cluster.get("ssh_public_keys", [])
        ]:
            print_error(
                "   Error: Cluster with id '%s' does not have ssh key '~/.ssh/id_%s' configured"
                % (cluster_id, profile))
            return (None, None, None, None)
    else:
        my_clusters = [
            cluster for cluster in clusters if ssh_pub in
            [c.strip() for c in cluster.get("ssh_public_keys", [])]
        ]

        if not my_clusters:
            print_error(
                "    Error: There is no cluster in the workspace for profile '%s' configured with ssh key '~/.ssh/id_%s':"
                % (profile, profile))
            print(
                "    Use 'databrickslabs_jupyterlab %s -s' to configure ssh for clusters in this workspace\n"
                % profile)
            return (None, None, None, None)

        current_conda_env = os.environ.get("CONDA_DEFAULT_ENV", None)
        found = None
        for i, c in enumerate(my_clusters):
            if c["cluster_name"].replace(" ", "_") == current_conda_env:
                found = c["cluster_name"]
                break

        if found is not None:
            print_warning(
                "\n   => The current conda environment is '%s'.\n      You might want to select cluster %d with the name '%s'?\n"
                % (current_conda_env, i, found))

        cluster = select_cluster(my_clusters)

    cluster_id = cluster["cluster_id"]
    cluster_name = cluster["cluster_name"]

    try:
        response = client.get_cluster(cluster_id)
    except Exception as ex:
        print_error(ex)
        return (None, None, None, None)

    state = response["state"]
    if state not in ("RUNNING", "RESIZING"):
        if state == "TERMINATED":
            print("   => Starting cluster %s" % cluster_id)
            if status is not None:
                status.set_status(profile, cluster_id, "Cluster Starting")
            try:
                response = client.start_cluster(cluster_id)
            except Exception as ex:
                print_error(ex)
                return (None, None, None, None)

        print(
            "   => Waiting for cluster %s to start (this can take up to 5 min)"
            % cluster_id)
        print("   ", end="", flush=True)

        while state not in ("RUNNING", "RESIZING"):
            if status is not None:
                status.set_status(profile, cluster_id, "Cluster Starting",
                                  False)
            print(".", end="", flush=True)
            time.sleep(5)
            try:
                response = client.get_cluster(cluster_id)
            except Exception as ex:
                print_error(ex)
                return (None, None, None, None)

            if response.get("error", None) is not None:
                print_error(response["error"])
                return (None, None, None, None)
            else:
                state = response["state"]
        print_ok("\n   => OK")

        if status is not None:
            status.set_status(profile, cluster_id, "Cluster started")

        print(
            "\n   => Waiting for libraries on cluster %s to be installed (this can take some time)"
            % cluster_id)
        print("   ", end="", flush=True)

        done = False
        while not done:
            try:
                states = get_library_state(profile, cluster_id)
            except DatabricksApiException as ex:
                print_error(ex)
                return (None, None, None, None)

            installing = any(
                [s in ["PENDING", "RESOLVING", "INSTALLING"] for s in states])
            if installing:
                if status is not None:
                    status.set_status(profile, cluster_id,
                                      "Installing cluster libraries", False)
                print(".", end="", flush=True)
                time.sleep(5)
            else:
                done = True
                print_ok("\n   => OK\n")

        if status is not None:
            status.set_status(profile, cluster_id,
                              "Cluster libraries installed", False)

    public_ip = response["driver"].get("public_dns", None)
    if public_ip is None:
        print_error("   Error: Cluster does not have public DNS name")
        return (None, None, None, None)

    print_ok("   => Selected cluster: %s (%s)" % (cluster_name, public_ip))

    return (cluster_id, public_ip, cluster_name, None)
Example #28
def test_src_clusters(src_cluster_api: ClusterApi):
    clusters = src_cluster_api.list_clusters()
    print(clusters)
    assert clusters is not None
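The src_cluster_api argument is presumably provided by a pytest fixture built around the factory in Example #24; a minimal fixture sketch, where the host and token handling are assumptions:

import pytest
from databricks_cli.sdk.api_client import ApiClient
from databricks_cli.clusters.api import ClusterApi

@pytest.fixture(name="src_cluster_api")
def src_cluster_api_fixture() -> ClusterApi:
    # Assumed construction; real code might read credentials from a CLI profile.
    client = ApiClient(host="https://example.cloud.databricks.com",  # placeholder
                       token="dapi-XXXXXXXX")                        # placeholder
    return ClusterApi(client)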