Example #1
    def get_builder(self,
                    preprocessor,
                    base_image,
                    registry,
                    needs_deps_installation=True,
                    pod_spec_mutators=None):
        """Creates a builder instance with right config for GKE

        :param preprocessor: Preprocessor to use to modify inputs
        :param base_image: Base image to use for this job
        :param registry: Registry to push image to. Example: gcr.io/kubeflow-images
        :param needs_deps_installation:  need depends on installation(Default value = True)
        :param pod_spec_mutators: list of functions that is used to mutate the podsspec.
                                  e.g. fairing.cloud.gcp.add_gcp_credentials_if_exists
                                  This can used to set things like volumes and security context.
                                  (Default value =None)

        """

        pod_spec_mutators = pod_spec_mutators or []
        pod_spec_mutators.append(gcp.add_gcp_credentials_if_exists)

        if not needs_deps_installation:
            return AppendBuilder(preprocessor=preprocessor,
                                 base_image=base_image,
                                 registry=registry)
        elif (utils.is_running_in_k8s()
              or not ml_tasks_utils.is_docker_daemon_exists()):
            return ClusterBuilder(preprocessor=preprocessor,
                                  base_image=base_image,
                                  registry=registry,
                                  pod_spec_mutators=pod_spec_mutators,
                                  namespace=self._namespace,
                                  context_source=self._build_context_source)
        elif ml_tasks_utils.is_docker_daemon_exists():
            return DockerBuilder(preprocessor=preprocessor,
                                 base_image=base_image,
                                 registry=registry)
        else:
            msg = ["Not able to guess the right builder for this job!"]
            if not utils.is_running_in_k8s():
                msg.append(
                    " Also, if you are using 'sudo' to access docker on your system, you can"
                    " solve this problem by adding your username to the docker group. "
                    "Reference: https://docs.docker.com/install/linux/linux-postinstall/"
                    "#manage-docker-as-a-non-root-user You need to log out and log back in"
                    " for the change to take effect.")
            message = " ".join(msg)
            raise RuntimeError(message)
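
For orientation, here is a minimal usage sketch of the builder selection above. The call pattern of get_builder() follows Example #1 and the backend class appears in Example #6, but the import paths, registry, image, and command values are assumptions, not taken from the examples.

# Usage sketch only -- import paths and values below are assumed, not from the examples.
from kubeflow.fairing.backends import KubeflowGKEBackend          # assumed import path
from kubeflow.fairing.preprocessors.base import BasePreProcessor  # assumed import path

backend = KubeflowGKEBackend()                        # namespace defaults as in Example #6
preprocessor = BasePreProcessor(command=["python", "train.py"])   # hypothetical entrypoint
builder = backend.get_builder(preprocessor=preprocessor,
                              base_image="gcr.io/kubeflow-images/fairing:dev",  # hypothetical
                              registry="gcr.io/my-project")                     # hypothetical
builder.build()   # builds and pushes the image with the builder chosen above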
Example #2
    def __init__(self,
                 config_file=None,
                 context=None,
                 client_configuration=None,
                 persist_config=True):
        """
        :param config_file: kubeconfig file, defaults to ~/.kube/config. Note that if the SDK
               is running in a cluster and you want to operate against another remote cluster,
               you must set config_file to load the kube-config file explicitly.
        :param context: kubernetes context
        :param client_configuration: The kubernetes.client.Configuration to set configs to.
        :param persist_config: If True, the config file will be updated when changed
        """
        self.config_file = config_file
        self.context = context
        self.client_configuration = client_configuration
        self.persist_config = persist_config
        if config_file or not is_running_in_k8s():
            config.load_kube_config(
                config_file=self.config_file,
                context=self.context,
                client_configuration=self.client_configuration,
                persist_config=self.persist_config)
        else:
            config.load_incluster_config()
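
A short usage sketch of the loading behavior above. The class name ApiClientWrapper is purely hypothetical, since the example only shows an __init__; it is meant to illustrate which branch each construction takes.

# Hypothetical wrapper class name; only the __init__ behavior shown above is assumed.
in_cluster_client = ApiClientWrapper()                          # inside a pod: load_incluster_config()
remote_client = ApiClientWrapper(config_file="~/.kube/config",  # explicit file: load_kube_config()
                                 context="remote-cluster")      # hypothetical context name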
Example #3
    def get_builder(self, preprocessor, base_image, registry, needs_deps_installation=True,  # pylint:disable=arguments-differ
                    pod_spec_mutators=None):
        """Creates a builder instance with right config for the given Kubernetes

        :param preprocessor: Preprocessor to use to modify inputs
        :param base_image: Base image to use for this job
        :param registry: Registry to push image to. Example: gcr.io/kubeflow-images
        :param needs_deps_installation:  need depends on installation(Default value = True)
        :param pod_spec_mutators: list of functions that is used to mutate the podsspec.
                                  e.g. fairing.cloud.gcp.add_gcp_credentials_if_exists
                                  This can used to set things like volumes and security context.
                                  (Default value =None)

        """
        if not needs_deps_installation:
            return AppendBuilder(preprocessor=preprocessor,
                                 base_image=base_image,
                                 registry=registry)
        elif utils.is_running_in_k8s():
            return ClusterBuilder(preprocessor=preprocessor,
                                  base_image=base_image,
                                  registry=registry,
                                  pod_spec_mutators=pod_spec_mutators,
                                  namespace=self._namespace,
                                  context_source=self._build_context_source)
        elif ml_tasks_utils.is_docker_daemon_exists():
            return DockerBuilder(preprocessor=preprocessor,
                                 base_image=base_image,
                                 registry=registry)
        else:
            # TODO (karthikv2k): Add more info on how to resolve this issue
            raise RuntimeError(
                "Not able to guess the right builder for this job!")
Example #4
    def __init__(self, namespace=None, build_context_source=None):
        if not namespace and not utils.is_running_in_k8s():
            logger.warning("Can't determine the namespace automatically. "
                           "Using the 'default' namespace, but it is recommended to provide "
                           "the namespace explicitly. Using the 'default' namespace might "
                           "make it impossible to mount some required secrets in cloud "
                           "backends.")
        self._namespace = namespace or utils.get_default_target_namespace()
        self._build_context_source = build_context_source
Example #5
    def __init__(self):
        if is_running_in_k8s():
            config.load_incluster_config()
        else:
            config.load_kube_config()
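
Since every example branches on is_running_in_k8s(), here is a plausible minimal implementation of that helper, shown only to make the check concrete; it is an assumption, not the library's actual code. A common heuristic is to test for the in-cluster service-account mount.

import os

def is_running_in_k8s():
    # Assumption: detect an in-cluster environment via the service-account mount
    # that Kubernetes injects into every pod by default.
    return os.path.isdir('/var/run/secrets/kubernetes.io/serviceaccount')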
Example #6
    def __init__(self, namespace=None, build_context_source=None):
        if not namespace and not utils.is_running_in_k8s():
            namespace = "kubeflow"
        super(KubeflowGKEBackend, self).__init__(
            namespace, build_context_source)
Example #7
def execute(config,
            docker_registry,
            base_image="gcr.io/kubeflow-fairing/lightgbm:latest",
            namespace=None,
            stream_log=True,
            cores_per_worker=None,
            memory_per_worker=None,
            pod_spec_mutators=None):
    """Runs the LightGBM CLI in a single pod in user's Kubeflow cluster.
    Users can configure it to be a train, predict, and other supported tasks
    by using the right config.
    Please refere https://github.com/microsoft/LightGBM/blob/master/docs/Parameters.rst
    for more information on config options.

    :param config: config entries
    :param docker_registry: docker registry name
    :param base_image: base image (Default value = "gcr.io/kubeflow-fairing/lightgbm:latest")
    :param namespace: k8s namespace (Default value = None)
    :param stream_log: should that stream log? (Default value = True)
    :param cores_per_worker: number of cores per worker (Default value = None)
    :param memory_per_worker: memory value per worker (Default value = None)
    :param pod_spec_mutators: pod spec mutators (Default value = None)

    """
    if not namespace and not fairing_utils.is_running_in_k8s():
        namespace = "kubeflow"
    namespace = namespace or fairing_utils.get_default_target_namespace()
    config_file_name = None
    if isinstance(config, str):
        config_file_name = config
        config = utils.load_properties_config_file(config)
    elif isinstance(config, dict):
        config_file_name = utils.save_properties_config_file(config)
    else:
        raise RuntimeError("config should be of type dict or string(filepath) "
                           "but got {}".format(type(dict)))

    utils.scrub_fields(config, BLACKLISTED_FIELDS)

    _, num_machines = utils.get_config_value(config, NUM_MACHINES_FILEDS)
    num_machines = num_machines or 1
    if num_machines:
        try:
            num_machines = int(num_machines)
        except ValueError:
            raise ValueError(
                "num_machines value in config should be an int >= 1 "
                "but got {}".format(config.get('num_machines')))
        if num_machines < 1:
            raise ValueError(
                "num_machines value in config should >= 1 but got {}".format(
                    num_machines))

    if num_machines > 1:
        config['machine_list_file'] = "mlist.txt"
    output_map = generate_context_files(config, config_file_name, num_machines)

    preprocessor = BasePreProcessor(command=[ENTRYPOINT],
                                    output_map=output_map)
    builder = AppendBuilder(registry=docker_registry,
                            base_image=base_image,
                            preprocessor=preprocessor)
    builder.build()
    pod_spec = builder.generate_pod_spec()

    pod_spec_mutators = pod_spec_mutators or []
    pod_spec_mutators.append(gcp.add_gcp_credentials_if_exists)
    pod_spec_mutators.append(
        k8s_utils.get_resource_mutator(cores_per_worker, memory_per_worker))

    if num_machines == 1:
        # non-distributed mode
        deployer = Job(namespace=namespace,
                       pod_spec_mutators=pod_spec_mutators,
                       stream_log=stream_log)
    else:
        # distributed mode
        deployer = TfJob(namespace=namespace,
                         pod_spec_mutators=pod_spec_mutators,
                         chief_count=1,
                         worker_count=num_machines - 1,
                         stream_log=stream_log)
    deployer.deploy(pod_spec)
    return deployer
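
An illustrative call, following the signature of execute() above. The config values, data path, and registry are hypothetical; they are only meant to show the dict form of config and the distributed (num_machines > 1) path.

# Hypothetical values throughout -- only the execute() signature comes from the example above.
train_config = {
    "task": "train",
    "objective": "binary",
    "data": "train.csv",        # hypothetical training data file
    "num_machines": 2,          # >1 selects the distributed TfJob deployer
}
deployer = execute(train_config,
                   docker_registry="gcr.io/my-project",   # hypothetical registry
                   stream_log=True)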