def delete(self, experiment_name):
    """
    Delete the Kubernetes namespace backing an experiment.

    Args:
        experiment_name: namespace to delete; must be a valid DNS name
            and must not be one of the reserved Kubernetes namespaces.

    Raises:
        ValueError: if experiment_name is a reserved namespace.
    """
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently allow deleting kube-system etc.
    if experiment_name in _RESERVED_NS:
        raise ValueError(
            'cannot delete reserved names: default, kube-public, kube-system')
    check_valid_dns(experiment_name)
    runner.run_verbose(
        'kubectl delete namespace {}'.format(experiment_name),
        print_out=True,
        raise_on_error=False)
def launch(self, experiment_spec, force=False, dry_run=False):
    """
    Compile and launch a Kubernetes experiment.

    Args:
        experiment_spec: spec object exposing `.name` and `.compile()`
            (compile() yields the kubectl launch plan YAML).
        force: if True, overwrite an existing experiment of the same name.
        dry_run: print the generated launch plan instead of launching.

    Raises:
        ValueError: if the experiment already exists and force is False.
    """
    print('launching', experiment_spec.name)
    launch_plan = experiment_spec.compile()
    if dry_run:
        print(launch_plan)
        return
    # TODO: some of them should be shared
    if self.fs.has_experiment_folder():
        if not force and self.fs.experiment_exists(experiment_spec.name):
            raise ValueError(
                '[Error] Experiment {} already exists'.format(
                    experiment_spec.name))
        # Persist the spec and the generated kube.yml next to it, then
        # launch from the saved file.
        experiment_file = Path(self.fs.save_experiment(experiment_spec))
        launch_plan_file = experiment_file.parent / 'kube.yml'
        with launch_plan_file.open('w') as f:
            f.write(launch_plan)
        runner.run_verbose(
            'kubectl create namespace ' + experiment_spec.name)
        runner.run_verbose(
            'kubectl create -f "{}" --namespace {}'.format(
                launch_plan_file, experiment_spec.name))
    else:
        # No experiment folder: feed the launch plan to kubectl via stdin.
        runner.run_verbose(
            'kubectl create namespace ' + experiment_spec.name)
        runner.run_verbose(
            'kubectl create -f - --namespace {}'.format(
                experiment_spec.name),
            stdin=launch_plan)
    # Common tail (was duplicated in both branches): make the new
    # namespace the default for subsequent kubectl commands.
    self.set_experiment(experiment_spec.name)
def get_log(self, experiment_name, process_name, process_group=None,
            follow=False, since=0, tail=500, print_logs=False):
    """
    Fetch (or stream) logs for one process of an experiment.

    Args:
        experiment_name: namespace to query.
        process_name: container/process whose logs are wanted.
        process_group: if given, the pod that groups several processes;
            otherwise the process runs in its own pod.
        follow: stream logs to stdout (blocks) instead of returning them.
        since: only logs newer than this (seconds); 0 means all.
        tail: max number of trailing lines to fetch.
        print_logs: also echo fetched logs to stdout.

    Returns:
        Log text as a string ('' on kubectl failure); None when follow=True.
    """
    # When processes are grouped, the group is the pod and the process is
    # a container inside it; otherwise the process is its own pod.
    # (The original also computed a `container_name` local that always
    # equalled process_name and was never used — removed.)
    pod_name = process_name if process_group is None else process_group
    cmd = self._get_logs_cmd(
        pod_name, process_name,
        follow=follow, since=since, tail=tail, namespace=experiment_name)
    if follow:
        # os.system will not block, stream the latest logs to stdout
        runner.run_raw(cmd)
    else:
        out, err, retcode = runner.run_verbose(
            cmd, print_out=print_logs, raise_on_error=False)
        return out if retcode == 0 else ''
def config_view(self):
    """
    Run `kubectl config view` and parse its YAML output.

    Returns:
        BeneDict with the current context and cluster information.
    """
    stdout, _, _ = runner.run_verbose(
        'kubectl config view',
        print_out=False,
        raise_on_error=True)
    parsed = load_yaml_str(stdout)
    return BeneDict(parsed)
def set_experiment(self, namespace):
    """
    Make `namespace` the default namespace for all subsequent kubectl calls.

    https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
    """
    check_valid_dns(namespace)
    # $(...) is expanded by the shell, so this rewrites whatever context
    # is currently active.
    switch_cmd = (
        'kubectl config set-context $(kubectl config current-context) '
        '--namespace={}'.format(namespace))
    _, _, exit_code = runner.run_verbose(
        switch_cmd, print_out=True, raise_on_error=False)
    if exit_code == 0:
        print('successfully switched to namespace `{}`'.format(namespace))
def launch(self, experiment_spec, dry_run=False):
    """
    Launches a Docker experiment specified by the given spec.

    Args:
        experiment_spec: a DockerExperimentSpec object
        dry_run: print out the generated YAML config instead of actually
            launching Docker containers.
    """
    print('Launching a Docker experiment', experiment_spec.name)
    launch_plan = experiment_spec.yml()
    if dry_run:
        print(launch_plan)
        return
    # BUG FIX: the original had a trailing comma after `.format(...)`,
    # which made compose_cmd a 1-tuple instead of a command string.
    compose_cmd = 'docker-compose -p {} -f - up -d'.format(
        experiment_spec.name)
    out, err, retcode = runner.run_verbose(compose_cmd, stdin=launch_plan)
    if retcode != 0:
        print('Error while starting Docker experiment')
def query_resources(self, resource,
                    output_format,
                    names=None,
                    labels='',
                    fields='',
                    namespace=None):
    """
    Query all items in the resource with `output_format`
    JSONpath: https://kubernetes.io/docs/reference/kubectl/jsonpath/
    label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/

    Args:
        resource: pod, service, deployment, etc.
        output_format: https://kubernetes.io/docs/reference/kubectl/overview/#output-options
          - custom-columns=<spec>
          - custom-columns-file=<filename>
          - json: returns a dict
          - jsonpath=<template>
          - jsonpath-file=<filename>
          - name: list
          - wide
          - yaml: returns a dict
        names: list of names to get resource, mutually exclusive with
            label and field selectors. Should only specify one.
        labels: label selector syntax, comma separated as logical AND. E.g:
          - equality: mylabel=production
          - inequality: mylabel!=production
          - set: mylabel in (group1, group2)
          - set exclude: mylabel notin (group1, group2)
          - don't check value, only check key existence: mylabel
          - don't check value, only check key nonexistence: !mylabel
        fields: field selector, similar to label selector but operates on the
            pod fields, such as `status.phase=Running`
            fields can be found from `kubectl get pod <mypod> -o yaml`
        namespace: namespace to query; None uses the kubectl default.

    Returns:
        dict if output format is yaml or json
        list if output format is name
        string from stdout otherwise

    Raises:
        ValueError: if both names and (labels or fields) are given.
    """
    if names and (labels or fields):
        raise ValueError(
            'names and (labels or fields) are mutually exclusive')
    # Build the kubectl command incrementally: resource, namespace flag,
    # then either explicit names or label/field selectors.
    cmd = 'kubectl get ' + resource
    cmd += self._get_ns_cmd(namespace)
    if names is None:
        cmd += self._get_selectors(labels, fields)
    else:
        assert isinstance(names, (list, tuple))
        cmd += ' ' + ' '.join(names)
    if '=' in output_format:
        # quoting the part after jsonpath=<...>
        # (shell-quote only the template/spec, keep the prefix bare so
        # kubectl still recognizes the output flag)
        prefix, arg = output_format.split('=', 1)
        output_format = prefix + '=' + shlex.quote(arg)
    cmd += ' -o ' + output_format
    out, _, _ = runner.run_verbose(cmd,
                                   print_out=False,
                                   raise_on_error=True)
    # Parse stdout according to the requested format; note the '='
    # formats (jsonpath, custom-columns) fall through to the raw string.
    if output_format == 'yaml':
        return load_yaml_str(out)
    elif output_format == 'json':
        return load_json_str(out)
    elif output_format == 'name':
        return out.split('\n')
    else:
        return out
def current_context(self):
    """
    Return the name of the active kubectl context, i.e. the stdout of
    `kubectl config current-context`.
    """
    stdout, _stderr, _retcode = runner.run_verbose(
        'kubectl config current-context',
        print_out=False,
        raise_on_error=True)
    return stdout