def init(self): self.namespace = "default" self.k8s_manifests = [] logger.debug("Given config: %s", self.config) if self.config.get("namespace"): self.namespace = self.config.get("namespace") logger.info("Using namespace %s", self.namespace) if self.container: self.kubectl = self._find_kubectl(Utils.getRoot()) kube_conf_path = "/etc/kubernetes" host_kube_conf_path = Utils.get_real_abspath(kube_conf_path) if not os.path.exists(kube_conf_path) and os.path.exists(host_kube_conf_path): if self.dryrun: logger.info("DRY-RUN: link %s from %s" % (kube_conf_path, host_kube_conf_path)) else: os.symlink(host_kube_conf_path, kube_conf_path) else: self.kubectl = self._find_kubectl() if not self.dryrun: if not os.access(self.kubectl, os.X_OK): raise ProviderFailedException("Command: " + self.kubectl + " not found") # Check if Kubernetes config file is accessible, but only # if one was provided by the user; config file is optional. if self.config_file: self.checkConfigFile()
def _find_kubectl(self, prefix=""): """Determine the path to the kubectl program on the host. 1) Check the config for a provider_cli in the general section remember to add /host prefix 2) Search /usr/bin:/usr/local/bin Use the first valid value found """ if self.dryrun: # Testing env does not have kubectl in it return "/usr/bin/kubectl" test_paths = ['/usr/bin/kubectl', '/usr/local/bin/kubectl'] if self.config.get("provider_cli"): logger.info("caller gave provider_cli: " + self.config.get("provider_cli")) test_paths.insert(0, self.config.get("provider_cli")) for path in test_paths: test_path = prefix + path logger.info("trying kubectl at " + test_path) kubectl = test_path if os.access(kubectl, os.X_OK): logger.info("found kubectl at " + test_path) return kubectl raise ProviderFailedException("No kubectl found in %s" % ":".join(test_paths))
def deploy(self): logger.info("Deploying to provider: Docker") for container in self._get_containers(): if re.match( "%s_+%s+_+[a-zA-Z0-9]{12}" % (self.default_name, self.namespace), container): raise ProviderFailedException( "Namespace with name %s already deployed in Docker" % self.namespace) for artifact in self.artifacts: artifact_path = os.path.join(self.path, artifact) label_run = None with open(artifact_path, "r") as fp: label_run = fp.read().strip() run_args = label_run.split() # If --name is provided, do not re-name due to potential linking of containers. Warn user instead. # Else use namespace provided within answers.conf if '--name' in run_args: logger.info( "WARNING: Using --name provided within artifact file.") else: run_args.insert( run_args.index('run') + 1, "--name=%s_%s_%s" % (self.default_name, self.namespace, Utils.getUniqueUUID())) cmd = run_args if self.dryrun: logger.info("DRY-RUN: %s", " ".join(cmd)) else: subprocess.check_call(cmd)
def init(self):
    self.namespace = DEFAULT_NAMESPACE
    self.default_name = DEFAULT_CONTAINER_NAME

    logger.debug("Given config: %s", self.config)
    if self.config.get("namespace"):
        self.namespace = self.config.get("namespace")
    logger.debug("Namespace: %s", self.namespace)

    if "image" in self.config:
        self.image = Utils.sanitizeName(self.config.get("image"))
    else:
        self.image = Utils.getUniqueUUID()
        logger.warning("The artifact name has not been provided within "
                       "Nulecule, using a UUID instead")
        logger.debug("No image name found for artifact, using UUID %s in "
                     "container name" % self.image)

    if self.dryrun:
        logger.info("DRY-RUN: Did not check Docker version compatibility")
    else:
        cmd_check = ["docker", "version"]
        try:
            docker_version = subprocess.check_output(cmd_check).split("\n")
        except Exception as ex:
            raise ProviderFailedException(ex)

        client = ""
        server = ""
        for line in docker_version:
            if line.startswith("Client API version"):
                client = line.split(":")[1].strip()
            if line.startswith("Server API version"):
                server = line.split(":")[1].strip()

        # Compare version components numerically: a plain string comparison
        # would rank "1.9" above "1.18".
        if client and server:
            client_tuple = tuple(int(x) for x in client.split("."))
            server_tuple = tuple(int(x) for x in server.split("."))
            if client_tuple > server_tuple:
                msg = ("Docker version in app image (%s) is higher than the "
                       "one on host (%s). Please update your host."
                       % (client, server))
                raise ProviderFailedException(msg)
def prepareOrder(self):
    for artifact in self.artifacts:
        data = None
        with open(os.path.join(self.path, artifact), "r") as fp:
            logger.debug(os.path.join(self.path, artifact))
            data = anymarkup.parse(fp)

        if "kind" in data:
            self.kube_order[data["kind"].lower()] = artifact
        else:
            raise ProviderFailedException("Malformed kube file")
def init(self): cmd_check = ["docker", "version"] try: docker_version = subprocess.check_output(cmd_check).split("\n") except Exception as ex: raise ProviderFailedException(ex) client = "" server = "" for line in docker_version: if line.startswith("Client API version"): client = line.split(":")[1] if line.startswith("Server API version"): server = line.split(":")[1] if client > server: msg = ("Docker version in app image (%s) is higher than the one " "on host (%s). Please update your host." % (client, server)) raise ProviderFailedException(msg)
def _resource_identity(self, path):
    """Finds the Kubernetes resource name / identity from a resource manifest
    and raises if the manifest is not supported.

    :arg path: Absolute path to Kubernetes resource manifest
    :return: str -- Resource name / identity
    :raises: ProviderFailedException
    """
    data = anymarkup.parse_file(path)
    if data["apiVersion"] == "v1":
        return data["metadata"]["name"]
    elif data["apiVersion"] in ["v1beta3", "v1beta2", "v1beta1"]:
        msg = ("%s is not a supported API version, update Kubernetes "
               "artifacts to the v1 API version. Error in processing "
               "%s manifest." % (data["apiVersion"], path))
        raise ProviderFailedException(msg)
    else:
        raise ProviderFailedException("Malformed kube file: %s" % path)
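# A minimal sketch of a manifest _resource_identity() accepts; the file path
# and resource values are illustrative:
#
#   apiVersion: v1
#   kind: Pod
#   metadata:
#     name: mariadb
#
# _resource_identity("/abs/path/pod.yaml") would return "mariadb" here, while
# a v1beta* manifest would raise ProviderFailedException.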
def parse_kubeconf_data(kubecfg):
    """Parse kubeconf data.

    Args:
        kubecfg (dict): Kubernetes config data

    Returns:
        dict of parsed values from config
    """
    url = None
    token = None
    namespace = None
    tls_verify = True
    ca = None

    current_context = kubecfg["current-context"]
    logger.debug("current context: %s", current_context)

    try:
        context = filter(lambda co: co["name"] == current_context,
                         kubecfg["contexts"])[0]
        logger.debug("context: %s", context)

        cluster = filter(lambda cl: cl["name"] == context["context"]["cluster"],
                         kubecfg["clusters"])[0]
        logger.debug("cluster: %s", cluster)

        user = filter(lambda usr: usr["name"] == context["context"]["user"],
                      kubecfg["users"])[0]
        logger.debug("user: %s", user)
    except IndexError:
        raise ProviderFailedException()

    url = cluster["cluster"]["server"]
    token = user["user"]["token"]
    if "namespace" in context["context"]:
        namespace = context["context"]["namespace"]
    if "insecure-skip-tls-verify" in cluster["cluster"]:
        tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"]
    elif "certificate-authority" in cluster["cluster"]:
        ca = cluster["cluster"]["certificate-authority"]

    return {PROVIDER_API_KEY: url,
            PROVIDER_AUTH_KEY: token,
            NAMESPACE_KEY: namespace,
            PROVIDER_TLS_VERIFY_KEY: tls_verify,
            PROVIDER_CA_KEY: ca}
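# A minimal usage sketch for parse_kubeconf_data(), assuming the kubeconfig
# layout shown in the _parse_kubeconf() docstring further below; the path is
# illustrative:
#
#   import anymarkup
#   with open("/home/user/.kube/config") as fp:
#       kubecfg = anymarkup.parse(fp.read())
#   settings = parse_kubeconf_data(kubecfg)
#   # settings[PROVIDER_API_KEY] -> "https://10.1.2.2:8443"
#   # settings[NAMESPACE_KEY]    -> "test"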
def init(self):
    self.cli = find_executable(self.cli)
    if self.container and not self.cli:
        host_path = []
        for path in os.environ.get("PATH").split(":"):
            host_path.append("/host%s" % path)
        self.cli = find_executable("osc", path=":".join(host_path))

    if not self.cli or not os.access(self.cli, os.X_OK):
        raise ProviderFailedException("Command %s not found" % self.cli)
    else:
        logger.debug("Using %s to run OpenShift commands.", self.cli)

    if "openshiftconfig" in self.config:
        self.config_file = self.config["openshiftconfig"]
    else:
        logger.warning("Configuration option 'openshiftconfig' not found")

    if not self.config_file or not os.access(self.config_file, os.R_OK):
        raise ProviderFailedException(
            "Cannot access configuration file %s" % self.config_file)
def _process_artifacts(self):
    """Parse and validate Marathon artifacts.

    Parsed artifacts are saved to self.marathon_artifacts.
    """
    for artifact in self.artifacts:
        logger.debug("Processing artifact: %s", artifact)
        data = None
        with open(os.path.join(self.path, artifact), "r") as fp:
            try:
                # env variables in marathon artifacts have to be string:string;
                # force_types=None respects the types from the json file
                data = anymarkup.parse(fp, force_types=None)
                logger.debug("Parsed artifact %s", data)

                # every marathon app has to have an id; the 'id' key is also
                # used for showing messages
                if "id" not in data.keys():
                    msg = "Error processing %s artifact. There is no id" % artifact
                    cockpit_logger.error(msg)
                    raise ProviderFailedException(msg)
            except anymarkup.AnyMarkupError as e:
                msg = "Error processing artifact - %s" % e
                cockpit_logger.error(msg)
                raise ProviderFailedException(msg)

        self.marathon_artifacts.append(data)
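# A minimal example of a Marathon artifact this loop accepts; only "id" is
# checked here, the remaining fields are standard Marathon app settings and
# are passed through to the API as-is:
#
#   {
#       "id": "/myapp",
#       "cmd": "sleep 3600",
#       "cpus": 0.1,
#       "mem": 32,
#       "instances": 1
#   }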
def deploy(self, url, artifact):
    (status_code, return_data) = \
        Utils.make_rest_request("post",
                                url,
                                verify=self._requests_tls_verify(),
                                data=artifact)
    if status_code == 201:
        logger.info("Object %s successfully deployed.",
                    artifact['metadata']['name'])
    else:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        # TODO: remove running components (issue: #428)
        raise ProviderFailedException(msg)
def process_template(self, url, template):
    (status_code, return_data) = \
        Utils.make_rest_request("post",
                                url,
                                verify=self._requests_tls_verify(),
                                data=template)
    if status_code == 201:
        logger.info("template processed %s", template['metadata']['name'])
        logger.debug("processed template %s", return_data)
        return return_data['objects']
    else:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        raise ProviderFailedException(msg)
def _process_artifact_data(self, artifact, data):
    """Process the data for an artifact.

    Args:
        artifact (str): Artifact name
        data (dict): Artifact data
    """
    # kind has to be specified in artifact
    if "kind" not in data.keys():
        raise ProviderFailedException(
            "Error processing %s artifact. There is no kind" % artifact)

    kind = data["kind"].lower()
    resource = self._kind_to_resource(kind)

    # check if resource is supported by apis
    if resource not in self.oapi_resources \
            and resource not in self.kapi_resources:
        raise ProviderFailedException(
            "Unsupported kind %s in artifact %s" % (kind, artifact))

    # process templates
    if kind == "template":
        processed_objects = self._process_template(data)
        # add all processed objects to the artifacts dict
        for obj in processed_objects:
            obj_kind = obj["kind"].lower()
            if obj_kind not in self.openshift_artifacts.keys():
                self.openshift_artifacts[obj_kind] = []
            self.openshift_artifacts[obj_kind].append(obj)
        return

    # add parsed artifact to dict
    if kind not in self.openshift_artifacts.keys():
        self.openshift_artifacts[kind] = []
    self.openshift_artifacts[kind].append(data)
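# _kind_to_resource() is not shown in this section. As a rough sketch, the
# REST resource name is conventionally the plural, lowercase form of the
# kind (Pod -> pods, DeploymentConfig -> deploymentconfigs); the special
# cases below are illustrative assumptions, not the project's actual code:
def _kind_to_resource_sketch(kind):
    kind = kind.lower()
    if kind.endswith("s"):       # e.g. "endpoints" is already plural
        return kind
    if kind.endswith("y"):       # e.g. networkpolicy -> networkpolicies
        return kind[:-1] + "ies"
    return kind + "s"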
def _check_required_params(self, exception=False):
    '''
    This checks to see if the required parameters associated with the
    Kubernetes provider are passed. Only PROVIDER_API_KEY is *required*.
    Token may be blank.
    '''
    paramdict = self._build_param_dict()
    logger.debug("List of parameters passed: %s" % paramdict)

    # Check that the required parameters are passed. If not, error out.
    for k in [PROVIDER_API_KEY]:
        if paramdict[k] is None:
            if exception:
                msg = "You need to set %s in %s or pass it as a CLI param" \
                    % (k, ANSWERS_FILE)
                raise ProviderFailedException(msg)
            else:
                return False

    return True
def process_k8s_artifacts(self):
    """Processes Kubernetes manifest files and checks if the manifest under
    process is valid.
    """
    for artifact in self.artifacts:
        data = None
        with open(os.path.join(self.path, artifact), "r") as fp:
            logger.debug(os.path.join(self.path, artifact))
            try:
                data = anymarkup.parse(fp)
            except Exception:
                msg = "Error processing %s artifact" % os.path.join(
                    self.path, artifact)
                cockpit_logger.error(msg)
                raise

        if "kind" in data:
            self.k8s_manifests.append((data["kind"].lower(), artifact))
        else:
            apath = os.path.join(self.path, artifact)
            raise ProviderFailedException("Malformed kube file: %s" % apath)
def init(self):
    self.cli = find_executable(self.cli_str)
    if self.container and not self.cli:
        host_path = []
        for path in os.environ.get("PATH").split(":"):
            host_path.append(os.path.join(Utils.getRoot(), path.lstrip("/")))
        self.cli = find_executable(self.cli_str, path=":".join(host_path))

        if not self.cli:
            # if run as non-root we need a symlink in the container
            os.symlink(os.path.join(Utils.getRoot(), "usr/bin/oc"),
                       "/usr/bin/oc")
            self.cli = "/usr/bin/oc"

    if not self.dryrun:
        if not self.cli or not os.access(self.cli, os.X_OK):
            raise ProviderFailedException("Command %s not found" % self.cli)
        else:
            logger.debug("Using %s to run OpenShift commands.", self.cli)

        # Check if the OpenShift config file is accessible
        self.checkConfigFile()
def _check_namespaces(self):
    '''
    This function checks whether the namespaces created in the cluster
    match the namespace that is associated and/or provided with the
    deployed application.
    '''
    # Get the namespaces and output the currently used ones
    namespace_list = self.api.namespaces()
    logger.debug("There are currently %s namespaces in the cluster."
                 % str(len(namespace_list)))

    # Create a namespace list
    namespaces = []
    for ns in namespace_list:
        namespaces.append(ns["metadata"]["name"])

    # Output the namespaces and check to see if the one provided exists
    logger.debug("Namespaces: %s" % namespaces)
    if self.namespace not in namespaces:
        msg = ("%s namespace does not exist. Please create the namespace "
               "and try again." % self.namespace)
        raise ProviderFailedException(msg)
def get_kapi_resources(self):
    """Get Kubernetes API resources."""
    # get list of supported resources for the api
    (status_code, return_data) = \
        Utils.make_rest_request("get",
                                self.kubernetes_api,
                                verify=self._requests_tls_verify())
    if status_code == 200:
        kapi_resources = return_data["resources"]
    else:
        raise ProviderFailedException("Cannot get Kubernetes resource list")

    # convert resources list of dicts to list of names
    kapi_resources = [res['name'] for res in kapi_resources]

    logger.debug("Kubernetes resources %s", kapi_resources)
    return kapi_resources
def get_oapi_resources(self):
    """Get OpenShift API resources."""
    # get list of supported resources for the api
    (status_code, return_data) = \
        Utils.make_rest_request("get",
                                self.openshift_api,
                                verify=self._requests_tls_verify())
    if status_code == 200:
        oapi_resources = return_data["resources"]
    else:
        raise ProviderFailedException("Cannot get OpenShift resource list")

    # convert resources list of dicts to list of names
    oapi_resources = [res['name'] for res in oapi_resources]

    logger.debug("OpenShift resources %s", oapi_resources)
    return oapi_resources
def delete(self, url):
    """Delete the object at the given url.

    Args:
        url (str): full url for artifact

    Raises:
        ProviderFailedException: error when calling remote api
    """
    (status_code, return_data) = \
        Utils.make_rest_request("delete",
                                url,
                                verify=self._requests_tls_verify())
    if status_code == 200:
        logger.info("Successfully deleted.")
    else:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        raise ProviderFailedException(msg)
def _parse_kubeconf(self, filename):
    """Parse kubectl config file.

    Args:
        filename (string): path to configuration file (e.g. ./kube/config)

    Returns:
        dict of parsed values from config

    Example of expected file format:
        apiVersion: v1
        clusters:
        - cluster:
            server: https://10.1.2.2:8443
            certificate-authority: path-to-ca.cert
            insecure-skip-tls-verify: false
          name: 10-1-2-2:8443
        contexts:
        - context:
            cluster: 10-1-2-2:8443
            namespace: test
            user: test-admin/10-1-2-2:8443
          name: test/10-1-2-2:8443/test-admin
        current-context: test/10-1-2-2:8443/test-admin
        kind: Config
        preferences: {}
        users:
        - name: test-admin/10-1-2-2:8443
          user:
            token: abcdefghijklmnopqrstuvwxyz0123456789ABCDEF
    """
    logger.debug("Parsing %s", filename)

    with open(filename, 'r') as fp:
        kubecfg = anymarkup.parse(fp.read())

    try:
        return self._parse_kubeconf_data(kubecfg)
    except ProviderFailedException:
        raise ProviderFailedException('Invalid %s' % filename)
def persistent_storage(self, graph, action):
    """Actions are either run, stop or uninstall, as per the Requirements
    class. Currently run is the only action implemented for k8s
    persistent storage.
    """
    logger.debug("Persistent storage enabled! Running action: %s" % action)

    if graph["accessMode"] not in PERSISTENT_STORAGE_FORMAT:
        raise ProviderFailedException("{} is an invalid storage format "
                                      "(choose from {})"
                                      .format(graph["accessMode"],
                                              ', '.join(PERSISTENT_STORAGE_FORMAT)))

    if action not in ['run']:
        logger.warning("%s action is not available for provider %s. "
                       "Doing nothing." % (action, self.key))
        return

    self._check_persistent_volumes()

    # Get the path of the persistent storage yaml file included in external/
    # and plug the information from the graph into the persistent storage file
    base_path = os.path.dirname(os.path.realpath(__file__))
    template_path = os.path.join(base_path,
                                 'external/kubernetes/persistent_storage.yaml')
    with open(template_path, 'r') as f:
        content = f.read()
    template = Template(content)
    rendered_template = template.safe_substitute(graph)

    tmp_file = Utils.getTmpFile(rendered_template, '.yaml')

    # Pass the .yaml file and execute
    if action == "run":
        cmd = [self.kubectl, "create", "-f", tmp_file,
               "--namespace=%s" % self.namespace]
        if self.config_file:
            cmd.append("--kubeconfig=%s" % self.config_file)
        self._call(cmd)
        os.unlink(tmp_file)
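# A short sketch of the template substitution above, assuming Template is
# string.Template and that persistent_storage.yaml uses $-placeholders named
# after the graph keys (the "size" field here is an illustrative assumption):
#
#   from string import Template
#   template = Template("accessModes:\n  - $accessMode\n"
#                       "capacity:\n  storage: $size\n")
#   print(template.safe_substitute({"accessMode": "ReadWriteOnce",
#                                   "size": "4Gi"}))
#
# safe_substitute() leaves unknown $placeholders in place instead of raising,
# so a partially filled graph still renders.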
def run(self):
    """Deploys the app from the given resource manifests."""
    for artifact in self.marathon_artifacts:
        url = urlparse.urljoin(self.marathon_api, "apps/")

        if self.dryrun:
            logger.info("DRY-RUN: %s", url)
            continue

        logger.debug("Deploying appid: %s", artifact["id"])
        (status_code, return_data) = \
            Utils.make_rest_request("post", url, data=artifact)
        if status_code == 201:
            logger.info("Marathon app %s successfully deployed.",
                        artifact["id"])
        else:
            msg = "Error deploying app: %s, Marathon API response %s - %s" % (
                artifact["id"], status_code, return_data)
            logger.error(msg)
            raise ProviderFailedException(msg)
def init(self): logger.debug("Given config: %s", self.config) if self.config.get("namespace"): self.namespace = self.config.get("namespace") logger.info("Using namespace %s", self.namespace) if self.container: self.kubectl = self._findKubectl("/host") if not os.path.exists("/etc/kubernetes"): if self.dryrun: logger.info( "DRY-RUN: link /etc/kubernetes from /host/etc/kubernetes" ) else: os.symlink("/host/etc/kubernetes", "/etc/kubernetes") else: self.kubectl = self._findKubectl() if not self.dryrun: if not os.access(self.kubectl, os.X_OK): raise ProviderFailedException("Command: " + self.kubectl + " not found")
def deploy(self):
    kube_order = OrderedDict([("service", None),
                              ("rc", None),
                              ("pod", None)])  # FIXME

    for artifact in self.artifacts:
        data = None
        artifact_path = os.path.join(self.path, artifact)
        with open(artifact_path, "r") as fp:
            data = anymarkup.parse(fp, force_types=None)

        if "kind" in data:
            if data["kind"].lower() == "template":
                logger.info("Processing template")
                artifact = self._processTemplate(artifact_path)
            kube_order[data["kind"].lower()] = artifact
        else:
            raise ProviderFailedException("Malformed artifact file")

    for artifact in kube_order:
        if not kube_order[artifact]:
            continue
        k8s_file = os.path.join(self.path, kube_order[artifact])
        self._callCli(k8s_file)
def scale(self, url, replicas):
    """Scale ReplicationControllers or DeploymentConfigs.

    Args:
        url (str): full url for artifact
        replicas (int): number of replicas to scale to
    """
    patch = [{"op": "replace",
              "path": "/spec/replicas",
              "value": replicas}]

    (status_code, return_data) = \
        Utils.make_rest_request("patch",
                                url,
                                data=patch,
                                verify=self._requests_tls_verify())
    if status_code == 200:
        logger.info("Successfully scaled to %s replicas", replicas)
    else:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        raise ProviderFailedException(msg)
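# The patch above is a JSON Patch (RFC 6902) document. Against the
# Kubernetes/OpenShift API such a patch must be sent with the
# "application/json-patch+json" content type, which is presumably set inside
# Utils.make_rest_request(); a hedged sketch of the equivalent raw call:
#
#   import json
#   import requests
#   requests.patch(url,
#                  data=json.dumps([{"op": "replace",
#                                    "path": "/spec/replicas",
#                                    "value": 0}]),
#                  headers={"Content-Type": "application/json-patch+json"},
#                  verify=tls_verify)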
def _set_config_values(self):
    """Reads providerapi, namespace and accesstoken from answers.conf and
    the corresponding values from providerconfig (if set).
    Use whichever is set; if both are set and have conflicting values,
    raise an exception.

    Raises:
        ProviderFailedException: values in providerconfig and answers.conf
            are in conflict
    """
    # First things first: if we are running inside of an openshift pod via
    # `oc new-app`, then get the config from the environment (files/env vars)
    # NOTE: pick up provider_tls_verify from answers if it exists
    if Utils.running_on_openshift():
        self.providerapi = Utils.get_openshift_api_endpoint_from_env()
        self.namespace = os.environ['POD_NAMESPACE']
        self.access_token = os.environ['TOKEN_ENV_VAR']
        self.provider_ca = OPENSHIFT_POD_CA_FILE
        self.provider_tls_verify = \
            self.config.get(PROVIDER_TLS_VERIFY_KEY, True)
        return  # No need to process other information

    # initialize result to default values
    result = {PROVIDER_API_KEY: self.providerapi,
              ACCESS_TOKEN_KEY: self.access_token,
              NAMESPACE_KEY: self.namespace,
              PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify,
              PROVIDER_CA_KEY: self.provider_ca}

    # create keys in dicts and initialize values to None
    answers = {}
    providerconfig = {}
    for k in result.keys():
        answers[k] = None
        providerconfig[k] = None

    # get values from answers.conf
    for k in result.keys():
        answers[k] = self.config.get(k)

    # get values from providerconfig
    if self.config_file:
        providerconfig = self._parse_kubeconf(self.config_file)

    # decide between values from answers.conf and providerconfig:
    # if only one is set, use that; report if they are in conflict
    for k in result.keys():
        if answers[k] is not None and providerconfig[k] is None:
            result[k] = answers[k]
        if answers[k] is None and providerconfig[k] is not None:
            result[k] = providerconfig[k]
        if answers[k] is not None and providerconfig[k] is not None:
            if answers[k] == providerconfig[k]:
                result[k] = answers[k]
            else:
                msg = "There are conflicting values in %s (%s) and %s (%s)" \
                    % (self.config_file, providerconfig[k],
                       ANSWERS_FILE, answers[k])
                logger.error(msg)
                raise ProviderFailedException(msg)

    logger.debug("config values: %s" % result)

    # these items are required; they must not be None
    for k in [PROVIDER_API_KEY, ACCESS_TOKEN_KEY, NAMESPACE_KEY]:
        if result[k] is None:
            msg = "You need to set %s in %s" % (k, ANSWERS_FILE)
            logger.error(msg)
            raise ProviderFailedException(msg)

    # set config values
    self.providerapi = result[PROVIDER_API_KEY]
    self.access_token = result[ACCESS_TOKEN_KEY]
    self.namespace = result[NAMESPACE_KEY]
    self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY]
    if result[PROVIDER_CA_KEY]:
        # if we are in a container, translate the path to the path on the host
        self.provider_ca = os.path.join(Utils.getRoot(),
                                        result[PROVIDER_CA_KEY].lstrip('/'))
    else:
        self.provider_ca = None
def stop(self):
    """Undeploy application.

    Cascade the deletion of resources managed by another resource
    (e.g. ReplicationControllers created by a DeploymentConfig and Pods
    created by a ReplicationController). When using the command line client
    this is done automatically by the `oc` command. When using API calls
    we have to cascade the deletion manually.
    """
    logger.debug("Starting undeploy")

    delete_artifacts = []
    for kind, objects in self.openshift_artifacts.iteritems():
        # Add deployment configs to the beginning of the list so they are
        # deleted first. Do deployment configs first because if you delete a
        # replication controller before its deployment config, the deployment
        # config will re-spawn the replication controller before the
        # deployment config is deleted.
        if kind == "deploymentconfig":
            delete_artifacts = objects + delete_artifacts
        else:
            delete_artifacts = delete_artifacts + objects

    for artifact in delete_artifacts:
        kind = artifact["kind"].lower()
        namespace = self._get_namespace(artifact)

        # Get the name from metadata so we know which object to delete.
        if "metadata" in artifact and "name" in artifact["metadata"]:
            name = artifact["metadata"]["name"]
        else:
            raise ProviderFailedException("Cannot undeploy. There is no "
                                          "name in the artifact's metadata "
                                          "artifact=%s" % artifact)

        logger.info("Undeploying artifact name=%s kind=%s" % (name, kind))

        # If this is a deployment config we need to delete all replication
        # controllers that were created by it. Find them by querying for all
        # replication controllers and filtering on the automatically created
        # label openshift.io/deployment-config.name.
        if kind == "deploymentconfig":
            params = {"labelSelector":
                      "openshift.io/deployment-config.name=%s" % name}
            url = self._get_url(namespace,
                                "replicationcontroller",
                                params=params)
            (status_code, return_data) = \
                Utils.make_rest_request("get",
                                        url,
                                        verify=self.oc._requests_tls_verify())
            if status_code != 200:
                raise ProviderFailedException("Cannot get Replication"
                                              "Controllers for Deployment"
                                              "Config %s (status code %s)"
                                              % (name, status_code))

            # The kind of the returned data is ReplicationControllerList:
            # https://docs.openshift.com/enterprise/3.1/rest_api/kubernetes_v1.html#v1-replicationcontrollerlist
            # We have to modify the items to get valid ReplicationControllers.
            items = return_data["items"]
            for item in items:
                item["kind"] = "ReplicationController"
                item["apiVersion"] = return_data["apiVersion"]
            # add the items to the list of artifacts to be deleted
            delete_artifacts.extend(items)

        url = self._get_url(namespace, kind, name)

        # Scale the replication controller down to 0 replicas before deleting.
        # This takes care of all pods created by the replication controller
        # so we can safely delete it.
        if kind == "replicationcontroller":
            if self.dryrun:
                logger.info("DRY-RUN: SCALE %s down to 0", url)
            else:
                self.oc.scale(url, 0)

        if self.dryrun:
            logger.info("DRY-RUN: DELETE %s", url)
        else:
            self.oc.delete(url)