def init(self):
    """
    Initialise the Docker provider: pick up the namespace from config and
    verify that the Docker client API version is not newer than the host's
    server API version.

    Raises:
        ProviderFailedException: when `docker version` cannot be run, or
            when the client API version is higher than the server's.
    """
    self.namespace = DEFAULT_NAMESPACE
    self.default_name = DEFAULT_CONTAINER_NAME

    logger.debug("Given config: %s", self.config)
    if self.config.get("namespace"):
        self.namespace = self.config.get("namespace")
    logger.debug("Namespace: %s", self.namespace)

    if self.dryrun:
        logger.info("DRY-RUN: Did not check Docker version compatibility")
        return

    cmd_check = ["docker", "version"]
    try:
        docker_version = subprocess.check_output(cmd_check).split("\n")
    except Exception as ex:
        raise ProviderFailedException(ex)

    client = ""
    server = ""
    for line in docker_version:
        # values look like "Client API version: 1.18" -> strip whitespace
        if line.startswith("Client API version"):
            client = line.split(":")[1].strip()
        if line.startswith("Server API version"):
            server = line.split(":")[1].strip()

    # Compare as dotted version numbers, not as plain strings: a string
    # comparison would wrongly report "1.9" > "1.10".
    from distutils.version import LooseVersion
    if client and server and LooseVersion(client) > LooseVersion(server):
        msg = (
            "Docker version in app image (%s) is higher than the one "
            "on host (%s). Please update your host." % (client, server))
        raise ProviderFailedException(msg)
def _wait_till_pod_runs(self, namespace, pod, timeout=300):
    """ Wait till pod runs, with a timeout.

    Args:
        namespace (str): Openshift namespace
        pod (str): Pod name
        timeout (int): Timeout in seconds.

    Raises:
        ProviderFailedException on timeout or when the pod goes
        to failed state.
    """
    # Poll once a second until the deadline passes or the pod settles.
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
    while datetime.datetime.now() < deadline:
        status = self.oc.get_pod_status(namespace, pod)
        if status == 'running':
            break
        if status == 'failed':
            raise ProviderFailedException(
                'Unable to run pod for extracting content: '
                '{namespace}/{pod}'.format(namespace=namespace, pod=pod))
        time.sleep(1)
    if status != 'running':
        raise ProviderFailedException(
            'Timed out to extract content from pod: '
            '{namespace}/{pod}'.format(namespace=namespace, pod=pod))
def test_connection(self):
    """
    Test connection to OpenShift server

    Raises:
        ProviderFailedException - Invalid SSL/TLS certificate, or
            CA file configured but not present on disk.
    """
    logger.debug("Testing connection to OpenShift server")

    if self.provider_ca and not os.path.exists(self.provider_ca):
        raise ProviderFailedException("Unable to find CA path %s"
                                      % self.provider_ca)

    try:
        (status_code, return_data) = \
            Utils.make_rest_request("get",
                                    self.openshift_api,
                                    verify=self._requests_tls_verify())
    except SSLError as e:
        if self.provider_tls_verify:
            # Verification was requested and failed: tell the user how to
            # supply a CA or disable verification.
            msg = "SSL/TLS ERROR: invalid certificate. " \
                  "Add certificate of correct Certificate Authority providing" \
                  " `%s` or you can disable SSL/TLS verification by `%s=False`" \
                  % (PROVIDER_CA_KEY, PROVIDER_TLS_VERIFY_KEY)
            raise ProviderFailedException(msg)
        else:
            # this shouldn't happen
            # str(e) instead of e.message: the .message attribute is
            # deprecated and no longer exists on Python 3 exceptions
            raise ProviderFailedException(str(e))
def _process_artifact_data(self, artifact, data):
    """
    Validate a parsed artifact and register it under its kind.

    Args:
        artifact (str): Artifact name
        data (dict): Artifact data
    """
    # Every artifact must declare its resource kind.
    if "kind" not in data:
        raise ProviderFailedException(
            "Error processing %s artifact. There is no kind" % artifact)

    # Lower-cased kind is the bucket key; create the bucket on first use.
    kind = data["kind"].lower()
    self.oc_artifacts.setdefault(kind, [])

    # Metadata is mandatory since we rewrite its namespace below.
    if 'metadata' not in data:
        raise ProviderFailedException(
            "Error processing %s artifact. There is no metadata object"
            % artifact)

    # Force the artifact into the namespace selected during init()
    metadata = data['metadata']
    metadata['namespace'] = self.namespace
    metadata.setdefault('labels', {})['namespace'] = self.namespace

    self.oc_artifacts[kind].append(data)
def init(self):
    """
    Locate the OpenShift client binary and the kube configuration file.

    Raises:
        ProviderFailedException: when the client binary cannot be found
            or the configuration file is not readable.
    """
    self.cli = find_executable(self.cli_str)
    if self.container and not self.cli:
        # Running inside a container: retry against the host's PATH,
        # which is mounted under /host.
        host_path = ":".join(
            "/host%s" % entry
            for entry in os.environ.get("PATH").split(":"))
        self.cli = find_executable(self.cli_str, path=host_path)
        if not self.cli:
            # if run as non-root we need a symlink in the container
            os.symlink("/host/usr/bin/openshift", "/usr/bin/oc")
            self.cli = "/usr/bin/oc"

    if not self.cli or not os.access(self.cli, os.X_OK):
        raise ProviderFailedException("Command %s not found" % self.cli)
    else:
        logger.debug("Using %s to run OpenShift commands.", self.cli)

    if "openshiftconfig" in self.config:
        self.config_file = self.config["openshiftconfig"]
        if self.container:
            # Re-anchor the host path under the /host mount.
            self.config_file = os.path.join(
                "/host", self.config_file.lstrip("/"))
    else:
        logger.warning("Configuration option 'openshiftconfig' not found")

    if not self.config_file or not os.access(self.config_file, os.R_OK):
        raise ProviderFailedException(
            "Cannot access configuration file %s. Try adding "
            "'openshiftconfig = /path/to/your/.kube/config' in the "
            "[general] section of the answers.conf file." % self.config_file)
def _parse_kubeconf_data(self, kubecfg):
    """
    Parse kubeconf data.

    Args:
        kubecfg (dict): Kubernetes config data

    Returns:
        dict of parsed values from config

    Raises:
        ProviderFailedException: when the current context, or its
            cluster/user entry, cannot be found in the config.
    """
    url = None
    token = None
    namespace = None
    tls_verify = True
    ca = None

    current_context = kubecfg["current-context"]
    logger.debug("current context: %s", current_context)

    context = None
    for co in kubecfg["contexts"]:
        if co["name"] == current_context:
            context = co
            break  # names are unique; no need to scan further

    if not context:
        # Original code raised a bare exception here; include the
        # context name so the failure is diagnosable.
        raise ProviderFailedException(
            "Context %s not found in kubeconfig" % current_context)

    cluster = None
    for cl in kubecfg["clusters"]:
        if cl["name"] == context["context"]["cluster"]:
            cluster = cl
            break

    user = None
    for usr in kubecfg["users"]:
        if usr["name"] == context["context"]["user"]:
            user = usr
            break

    if not cluster or not user:
        raise ProviderFailedException(
            "Cluster or user for context %s not found in kubeconfig"
            % current_context)

    logger.debug("context: %s", context)
    logger.debug("cluster: %s", cluster)
    logger.debug("user: %s", user)

    url = cluster["cluster"]["server"]
    token = user["user"]["token"]
    if "namespace" in context["context"]:
        namespace = context["context"]["namespace"]
    if "insecure-skip-tls-verify" in cluster["cluster"]:
        tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"]
    elif "certificate-authority" in cluster["cluster"]:
        ca = cluster["cluster"]["certificate-authority"]

    return {PROVIDER_API_KEY: url,
            ACCESS_TOKEN_KEY: token,
            NAMESPACE_KEY: namespace,
            PROVIDER_TLS_VERIFY_KEY: tls_verify,
            PROVIDER_CA_KEY: ca}
def run(self):
    """
    Deploy the docker-run artifacts to Docker.

    Raises:
        ProviderFailedException: when a container for this
            namespace/image is already deployed.
        DockerException: when `docker run` fails.
    """
    logger.info("Deploying to provider: Docker")

    # Containers are named <namespace>_<image>_<12-char uuid>.
    # NOTE: fixed the original pattern "%s_+%s+_+..." where the stray
    # "+" after the image placeholder repeated the image's last
    # character instead of matching the "_" separator.
    name_pattern = "%s_+%s_+[a-zA-Z0-9]{12}" % (self.namespace, self.image)
    for container in self._get_containers():
        if re.match(name_pattern, container):
            raise ProviderFailedException(
                "Container with name %s-%s already deployed in Docker"
                % (self.namespace, self.image))

    for artifact in self.artifacts:
        artifact_path = os.path.join(self.path, artifact)
        label_run = None
        with open(artifact_path, "r") as fp:
            label_run = fp.read().strip()
        # if docker-run provided as multiline command
        label_run = ' '.join(label_run.split('\\\n'))
        run_args = label_run.split()

        # If --name is provided, do not re-name due to potential linking
        # of containers. Warn user instead.
        # Else use namespace provided within answers.conf
        if '--name' in run_args:
            logger.warning(
                "WARNING: Using --name provided within artifact file.")
        else:
            run_args.insert(
                run_args.index('run') + 1,
                "--name=%s_%s_%s" % (self.namespace, self.image,
                                     Utils.getUniqueUUID()))
        cmd = run_args
        if self.dryrun:
            logger.info("DRY-RUN: %s", " ".join(cmd))
        else:
            try:
                subprocess.check_output(cmd)
            except subprocess.CalledProcessError as e:
                raise DockerException("%s. \n%s" % (cmd, e.output))
def init(self):
    """
    Initialise the Kubernetes provider: namespace, kubectl binary and,
    if one was supplied, the kube configuration file.

    Raises:
        ProviderFailedException: when kubectl is not executable.
    """
    self.namespace = "default"
    self.k8s_manifests = []

    logger.debug("Given config: %s", self.config)
    if self.config.get("namespace"):
        self.namespace = self.config.get("namespace")
    logger.info("Using namespace %s", self.namespace)

    if not self.container:
        self.kubectl = self._find_kubectl()
    else:
        self.kubectl = self._find_kubectl(Utils.getRoot())
        kube_conf_path = "/etc/kubernetes"
        host_kube_conf_path = Utils.get_real_abspath(kube_conf_path)
        # Expose the host's kube config inside the container when the
        # container itself has none.
        if (not os.path.exists(kube_conf_path)
                and os.path.exists(host_kube_conf_path)):
            if self.dryrun:
                logger.info("DRY-RUN: link %s from %s"
                            % (kube_conf_path, host_kube_conf_path))
            else:
                os.symlink(host_kube_conf_path, kube_conf_path)

    if not self.dryrun:
        if not os.access(self.kubectl, os.X_OK):
            raise ProviderFailedException(
                "Command: " + self.kubectl + " not found")

        # Check if Kubernetes config file is accessible, but only
        # if one was provided by the user; config file is optional.
        if self.config_file:
            self.checkConfigFile()
def init(self):
    """
    Initialise the Kubernetes provider: namespace, kubectl binary and
    the kube configuration file.

    Raises:
        ProviderFailedException: when kubectl is not executable.
    """
    self.namespace = "default"
    self.k8s_manifests = []

    logger.debug("Given config: %s", self.config)
    if self.config.get("namespace"):
        self.namespace = self.config.get("namespace")
    logger.info("Using namespace %s", self.namespace)

    if not self.container:
        self.kubectl = self._find_kubectl()
    else:
        self.kubectl = self._find_kubectl(Utils.getRoot())
        kube_conf_path = "/etc/kubernetes"
        if not os.path.exists(kube_conf_path):
            # Link the host's kube config into the container.
            link_target = os.path.join(Utils.getRoot(),
                                       kube_conf_path.lstrip("/"))
            if self.dryrun:
                logger.info("DRY-RUN: link %s from %s%s"
                            % (kube_conf_path, HOST_DIR, kube_conf_path))
            else:
                os.symlink(link_target, kube_conf_path)

    if not self.dryrun:
        if not os.access(self.kubectl, os.X_OK):
            raise ProviderFailedException(
                "Command: " + self.kubectl + " not found")
        # Check if Kubernetes config file is accessible
        self.checkConfigFile()
def _find_kubectl(self, prefix=""):
    """Determine the path to the kubectl program on the host.

    1) Check the config for a provider_cli in the general section
       remember to add /host prefix
    2) Search /usr/bin:/usr/local/bin

    Use the first valid value found
    """
    if self.dryrun:
        # Testing env does not have kubectl in it
        return "/usr/bin/kubectl"

    candidates = ['/usr/bin/kubectl', '/usr/local/bin/kubectl']
    if self.config.get("provider_cli"):
        logger.info("caller gave provider_cli: "
                    + self.config.get("provider_cli"))
        # A user-supplied path takes priority over the defaults.
        candidates.insert(0, self.config.get("provider_cli"))

    for candidate in candidates:
        kubectl = prefix + candidate
        logger.info("trying kubectl at " + kubectl)
        if os.access(kubectl, os.X_OK):
            logger.info("found kubectl at " + kubectl)
            return kubectl

    raise ProviderFailedException("No kubectl found in %s"
                                  % ":".join(candidates))
def init(self):
    """
    Initialise the provider: namespace, artifact deploy ordering and
    the kubectl binary.

    Raises:
        ProviderFailedException: when kubectl is not executable.
    """
    self.namespace = "default"
    # Deploy services first, then replication controllers, then pods.
    self.kube_order = OrderedDict(
        [("service", None), ("rc", None), ("pod", None)])  # FIXME

    logger.debug("Given config: %s", self.config)
    if self.config.get("namespace"):
        self.namespace = self.config.get("namespace")
    logger.info("Using namespace %s", self.namespace)

    if not self.container:
        self.kubectl = self._findKubectl()
    else:
        self.kubectl = self._findKubectl("/host")
        if not os.path.exists("/etc/kubernetes"):
            if self.dryrun:
                logger.info(
                    "DRY-RUN: link /etc/kubernetes from /host/etc/kubernetes"
                )
            else:
                os.symlink("/host/etc/kubernetes", "/etc/kubernetes")

    if not self.dryrun:
        if not os.access(self.kubectl, os.X_OK):
            raise ProviderFailedException(
                "Command: " + self.kubectl + " not found")
def scale(self, url, replicas):
    """
    Scale ReplicationControllers or DeploymentConfig

    Args:
        url (str): full url for artifact
        replicas (int): number of replicas scale to
    """
    # JSON-patch only the replica count, leaving the rest of the
    # object untouched.
    patch = [{"op": "replace",
              "path": "/spec/replicas",
              "value": replicas}]

    (status_code, return_data) = Utils.make_rest_request(
        "patch", url, data=patch, verify=self._requests_tls_verify())

    if status_code != 200:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        raise ProviderFailedException(msg)
    logger.info("Successfully scaled to %s replicas", replicas)
def get_pod_status(self, namespace, pod):
    """
    Get pod status.

    Args:
        namespace (str): Openshift namespace
        pod (str): Pod name

    Returns:
        Status of pod (str)

    Raises:
        ProviderFailedException when unable to fetch Pod status.
    """
    url = urljoin(
        self.kubernetes_api,
        'namespaces/{namespace}/pods/{pod}?'
        'access_token={access_token}'.format(
            namespace=namespace,
            pod=pod,
            access_token=self.access_token))

    (status_code, return_data) = Utils.make_rest_request(
        "get", url, verify=self._requests_tls_verify())

    if status_code != 200:
        raise ProviderFailedException(
            'Could not fetch status for pod: {namespace}/{pod}'.format(
                namespace=namespace, pod=pod))
    # Phases are reported capitalised (e.g. "Running"); normalise.
    return return_data['status']['phase'].lower()
def deploy(self):
    """
    Deploy the docker-run artifacts to Docker.

    Raises:
        ProviderFailedException: when a container for this namespace is
            already deployed.
        subprocess.CalledProcessError: when `docker run` fails.
    """
    logger.info("Deploying to provider: Docker")

    # Containers are named <default_name>_<namespace>_<12-char uuid>.
    # NOTE: fixed the original pattern "%s_+%s+_+..." where the stray
    # "+" after the namespace placeholder repeated the namespace's last
    # character instead of matching the "_" separator.
    name_pattern = "%s_+%s_+[a-zA-Z0-9]{12}" % (self.default_name,
                                                self.namespace)
    for container in self._get_containers():
        if re.match(name_pattern, container):
            raise ProviderFailedException(
                "Namespace with name %s already deployed in Docker"
                % self.namespace)

    for artifact in self.artifacts:
        artifact_path = os.path.join(self.path, artifact)
        label_run = None
        with open(artifact_path, "r") as fp:
            label_run = fp.read().strip()
        run_args = label_run.split()

        # If --name is provided, do not re-name due to potential linking
        # of containers. Warn user instead.
        # Else use namespace provided within answers.conf
        if '--name' in run_args:
            logger.info(
                "WARNING: Using --name provided within artifact file.")
        else:
            run_args.insert(
                run_args.index('run') + 1,
                "--name=%s_%s_%s" % (self.default_name, self.namespace,
                                     Utils.getUniqueUUID()))
        cmd = run_args
        if self.dryrun:
            logger.info("DRY-RUN: %s", " ".join(cmd))
        else:
            subprocess.check_call(cmd)
def prepareOrder(self):
    """
    Register each artifact in self.kube_order under its kube kind.

    Raises:
        ProviderFailedException: when an artifact has no "kind" key.
    """
    for artifact in self.artifacts:
        artifact_path = os.path.join(self.path, artifact)
        with open(artifact_path, "r") as fp:
            logger.debug(artifact_path)
            data = anymarkup.parse(fp)
        if "kind" not in data:
            raise ProviderFailedException("Malformed kube file")
        self.kube_order[data["kind"].lower()] = artifact
def init(self):
    """
    Initialise the Docker provider: namespace, container image name and
    a Docker client/server API version compatibility check.

    Raises:
        ProviderFailedException: when `docker version` cannot be run, or
            when the client API version is higher than the server's.
    """
    self.namespace = DEFAULT_NAMESPACE
    self.default_name = DEFAULT_CONTAINER_NAME

    logger.debug("Given config: %s", self.config)
    if self.config.get("namespace"):
        self.namespace = self.config.get("namespace")
    logger.debug("Namespace: %s", self.namespace)

    if "image" in self.config:
        self.image = Utils.sanitizeName(self.config.get("image"))
    else:
        # No image name supplied: fall back to a generated UUID.
        self.image = Utils.getUniqueUUID()
        logger.warning(
            "The artifact name has not been provided within Nulecule, "
            "using a UUID instead")
        logger.debug(
            "No image name found for artifact, using UUID %s in "
            "container name" % self.image)

    if self.dryrun:
        logger.info("DRY-RUN: Did not check Docker version compatibility")
        return

    cmd_check = ["docker", "version"]
    try:
        docker_version = subprocess.check_output(cmd_check).split("\n")
    except Exception as ex:
        raise ProviderFailedException(ex)

    client = ""
    server = ""
    for line in docker_version:
        # values look like "Client API version: 1.18" -> strip whitespace
        if line.startswith("Client API version"):
            client = line.split(":")[1].strip()
        if line.startswith("Server API version"):
            server = line.split(":")[1].strip()

    # Compare as dotted version numbers, not as plain strings: a string
    # comparison would wrongly report "1.9" > "1.10".
    from distutils.version import LooseVersion
    if client and server and LooseVersion(client) > LooseVersion(server):
        msg = (
            "Docker version in app image (%s) is higher than the one "
            "on host (%s). Please update your host." % (client, server))
        raise ProviderFailedException(msg)
def _resource_identity(self, path):
    """Finds the Kubernetes resource name / identity from resource manifest
    and raises if manifest is not supported.

    :arg path: Absolute path to Kubernetes resource manifest

    :return: str -- Resource name / identity

    :raises: ProviderFailedException
    """
    data = anymarkup.parse_file(path)
    api_version = data["apiVersion"]

    if api_version == "v1":
        return data["metadata"]["name"]

    if api_version in ("v1beta3", "v1beta2", "v1beta1"):
        # Beta API versions are explicitly rejected.
        raise ProviderFailedException(
            "%s is not supported API version, update Kubernetes "
            "artifacts to v1 API version. Error in processing "
            "%s manifest." % (api_version, path))

    raise ProviderFailedException("Malformed kube file: %s" % path)
def init(self):
    """
    Verify that the Docker client API version is not newer than the
    host's server API version.

    Raises:
        ProviderFailedException: when `docker version` cannot be run, or
            when the client API version is higher than the server's.
    """
    cmd_check = ["docker", "version"]
    try:
        docker_version = subprocess.check_output(cmd_check).split("\n")
    except Exception as ex:
        raise ProviderFailedException(ex)

    client = ""
    server = ""
    for line in docker_version:
        # values look like "Client API version: 1.18" -> strip whitespace
        if line.startswith("Client API version"):
            client = line.split(":")[1].strip()
        if line.startswith("Server API version"):
            server = line.split(":")[1].strip()

    # Compare as dotted version numbers, not as plain strings: a string
    # comparison would wrongly report "1.9" > "1.10".
    from distutils.version import LooseVersion
    if client and server and LooseVersion(client) > LooseVersion(server):
        msg = ("Docker version in app image (%s) is higher than the one "
               "on host (%s). Please update your host." % (client, server))
        raise ProviderFailedException(msg)
def init(self):
    """
    Locate the OpenShift client binary and the configuration file.

    Raises:
        ProviderFailedException: when the client binary cannot be found
            or the configuration file is not readable.
    """
    self.cli = find_executable(self.cli)
    if self.container and not self.cli:
        # Running inside a container: search the host's PATH, mounted
        # under /host, for the client.
        host_path = ["/host%s" % entry
                     for entry in os.environ.get("PATH").split(":")]
        self.cli = find_executable("osc", path=":".join(host_path))

    if not self.cli or not os.access(self.cli, os.X_OK):
        raise ProviderFailedException("Command %s not found" % self.cli)
    else:
        logger.debug("Using %s to run OpenShift commands.", self.cli)

    if "openshiftconfig" in self.config:
        self.config_file = self.config["openshiftconfig"]
    else:
        logger.warning("Configuration option 'openshiftconfig' not found")

    if not self.config_file or not os.access(self.config_file, os.R_OK):
        raise ProviderFailedException(
            "Cannot access configuration file %s" % self.config_file)
def parse_kubeconf_data(kubecfg):
    """
    Parse kubeconf data.

    Args:
        kubecfg (dict): Kubernetes config data

    Returns:
        dict of parsed values from config

    Raises:
        ProviderFailedException: when the current context, or its
            cluster/user entry, cannot be found in the config.
    """
    url = None
    token = None
    namespace = None
    tls_verify = True
    ca = None

    current_context = kubecfg["current-context"]
    logger.debug("current context: %s", current_context)

    try:
        # next() on a generator works on both Python 2 and 3; the
        # original filter(...)[0] breaks on Python 3, where filter()
        # returns a non-subscriptable iterator.
        context = next(co for co in kubecfg["contexts"]
                       if co["name"] == current_context)
        logger.debug("context: %s", context)

        cluster = next(cl for cl in kubecfg["clusters"]
                       if cl["name"] == context["context"]["cluster"])
        logger.debug("cluster: %s", cluster)

        user = next(usr for usr in kubecfg["users"]
                    if usr["name"] == context["context"]["user"])
        logger.debug("user: %s", user)
    except StopIteration:
        raise ProviderFailedException()

    url = cluster["cluster"]["server"]
    token = user["user"]["token"]
    if "namespace" in context["context"]:
        namespace = context["context"]["namespace"]
    if "insecure-skip-tls-verify" in cluster["cluster"]:
        tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"]
    elif "certificate-authority" in cluster["cluster"]:
        ca = cluster["cluster"]["certificate-authority"]

    return {
        PROVIDER_API_KEY: url,
        PROVIDER_AUTH_KEY: token,
        NAMESPACE_KEY: namespace,
        PROVIDER_TLS_VERIFY_KEY: tls_verify,
        PROVIDER_CA_KEY: ca
    }
def _process_artifacts(self):
    """
    Parse and validate Marathon artifacts.

    Parsed artifacts are saved to self.marathon_artifacts.

    Raises:
        ProviderFailedException: when an artifact cannot be parsed or
            has no `id` key.
    """
    for artifact in self.artifacts:
        logger.debug("Procesesing artifact: %s", artifact)
        data = None
        with open(os.path.join(self.path, artifact), "r") as fp:
            try:
                # env variables in marathon artifacts have to be string:string
                # force_types=None respects types from json file
                data = anymarkup.parse(fp, force_types=None)
                logger.debug("Parsed artifact %s", data)

                # every marathon app has to have id. 'id' key is also
                # used for showing messages
                if "id" not in data:
                    msg = ("Error processing %s artifact. There is no id"
                           % artifact)
                    cockpit_logger.error(msg)
                    raise ProviderFailedException(msg)
            except anymarkup.AnyMarkupError as e:
                # "except X, e" is Python-2-only syntax; "as" works on
                # Python 2.6+ and Python 3.
                msg = "Error processing artifact - %s" % e
                cockpit_logger.error(msg)
                raise ProviderFailedException(msg)

            self.marathon_artifacts.append(data)
def process_template(self, url, template):
    """
    POST a template to the given url for server-side processing.

    Args:
        url (str): endpoint to post the template to
        template (dict): template artifact data

    Returns:
        list of objects produced by processing the template

    Raises:
        ProviderFailedException: on any non-201 response.
    """
    (status_code, return_data) = Utils.make_rest_request(
        "post", url, verify=self._requests_tls_verify(), data=template)

    if status_code != 201:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        raise ProviderFailedException(msg)

    logger.info("template processed %s", template['metadata']['name'])
    logger.debug("processed template %s", return_data)
    return return_data['objects']
def deploy(self, url, artifact):
    """
    POST a single artifact to the given url.

    Args:
        url (str): endpoint to post the artifact to
        artifact (dict): artifact data

    Raises:
        ProviderFailedException: on any non-201 response.
    """
    (status_code, return_data) = Utils.make_rest_request(
        "post", url, verify=self._requests_tls_verify(), data=artifact)

    if status_code == 201:
        logger.info("Object %s successfully deployed.",
                    artifact['metadata']['name'])
        return

    msg = "%s %s" % (status_code, return_data)
    logger.error(msg)
    # TODO: remove running components (issue: #428)
    raise ProviderFailedException(msg)
def _process_artifact_data(self, artifact, data):
    """
    Process the data for an artifact

    Args:
        artifact (str): Artifact name
        data (dict): Artifact data
    """
    # kind has to be specified in artifact
    if "kind" not in data:
        raise ProviderFailedException(
            "Error processing %s artifact. There is no kind" % artifact)

    kind = data["kind"].lower()
    resource = self._kind_to_resource(kind)

    # check if resource is supported by apis
    supported = (resource in self.oapi_resources
                 or resource in self.kapi_resources)
    if not supported:
        raise ProviderFailedException(
            "Unsupported kind %s in artifact %s" % (kind, artifact))

    if kind == "template":
        # Templates expand into multiple objects; bucket each produced
        # object under its own kind.
        for obj in self._process_template(data):
            obj_kind = obj["kind"].lower()
            self.openshift_artifacts.setdefault(obj_kind, []).append(obj)
        return

    # add parsed artifact to dict
    self.openshift_artifacts.setdefault(kind, []).append(data)
def _check_required_params(self, exception=False):
    '''
    This checks to see if required parameters associated to the
    Kubernetes provider are passed. Only PROVIDER_API_KEY is *required*.
    Token may be blank.
    '''
    paramdict = self._build_param_dict()
    logger.debug("List of parameters passed: %s" % paramdict)

    # Check that the required parameters are passed. If not, error out.
    for required in [PROVIDER_API_KEY]:
        if paramdict[required] is not None:
            continue
        if not exception:
            return False
        raise ProviderFailedException(
            "You need to set %s in %s or pass it as a CLI param"
            % (required, ANSWERS_FILE))

    return True
def delete(self, url):
    """
    Delete object on given url

    Args:
        url (str): full url for artifact

    Raises:
        ProviderFailedException: error when calling remote api
    """
    (status_code, return_data) = Utils.make_rest_request(
        "delete", url, verify=self._requests_tls_verify())

    if status_code != 200:
        msg = "%s %s" % (status_code, return_data)
        logger.error(msg)
        raise ProviderFailedException(msg)
    logger.info("Successfully deleted.")
def get_kapi_resources(self):
    """
    Get kubernetes API resources
    """
    # get list of supported resources for each api
    (status_code, return_data) = Utils.make_rest_request(
        "get", self.kubernetes_api, verify=self._requests_tls_verify())

    if status_code != 200:
        raise ProviderFailedException("Cannot get Kubernetes resource list")

    # convert resources list of dicts to list of names
    names = [resource['name'] for resource in return_data["resources"]]
    logger.debug("Kubernetes resources %s", names)
    return names
def process_k8s_artifacts(self):
    """Processes Kubernetes manifests files and checks if manifest under
    process is valid.
    """
    for artifact in self.artifacts:
        apath = os.path.join(self.path, artifact)
        data = None
        with open(apath, "r") as fp:
            logger.debug(apath)
            try:
                data = anymarkup.parse(fp)
            except Exception:
                # log and re-raise the original parse error
                cockpit_logger.error(
                    "Error processing %s artifcats, Error:" % apath)
                raise
        if "kind" not in data:
            raise ProviderFailedException("Malformed kube file: %s" % apath)
        self.k8s_manifests.append((data["kind"].lower(), artifact))
def get_oapi_resources(self):
    """
    Get Openshift API resources
    """
    # get list of supported resources for each api
    (status_code, return_data) = Utils.make_rest_request(
        "get", self.openshift_api, verify=self._requests_tls_verify())

    if status_code != 200:
        raise ProviderFailedException("Cannot get OpenShift resource list")

    # convert resources list of dicts to list of names
    names = [resource['name'] for resource in return_data["resources"]]
    logger.debug("Openshift resources %s", names)
    return names
def _check_namespaces(self):
    '''
    This function checks to see whether or not the namespaces created
    in the cluster match the namespace that is associated and/or
    provided in the deployed application
    '''
    # Get the namespaces and output the currently used ones
    namespace_list = self.api.namespaces()
    logger.debug("There are currently %s namespaces in the cluster."
                 % str(len(namespace_list)))

    # Create a namespace list
    namespaces = [ns["metadata"]["name"] for ns in namespace_list]

    # Output the namespaces and check to see if the one provided exists
    logger.debug("Namespaces: %s" % namespaces)
    if self.namespace not in namespaces:
        raise ProviderFailedException(
            "%s namespace does not exist. Please create the namespace "
            "and try again." % self.namespace)