def retrieve_test_result(name, namespace):
    """Poll a pod for APB test results by exec'ing /usr/bin/test-retrieval.

    Returns the test output string, or None when the pod terminates (or the
    API keeps failing) without ever producing results.
    """
    count = 0
    try:
        openshift_config.load_kube_config()
        api = kubernetes_client.CoreV1Api()
    except Exception as e:
        # Bug fix: without a client we cannot poll at all; previously this
        # fell through and raised NameError on the undefined `api` below.
        print("Failed to get api client: {}".format(e))
        return None
    while True:
        try:
            count += 1
            api_response = kubernetes_stream(
                api.connect_get_namespaced_pod_exec,
                name, namespace,
                command="/usr/bin/test-retrieval",
                stderr=True, stdin=False,
                stdout=True, tty=False)
            if "test results are not available" not in api_response:
                return api_response
            sleep(WATCH_POD_SLEEP)
        except ApiException:
            # The exec endpoint errors while the pod is still starting up;
            # give up after 50 attempts or once the pod has terminated.
            if count >= 50:
                return None
            pod_phase = api.read_namespaced_pod(name, namespace).status.phase
            if pod_phase in ('Succeeded', 'Failed'):
                print("Pod phase {} without returning test results".format(pod_phase))
                return None
            sleep(WATCH_POD_SLEEP)
        except Exception as e:
            print("exception: %s" % e)
            return None
def delete_old_images(image_name):
    """Delete images in the OpenShift registry matching image_name so a
    fresh push does not leave duplicates behind.

    Failures are reported but never raised; duplicates are tolerable.
    """
    # Let's ignore the registry prefix for now because sometimes our tag
    # doesn't match the registry.
    if '/' in image_name:
        registry, image_name = image_name.split('/', 1)
    else:
        # Bug fix: an unqualified tag (no '/') previously raised ValueError
        # on the tuple unpacking above.
        registry = ''
    try:
        openshift_config.load_kube_config()
        oapi = openshift_client.OapiApi()
        image_list = oapi.list_image(_preload_content=False)
        image_list = json.loads(image_list.data)
        for image in image_list['items']:
            image_fqn, image_sha = image['dockerImageReference'].split("@")
            if image_name in image_fqn:
                print("Found image: %s" % image_fqn)
                if registry and registry not in image_fqn:
                    # This warning will only get displayed if a user has used
                    # --registry-route. This is because the route name gets
                    # collapsed into the service hostname when pushed to the
                    # registry.
                    print("Warning: Tagged image registry prefix doesn't match. Deleting anyway. Given: %s; Found: %s" %
                          (registry, image_fqn.split('/')[0]))
                oapi.delete_image(name=image_sha, body={})
                print("Successfully deleted %s" % image_sha)
    except Exception as e:
        print("Exception deleting old images: %s" % e)
        print("Not erroring out, this may cause duplicate images in the registry. Try: `oc get images`.")
    return
def create_role_binding():
    """Bind the cluster-admin role to service-account-1 in the default
    namespace so that test pods run with sufficient privileges."""
    binding = {
        'apiVersion': 'v1',
        'kind': 'RoleBinding',
        'metadata': {'name': 'service-account-1', 'namespace': 'default'},
        'subjects': [{'kind': 'ServiceAccount',
                      'name': 'service-account-1',
                      'namespace': 'default'}],
        'roleRef': {'name': 'cluster-admin'},
    }
    try:
        openshift_config.load_kube_config()
        api = openshift_client.OapiApi()
        api.create_namespaced_role_binding("default", binding)
    except Exception:
        api = openshift_client.OapiApi()
        # HACK: this is printing an error but is still actually creating the
        # role binding.
        # print("failed -%s" % e)
    print("Created Role Binding")
def get_asb_route():
    """Return the full URL of the OpenShift Ansible Broker, or None when
    no matching route can be found in either candidate namespace."""
    asb_route = None
    try:
        openshift_config.load_kube_config()
        oapi = openshift_client.OapiApi()
        route_list = oapi.list_namespaced_route('ansible-service-broker')
        if route_list.items == []:
            print("Didn't find OpenShift Ansible Broker route in namespace: "
                  "ansible-service-broker. Trying openshift-ansible-service-broker")
            route_list = oapi.list_namespaced_route('openshift-ansible-service-broker')
        if route_list.items == []:
            print("Still failed to find a route to OpenShift Ansible Broker.")
            return None
        # The broker route contains 'asb' but must not be the etcd route.
        for route in route_list.items:
            if 'asb' in route.metadata.name and 'etcd' not in route.metadata.name:
                asb_route = route.spec.host
    except Exception:
        asb_route = None
        return asb_route
    if asb_route is None:
        # Bug fix: previously fell through to `None + str`, raising TypeError
        # when no route matched the filter.
        return None
    url = asb_route + "/ansible-service-broker"
    if url.find("http") < 0:
        url = "https://" + url
    return url
def broker_request(broker, service_route, method, **kwargs):
    """Issue an HTTP request against the ansible-service-broker.

    broker may be None, in which case the route is discovered via
    get_asb_route(). Authentication uses basic auth when both
    basic_auth_username and basic_auth_password kwargs are provided,
    otherwise the kubeconfig bearer token. Returns the requests response;
    re-raises any failure after logging it.
    """
    if broker is None:
        broker = get_asb_route()
    if broker is None:
        raise Exception("Could not find route to ansible-service-broker. "
                        "Use --broker or log into the cluster using \"oc login\"")
    url = broker + service_route
    try:
        openshift_config.load_kube_config()
        # Bug fix: use .get() so a missing kwarg does not raise KeyError.
        if kwargs.get('basic_auth_username') is not None and kwargs.get('basic_auth_password') is not None:
            creds = "{0}:{1}".format(kwargs['basic_auth_username'],
                                     kwargs['basic_auth_password'])
            # b64encode requires bytes on Python 3; decode for the header value.
            headers = {'Authorization': "Basic " +
                       base64.b64encode(creds.encode('utf-8')).decode('utf-8')}
        else:
            token = openshift_client.configuration.api_key.get("authorization", "")
            headers = {'Authorization': token}
        response = requests.request(method, url, verify=kwargs["verify"],
                                    headers=headers, data=kwargs.get("data"))
    except Exception as e:
        print("ERROR: Failed broker request (%s) %s" % (method, url))
        raise e
    return response
def create_image_pod(image_name):
    """Launch a throwaway pod named 'test' in the default namespace that
    runs image_name's test entrypoint under service-account-1."""
    manifest = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {'name': "test"},
        'spec': {
            'containers': [{
                'image': image_name,
                'imagePullPolicy': 'IfNotPresent',
                'name': 'test',
                'command': ['entrypoint.sh', 'test'],
            }],
            'restartPolicy': 'Never',
            'serviceAccountName': 'service-account-1',
        }
    }
    try:
        openshift_config.load_kube_config()
        api = kubernetes_client.CoreV1Api()
        # The pod needs its service account and role binding in place first.
        create_service_account()
        create_role_binding()
        api.create_namespaced_pod("default", manifest)
        print("Created Pod")
    except Exception as e:
        print("failed - %s" % e)
def push_apb(registry, tag):
    """Authenticate against the cluster registry with the kubeconfig token,
    remove stale copies of the image, then push the freshly built tag."""
    try:
        docker_api = create_docker_client()
        openshift_config.load_kube_config()
        prefixed_key = openshift_client.Configuration().get_api_key_with_prefix(
            'authorization')
        if prefixed_key is None:
            raise Exception(
                "No api key found in kubeconfig. NOTE: system:admin " +
                "*cannot* be used with apb, since it does not have a token.")
        # The key has the form "<prefix> <token>"; keep only the token.
        token = prefixed_key.split(" ")[1]
        user = "unused"
        if is_minishift():
            user = "******"
        docker_api.login(username=user, password=token,
                         registry=registry, reauth=True)
        delete_old_images(tag)
        print("Pushing the image, this could take a minute...")
        docker_api.images.push(tag)
        print("Successfully pushed image: " + tag)
    except docker.errors.DockerException:
        print("Error accessing the docker API. Is the daemon running?")
        raise
    except docker.errors.APIError:
        print("Failed to login to the docker API.")
        raise
def get_openshift_client(self, conf_filepath=None):
    """Populate OpenShift and Kubernetes API clients from the local
    kubeconfig.

    NOTE(review): conf_filepath is accepted but never used here — confirm
    whether callers rely on it.
    """
    config.load_kube_config()
    self.kube_client = client.CoreV1Api()
    self.kube_v1_batch_client = client.BatchV1Api()
    self.openshift_client = o_client.OapiApi()
def delete_project(project):
    """Delete an OpenShift project, logging and re-raising any API failure."""
    print("Deleting project {}".format(project))
    try:
        openshift_config.load_kube_config()
        openshift_client.OapiApi().delete_project(project)
    except ApiException as e:
        print("Delete project failure: {}".format(e))
        raise e
    print("Project deleted")
def relist_service_broker(kwargs):
    """Ask the Service Catalog to relist the broker's services by
    incrementing spec.relistRequests on the broker resource.

    Authenticates with basic auth when both credentials are present in
    kwargs, otherwise with the kubeconfig bearer token. Any failure is
    printed, never raised.
    """
    try:
        openshift_config.load_kube_config()
        token = openshift_client.configuration.api_key['authorization']
        cluster_host = openshift_client.configuration.host
        broker_name = kwargs['broker_name']
        # Bug fix: use .get() so missing basic-auth kwargs do not KeyError.
        if kwargs.get('basic_auth_username') is not None and kwargs.get('basic_auth_password') is not None:
            creds = "{0}:{1}".format(kwargs['basic_auth_username'],
                                     kwargs['basic_auth_password'])
            # b64encode requires bytes on Python 3.
            headers = {'Authorization': "Basic " +
                       base64.b64encode(creds.encode('utf-8')).decode('utf-8')}
        else:
            headers = {'Authorization': token}

        response = requests.request(
            "get", broker_resource_url(cluster_host, broker_name),
            verify=kwargs['verify'], headers=headers)
        if response.status_code != 200:
            errMsg = "Received non-200 status code while retrieving broker: {}\n".format(broker_name) + \
                "Response body:\n" + \
                str(response.text)
            raise Exception(errMsg)

        spec = response.json().get('spec', None)
        if spec is None:
            # Typo fix: message previously read "reponse".
            errMsg = "Spec not found in broker response. Response body: \n{}".format(response.text)
            raise Exception(errMsg)

        relist_requests = spec.get('relistRequests', None)
        if relist_requests is None:
            errMsg = "relistRequests not found within the spec of broker: {}\n".format(broker_name) + \
                "Are you sure you are using a ServiceCatalog of >= v0.0.21?"
            raise Exception(errMsg)

        # Bumping relistRequests signals the catalog to re-fetch the catalog.
        inc_relist_requests = relist_requests + 1
        headers['Content-Type'] = 'application/strategic-merge-patch+json'
        response = requests.request(
            "patch", broker_resource_url(cluster_host, broker_name),
            json={'spec': {'relistRequests': inc_relist_requests}},
            verify=kwargs['verify'], headers=headers)
        if response.status_code != 200:
            errMsg = "Received non-200 status code while patching relistRequests of broker: {}\n".format(
                broker_name) + \
                "Response body:\n{}".format(str(response.text))
            raise Exception(errMsg)
        print("Successfully relisted the Service Catalog")
    except Exception as e:
        print("Relist failure: {}".format(e))
def clean_up_image_run():
    """Tear down the resources created for a test-image run: the service
    account, the 'test' pod, and the matching role binding."""
    try:
        openshift_config.load_kube_config()
        core_api = kubernetes_client.CoreV1Api()
        os_api = openshift_client.OapiApi()
        delete_opts = kubernetes_client.V1DeleteOptions()
        core_api.delete_namespaced_service_account("service-account-1", "default", delete_opts)
        core_api.delete_namespaced_pod("test", "default", delete_opts)
        os_api.delete_namespaced_role_binding("service-account-1", "default", delete_opts)
    except Exception as e:
        print("unable to clean up image - %s" % e)
def _get_api_client(self):
    """Return an OapiApi client; tolerate a missing kubeconfig file but
    re-raise any other IOError."""
    try:
        config.load_kube_config()
    except IOError as e:
        if e.errno != 2:  # 2 == ENOENT: kubeconfig simply not present
            raise
        print("could not find existing kube config")
        traceback.print_exc()
    return client.OapiApi()
def cmdrun_setup(**kwargs):
    """Validate and prepare the local environment for APB development.

    In order: verifies a reachable Docker daemon, verifies cluster access by
    listing projects, best-effort creates an 'apb-developer' user and an
    'apb-development' cluster role binding, then warns when the broker,
    service catalog, or default namespace is missing. Exits the process on
    fatal failures.
    """
    # A working Docker daemon is mandatory; bail out immediately otherwise.
    try:
        create_docker_client()
    except Exception as e:
        print("Error! Failed to connect to Docker client. Please ensure it is running. Exception: %s" % e)
        exit(1)
    # Listing projects doubles as a connectivity check against the cluster.
    try:
        openshift_config.load_kube_config()
        # base64.b64decode(username = kubernetes_client.configuration.get_basic_auth_token().split(' ')[1]))
        # print(kubernetes_client.configuration.password)
        oapi = openshift_client.OapiApi()
        projlist = oapi.list_project()
    except Exception as e:
        print("\nError! Failed to list namespaces on OpenShift cluster. Please ensure OCP is running.")
        print("Exception: %s" % e)
        exit(1)
    # Best effort: create the developer user; failure is reported, not fatal.
    try:
        helper = OpenShiftObjectHelper(api_version='v1', kind='user')
        user_body = {'metadata': {'name': 'apb-developer'}}
        helper.create_object(body=user_body)
    except Exception as e:
        print("\nError! Failed to create APB developer user. Exception: %s" % e)
    # Best effort: bind the development role to the new user.
    try:
        crb = create_cluster_role_binding('apb-development', 'apb-developer')
        print(crb)
    except Exception as e:
        print("\nError! %s" % e)
    # Scan visible namespaces for the components APB development relies on.
    broker_installed = False
    svccat_installed = False
    proj_default_access = False
    for project in projlist.items:
        name = project.metadata.name
        if name == "default":
            proj_default_access = True
        elif "ansible-service-broker" in name:
            broker_installed = True
        elif "service-catalog" in name:
            svccat_installed = True
    # A missing broker namespace is fatal; the others are warnings only.
    if broker_installed is False:
        print("Error! Could not find OpenShift Ansible Broker namespace. Please ensure that the broker is\
 installed and that the current logged in user has access.")
        exit(1)
    if svccat_installed is False:
        print("Error! Could not find OpenShift Service Catalog namespace. Please ensure that the Service\
 Catalog is installed and that the current logged in user has access.")
    if proj_default_access is False:
        print("Error! Could not find the Default namespace. Please ensure that the current logged in user has access.")
def init_openshift_client(self):
    """Connect OpenShift/Kubernetes clients, honoring an explicit
    KUBECFG_PATH environment override when one is set."""
    cfg_file = os.environ.get('KUBECFG_PATH')
    if cfg_file is not None:
        config.load_kube_config(config_file=cfg_file)
    else:
        config.load_kube_config()
    self.openshift_client = o_client.OapiApi()
    self.kube_client = k_client.CoreV1Api()
    self.kube_v1_batch_client = k_client.BatchV1Api()
def get_asb_route():
    """Return the host of the first 'asb-' route in the
    ansible-service-broker namespace, or None when it cannot be found."""
    asb_route = None
    try:
        openshift_config.load_kube_config()
        oapi = openshift_client.OapiApi()
        route_list = oapi.list_namespaced_route('ansible-service-broker')
        for route in route_list.items:
            if route.metadata.name.find('asb-') >= 0:
                asb_route = route.spec.host
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        asb_route = None
    return asb_route
def get_registry_service_ip():
    """Look up the cluster-internal docker-registry service in the default
    namespace and return 'clusterIP:port', or None when it is missing."""
    ip = None
    try:
        openshift_config.load_kube_config()
        api = kubernetes_client.CoreV1Api()
        service = api.read_namespaced_service(namespace="default", name="docker-registry")
        ip = service.spec.cluster_ip + ":" + str(service.spec.ports[0].port)
        print("Found registry IP at: " + ip)
    except ApiException as e:
        # Bug fix: the exception was passed as a second print() argument
        # (trailing tuple output) instead of being %-formatted in.
        print("Exception occurred trying to find docker-registry service: %s" % e)
        return None
    return ip
def delete_controller_manager_pod():
    """Locate and delete the service-catalog controller-manager pod.
    Silently does nothing when no matching pod can be found."""
    target = None
    try:
        openshift_config.load_kube_config()
        api = kubernetes_client.CoreV1Api()
        for pod in api.list_namespaced_pod('service-catalog').items:
            if 'controller-manager-' in pod.metadata.name:
                target = pod.metadata.name
    except Exception:
        target = None
    if target:
        api.delete_namespaced_pod(target, 'service-catalog',
                                  kubernetes_client.V1DeleteOptions())
def cmdrun_push(**kwargs):
    """Publish an APB spec.

    With --openshift, builds and pushes the image to the in-cluster
    registry and bootstraps the broker; otherwise POSTs the base64-encoded
    spec to the broker's /apb/spec endpoint. Optionally relists the
    service broker afterwards.
    """
    project = kwargs['base_path']
    spec = get_spec(project, 'string')
    dict_spec = get_spec(project, 'dict')
    # The broker expects the raw spec base64-encoded in the request payload.
    blob = base64.b64encode(spec)
    broker = kwargs["broker"]
    if broker is None:
        broker = get_asb_route()
    data_spec = {'apbSpec': blob}
    print(spec)
    if kwargs['openshift']:
        # Assume we are using internal registry, no need to push to broker
        registry = get_registry_service_ip()
        tag = registry + "/" + kwargs['namespace'] + "/" + dict_spec['name']
        print("Building image with the tag: " + tag)
        try:
            client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
            client.images.build(path=project, tag=tag, dockerfile=kwargs['dockerfile'])
            openshift_config.load_kube_config()
            # Token is the part after the auth scheme prefix in the api key.
            token = openshift_client.configuration.api_key['authorization'].split(" ")[1]
            client.login(username="******", password=token, registry=registry, reauth=True)
            client.images.push(tag)
            print("Successfully pushed image: " + tag)
            bootstrap(broker, kwargs.get("basic_auth_username"),
                      kwargs.get("basic_auth_password"), kwargs["verify"])
        except docker.errors.DockerException:
            print("Error accessing the docker API. Is the daemon running?")
            raise
        except docker.errors.APIError:
            print("Failed to login to the docker API.")
            raise
    else:
        response = broker_request(kwargs["broker"], "/apb/spec", "post", data=data_spec,
                                  verify=kwargs["verify"],
                                  basic_auth_username=kwargs.get("basic_auth_username"),
                                  basic_auth_password=kwargs.get("basic_auth_password"))
        if response.status_code != 200:
            print("Error: Attempt to add APB to the Broker returned status: %d" % response.status_code)
            print("Unable to add APB to Ansible Service Broker.")
            exit(1)
        print("Successfully added APB to Ansible Service Broker")
    if not kwargs['no_relist']:
        relist_service_broker(kwargs)
def create_service_account():
    """Create 'service-account-1' in the default namespace for test pods.
    Failures are printed, never raised."""
    try:
        openshift_config.load_kube_config()
        api = kubernetes_client.CoreV1Api()
        service_account = {
            'apiVersion': 'v1',
            'kind': 'ServiceAccount',
            'metadata': {
                'name': 'service-account-1',
                'namespace': 'default',
            },
        }
        api.create_namespaced_service_account("default", service_account)
        # Typo fix: message previously read "Created Serice Account".
        print("Created Service Account")
    except Exception as e:
        print("failed - %s" % e)
def retrieve_test_result():
    """Poll the 'test' pod for its output by exec'ing /usr/bin/test-retrieval.

    Retries while the exec endpoint raises ApiException (pod still starting),
    giving up after 50 such errors. Returns the output string, or None when
    results never become available.
    """
    cont = True
    count = 0
    while cont:
        try:
            count += 1
            openshift_config.load_kube_config()
            api = kubernetes_client.CoreV1Api()
            api_response = api.connect_post_namespaced_pod_exec(
                "test", "default",
                command="/usr/bin/test-retrieval", tty=False)
            if "non-zero exit code" not in api_response:
                return api_response
        except ApiException:
            # Exec fails while the pod is starting; cap the retries at 50.
            if count >= 50:
                cont = False
        except Exception as e:
            # Typo fix: message previously read "execption".
            print("exception: %s" % e)
            cont = False
def get_asb_route():
    """Return the full URL of the ansible-service-broker (https by default),
    or None when no 'asb-' route exists."""
    asb_route = None
    try:
        openshift_config.load_kube_config()
        oapi = openshift_client.OapiApi()
        route_list = oapi.list_namespaced_route('ansible-service-broker')
        for route in route_list.items:
            if route.metadata.name.find('asb-') >= 0:
                asb_route = route.spec.host
    except Exception:
        asb_route = None
        return asb_route
    if asb_route is None:
        # Bug fix: previously fell through to `None + str`, raising TypeError
        # when no matching route was found.
        return None
    url = asb_route + "/ansible-service-broker"
    if url.find("http") < 0:
        url = "https://" + url
    return url
def get_registry_service_ip(namespace, svc_name):
    """Return 'clusterIP:port' for svc_name in namespace, or None when the
    service is missing or its spec has no ports."""
    ip = None
    try:
        openshift_config.load_kube_config()
        api = kubernetes_client.CoreV1Api()
        service = api.read_namespaced_service(namespace=namespace, name=svc_name)
        if service is None:
            # Message fix: previously hard-coded "docker-registry"/"default"
            # even though the service and namespace are parameters.
            print("Couldn't find service %s in namespace %s. Erroring." % (svc_name, namespace))
            return None
        if service.spec.ports == []:
            print("Service spec appears invalid. Erroring.")
            return None
        ip = service.spec.cluster_ip + ":" + str(service.spec.ports[0].port)
        print("Found registry IP at: " + ip)
    except ApiException as e:
        print("Exception occurred trying to find %s service in namespace %s: %s" % (svc_name, namespace, e))
        return None
    return ip
def push_apb(registry, tag):
    """Log into the cluster registry using the kubeconfig token and push
    the image tag. Docker API failures are logged and re-raised."""
    try:
        client = create_docker_client()
        openshift_config.load_kube_config()
        api_key = openshift_client.Configuration().get_api_key_with_prefix(
            'authorization')
        if api_key is None:
            # Consistency/bug fix: the sibling push_apb variant guards this;
            # without it, a tokenless kubeconfig crashes on None.split().
            raise Exception(
                "No api key found in kubeconfig. NOTE: system:admin " +
                "*cannot* be used with apb, since it does not have a token.")
        token = api_key.split(" ")[1]
        username = "******" if is_minishift() else "unused"
        client.login(username=username, password=token,
                     registry=registry, reauth=True)
        print("Pushing the image, this could take a minute...")
        client.images.push(tag)
        print("Successfully pushed image: " + tag)
    except docker.errors.DockerException:
        print("Error accessing the docker API. Is the daemon running?")
        raise
    except docker.errors.APIError:
        print("Failed to login to the docker API.")
        raise
def create_project(project):
    """Request creation of an OpenShift project and return its name.
    An already-existing project (HTTP 409) is treated as success."""
    print("Creating project {}".format(project))
    request_body = {
        'apiVersion': 'v1',
        'kind': 'ProjectRequest',
        'metadata': {'name': project}
    }
    try:
        openshift_config.load_kube_config()
        api = openshift_client.OapiApi()
        api.create_project_request(request_body)
    except ApiException as e:
        if e.status != 409:
            raise e
        print("Project {} already exists".format(project))
        return project
    print("Created project")
    # TODO: Evaluate the project request to get the actual project name
    return project
def reset(self):
    """Reload the kube configuration from disk (persisting refreshed
    credentials back to the config file); a missing kubeconfig is ignored."""
    try:
        config.load_kube_config(persist_config=True)
    except FileNotFoundError:
        # No kubeconfig present yet - nothing to reset.
        pass
import os.path import re from bitmath import parse_string_unsafe, GiB, MiB from openshift import client,config from kubernetes import config, client as kubeclient from kubernetes.client.rest import ApiException from pprint import pprint from flask import Flask, render_template application = Flask(__name__) SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token" if os.path.isfile(SERVICE_TOKEN_FILENAME): config.load_incluster_config() else: config.load_kube_config() def to_bytes(value): return parse_string_unsafe(value).to_Byte().bytes def to_mib(value): return MiB(bytes=to_bytes(value)) def to_gib(value): b = parse_string_unsafe(value).to_Byte().bytes return GiB(bytes=b) def to_millicores(value): try: unit = re.split('([a-zA-Z]+)',value)
def main():
    """Nagios check entry point: deploy a test nginx (optionally with a
    PVC), poll its route for expected content, then clean up and exit with
    a Nagios state code via exit_with_stats().
    """
    # start_time is read by exit_with_stats elsewhere to report duration.
    global start_time
    start_time = time.time()
    parser = argparse.ArgumentParser(
        description='Nagios check for OpenShift deployments.')
    parser.add_argument(
        '--use_pvc',
        help='Include adding a PersistentVolumeClaim in the test.',
        action='store_true',
        dest='use_pvc',
        default=False)
    parser.add_argument(
        '--pvc_delay',
        help='Time in seconds to wait after PVC has been created.',
        dest='pvc_delay',
        default=5)
    parser.add_argument('--timeout',
                        help='How long to wait for the test to finish.',
                        dest='timeout',
                        default=300)
    parser.add_argument('--storage_class',
                        help='What storage class to use if a PVC is created.',
                        dest='storage_class',
                        default=None)
    args = parser.parse_args()
    # With a PVC the page content differs, so grep for the PVC marker text.
    if args.use_pvc:
        string_to_grep = CHECK_TEXT
    else:
        string_to_grep = 'Welcome to nginx!'
    timeout = int(args.timeout)
    pvc_delay = int(args.pvc_delay)
    try:
        oso_config.load_kube_config()
        kube_config.load_kube_config()
        oso_api = oso_client.OapiApi()
        kube_api = kube_client.CoreV1Api()
        # Randomized, timestamped namespace so concurrent checks don't collide.
        rnd = random.randint(0, 999)
        namespace = 'nrpe-check-{}-{}'.format(
            datetime.datetime.now().strftime('%y-%m-%d-%H-%M-%S'), rnd)
    except:
        # NOTE(review): bare except is presumably deliberate for a monitoring
        # plugin (any failure => CRITICAL), but it also catches SystemExit.
        print('Unexpected error:', sys.exc_info()[0])
        exit_with_stats(NAGIOS_STATE_CRITICAL)
    try:
        route_url = create_nginx(oso_api, kube_api, namespace, args.use_pvc,
                                 pvc_delay, args.storage_class)
        poll_nginx(route_url, string_to_grep, timeout)
    except kube_client.rest.ApiException as e:
        print(e)
        exit_with_stats(NAGIOS_STATE_CRITICAL)
    except PollTimeoutException as e:
        print(e)
        exit_with_stats(NAGIOS_STATE_CRITICAL)
    except requests.exceptions.ConnectionError as e:
        print(e)
        exit_with_stats(NAGIOS_STATE_CRITICAL)
    except:
        print('Unexpected error: ', sys.exc_info()[0])
        exit_with_stats(NAGIOS_STATE_CRITICAL)
    finally:
        # Cleanup is more reliable if we sleep few seconds here (with openshift 3.11)
        # Increased sleep time from 5 > 15 > 25 for avoiding nrpe namespace stuck
        time.sleep(25)
        cleanup(oso_api, namespace)
    exit_with_stats(NAGIOS_STATE_OK)
def cmdrun_push(**kwargs):
    """Publish an APB.

    With --broker_push, POSTs the base64-encoded spec to the broker's
    /v2/apb endpoint. Otherwise resolves a registry (explicit route >
    minishift > in-cluster service IP), builds and pushes the image there,
    bootstraps the broker, and optionally relists the service catalog.
    """
    project = kwargs['base_path']
    spec = get_spec(project, 'string')
    dict_spec = get_spec(project, 'dict')
    # The broker expects the raw spec base64-encoded in the request payload.
    blob = base64.b64encode(spec)
    data_spec = {'apbSpec': blob}
    broker = kwargs["broker"]
    if broker is None:
        broker = get_asb_route()
    print(spec)
    if kwargs['broker_push']:
        response = broker_request(
            broker, "/v2/apb", "post", data=data_spec,
            verify=kwargs["verify"],
            basic_auth_username=kwargs.get("basic_auth_username"),
            basic_auth_password=kwargs.get("basic_auth_password"))
        if response.status_code != 200:
            print(
                "Error: Attempt to add APB to the Broker returned status: %d"
                % response.status_code)
            print("Unable to add APB to Ansible Service Broker.")
            exit(1)
        print("Successfully added APB to Ansible Service Broker")
        return
    # Registry push path: prefer an explicit route, then minishift, then the
    # in-cluster registry service IP.
    namespace = kwargs['reg_namespace']
    service = kwargs['reg_svc_name']
    registry_route = kwargs['reg_route']
    if registry_route:
        registry = registry_route
    elif is_minishift():
        registry = get_minishift_registry()
    else:
        registry = get_registry_service_ip(namespace, service)
        if registry is None:
            print("Failed to find registry service IP address.")
            raise Exception("Unable to get registry IP from namespace %s"
                            % namespace)
    tag = registry + "/" + kwargs['namespace'] + "/" + dict_spec['name']
    print("Building image with the tag: " + tag)
    try:
        client = create_docker_client()
        client.images.build(path=project, tag=tag,
                            dockerfile=kwargs['dockerfile'])
        openshift_config.load_kube_config()
        # Token is the part after the auth scheme prefix in the api key.
        token = openshift_client.Configuration().get_api_key_with_prefix(
            'authorization').split(" ")[1]
        username = "******" if is_minishift() else "unused"
        client.login(username=username, password=token,
                     registry=registry, reauth=True)
        print("Pushing the image, this could take a minute...")
        client.images.push(tag)
        print("Successfully pushed image: " + tag)
        bootstrap(broker, kwargs.get("basic_auth_username"),
                  kwargs.get("basic_auth_password"), kwargs["verify"])
    except docker.errors.DockerException:
        # NOTE(review): this message is split across two lines in the raw
        # dump; assumed to be the same one-line string used elsewhere.
        print("Error accessing the docker API. Is the daemon running?")
        raise
    except docker.errors.APIError:
        print("Failed to login to the docker API.")
        raise
    if not kwargs['no_relist']:
        relist_service_broker(kwargs)
data = load(stream) # Disable SSL warnings urllib3.disable_warnings() # FIXME Authentication should be via pyhton API if data['endpoint'].has_key('token'): print "Attempting to auth using token..." print "--token={}".format(data['endpoint']['token']) result = subprocess.check_output(['oc', 'login', data['endpoint']['options'] if data['endpoint'].has_key('options') else '', data['endpoint']['uri'], "--token={}".format(data['endpoint']['token'])]) else: print "Attempting to auth using username & password..." result = subprocess.check_output(['oc', 'login', '-u', data['endpoint']['username'], '-p', data['endpoint']['password'], data['endpoint']['options'] if data['endpoint'].has_key('options') else '', data['endpoint']['uri']]) print result config.load_kube_config() oapi = client.OapiApi() project_list = oapi.list_project() now = datetime.utcnow() default_max_age_in_hours = data['default_max_age_in_hours'] filtered_projects = [] for project in project_list.items: if project.metadata.name in data['projects']['preserve']: print "Project {} is whitelisted and will not be deleted".format(project.metadata.name) else: filtered_projects.append(project) print "\n"
def reset(self):
    """Reload the kube configuration from disk (persisting refreshed
    credentials back to the config file); a missing kubeconfig is ignored."""
    try:
        config.load_kube_config(persist_config=True)
    except FileNotFoundError:
        # No kubeconfig present yet - nothing to reset.
        pass