def check_status_waves(app_name):
    """Verify if all the WAVE home and workers have been deployed and UP in the system.
    """

    jupiter_config.set_globals()

    """
        This loads the node lists in use
    """
    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes, homes = utilities.k8s_get_nodes_worker(path1)
    pprint(nodes)

    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.MAPPER_NAMESPACE

    # Get proper handles or pointers to the k8-python tool to call different functions.
    extensions_v1_beta1_api = client.ExtensionsV1beta1Api()
    v1_delete_options = client.V1DeleteOptions()
    core_v1_api = client.CoreV1Api()

    result = True
    for key in nodes:
        label = "app=%s_wave_" % (app_name)
        label = label + key
        resp = None
        resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)

        # the first matching pod must be in the Running phase
        if resp.items:
            a = resp.items[0]
            if a.status.phase != "Running":
                print("Pod Not Running", key)
                result = False

    if result:
        print("All systems GOOOOO!!")
    else:
        print("Wait before trying again!!!!")

    return result
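# Hedged usage sketch for check_status_waves(): poll until every WAVE pod for
# the application reports Running. The application name is illustrative, not
# taken from the original module.
import time

while not check_status_waves("demo_app"):
    time.sleep(10)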
def __init__(self, args={}):
    super().__init__('kubernetes', args)
    try:
        # attempt to load incluster config if available
        config.load_incluster_config()
    except kubernetes.config.ConfigException:
        # load local config
        config.load_kube_config(
            config_file=self.args.get('kube_config_file', None),
            context=self.args.get('context', None))
    self.core = client.CoreV1Api()
    self.ext = client.ExtensionsV1beta1Api()
    self.custom = client.CustomObjectsApi()
    self.router = create_router(self, self.args.get('router', 'none'))
def main():
    # 1. Load the config file to find the endpoint and obtain credentials.
    # If no argument is provided, the config will be loaded from the default location.
    config.load_kube_config(config_file="kubeconfig.yaml")

    # 2. Create the python client API handle.
    extensions_v1beta1 = client.ExtensionsV1beta1Api()

    # 3. Create the client-side object.
    # Create a deployment object with client-python API. The deployment we
    # created is same as the `nginx-deployment.yaml` in the /examples folder.
    deployment = create_deployment_object()

    # 4. Call the client API to issue the actual request.
    # kubectl get deployment
    # create_deployment(extensions_v1beta1, deployment)
    # update_deployment(extensions_v1beta1, deployment)

    # Call the client API to issue the delete request.
    delete_deployment(extensions_v1beta1)
def main():
    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    config.load_kube_config()
    extensions_v1beta1 = client.ExtensionsV1beta1Api()

    # Create a deployment object with client-python API. The deployment we
    # created is same as the 'nginx-deployment.yaml' in the /examples folder.
    deployment = create_deployment_object()

    # get_deployment()
    if get_deployment() == 0:
        delete_deployment(extensions_v1beta1)
        time.sleep(30)
        create_deployment(extensions_v1beta1, deployment)
    else:
        print("It does not exist!")
        create_deployment(extensions_v1beta1, deployment)
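# The two main() snippets above rely on helpers that are not shown here.  A
# minimal sketch of three of them, modelled on the upstream client-python
# deployment example and assuming a client release that still ships the
# ExtensionsV1beta1* models; the nginx names mirror the referenced
# nginx-deployment.yaml.
from kubernetes import client


def create_deployment_object():
    container = client.V1Container(
        name="nginx",
        image="nginx:1.7.9",
        ports=[client.V1ContainerPort(container_port=80)])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "nginx"}),
        spec=client.V1PodSpec(containers=[container]))
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=3, template=template)
    return client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name="nginx-deployment"),
        spec=spec)


def create_deployment(api_instance, deployment):
    resp = api_instance.create_namespaced_deployment(
        body=deployment, namespace="default")
    print("Deployment created. status='%s'" % str(resp.status))


def delete_deployment(api_instance):
    resp = api_instance.delete_namespaced_deployment(
        name="nginx-deployment",
        namespace="default",
        body=client.V1DeleteOptions(propagation_policy='Foreground',
                                    grace_period_seconds=5))
    print("Deployment deleted. status='%s'" % str(resp.status))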
def create_deployment(file_name, name_space, deployment_id=None, replica_size=1,
                      container_specs=None, time_out=None):
    with open(os.path.join(os.path.dirname(__file__), file_name)) as f:
        dep = yaml.safe_load(f)

        # Set unique deployment id
        if deployment_id:
            dep['metadata']['generateName'] += '{0}-'.format(deployment_id)

        # Set replica size
        dep['spec']['replicas'] = replica_size

        if container_specs:
            dep = container_specs.update_deployment(dep)

        k8s_beta = client.ExtensionsV1beta1Api()
        resp1 = k8s_beta.create_namespaced_deployment(body=dep,
                                                      namespace=name_space)
        wait_to_deployment_to_be_ready(resp1.metadata.name, name_space,
                                       time_out=time_out)
        return resp1
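# Hedged usage sketch for create_deployment() above; the manifest name,
# namespace, deployment id and timeout are illustrative.  The YAML file is
# expected to sit next to this module and to use metadata.generateName so the
# id suffix can be appended.
resp = create_deployment("worker-deployment.yaml", "jobs",
                         deployment_id="run42", replica_size=2, time_out=300)
print("Created deployment %s" % resp.metadata.name)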
def rollDeployment():
    """
    Patches the deployment to trigger a reboot
    """
    extApi = client.ExtensionsV1beta1Api()

    body = """{"spec":{"template":{"spec":{"containers":[{"name":\
"","env":[{"name":"LAST_MANUAL_RESTART",\
"value":""}]}]}}}}"""
    patch = json.loads(body)
    patch['spec']['template']['spec']['containers'][0]['name'] = containerName
    patch['spec']['template']['spec']['containers'][0]['env'][0]['value'] \
        = str(nowUnix)
    logger.debug("Patch created: %s", json.dumps(patch, indent=2))

    logger.info("Applying patch to restart pods")
    ret = extApi.patch_namespaced_deployment(deploymentName, namespace, patch,
                                             pretty=True)
    logger.debug(ret)
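# Illustrative variant of the restart trick used by rollDeployment(): bumping
# a dummy LAST_MANUAL_RESTART env var changes the pod template, so the
# deployment rolls its pods.  The container and deployment names below are
# assumed, not taken from the original module's globals.
import time
from kubernetes import client, config

config.load_kube_config()
restart_patch = {
    "spec": {"template": {"spec": {"containers": [{
        "name": "my-container",
        "env": [{"name": "LAST_MANUAL_RESTART", "value": str(int(time.time()))}],
    }]}}}
}
client.ExtensionsV1beta1Api().patch_namespaced_deployment(
    "my-deployment", "default", restart_patch, pretty=True)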
def api_client_from_version(api_version):
    return {
        "v1": kube_client.CoreV1Api(),
        "apps/v1": kube_client.AppsV1Api(),
        "batch/v1": kube_client.BatchV1Api(),
        "batch/v1beta1": kube_client.BatchV1beta1Api(),
        "extensions/v1beta1": kube_client.ExtensionsV1beta1Api(),
        "rbac.authorization.k8s.io/v1beta1": kube_client.RbacAuthorizationV1beta1Api(),
        "rbac.authorization.k8s.io/v1": kube_client.RbacAuthorizationV1Api(),
    }[api_version]
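# Hedged usage sketch for api_client_from_version(): dispatch on a manifest's
# apiVersion field and use the returned handle.  The manifest dict and
# namespace are illustrative.
from kubernetes import config

config.load_kube_config()
manifest = {"apiVersion": "extensions/v1beta1", "kind": "Deployment"}
api = api_client_from_version(manifest["apiVersion"])  # ExtensionsV1beta1Api
for dep in api.list_namespaced_deployment("default").items:
    print(dep.metadata.name)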
def patch_namespaced_deployment(name, namespace="default", body={}, extv1Client=None):
    """
    Patch the given deployment
    """
    if extv1Client is None:
        # load the kubernetes config
        load_k8s_config()
        # create the api client
        extv1Client = client.ExtensionsV1beta1Api()

    log.debug("Patching deployment '%s' in namespace '%s'" % (name, namespace))
    log.debug("Patch body: \n{0}\n".format(body))
    deployment = extv1Client.patch_namespaced_deployment(name=name,
                                                         namespace=namespace,
                                                         body=body)
    log.debug("Patched deployment:\n{0}\n".format(deployment))
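# Hedged usage sketch for the patch_namespaced_deployment() wrapper above: a
# strategic-merge patch that scales a deployment to three replicas.  The
# deployment name is illustrative.
scale_patch = {"spec": {"replicas": 3}}
patch_namespaced_deployment("nginx-deployment", namespace="default",
                            body=scale_patch)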
def get_hosts():
    config.load_kube_config()
    k8s_client = client.ExtensionsV1beta1Api()
    result = k8s_client.list_ingress_for_all_namespaces()
    print("Listing result")
    hosts = []
    if result.items:
        for item in result.items:
            for rules in item.spec.rules:
                hosts.append(rules.host)
    pprint(hosts)
    return hosts
def kill_microservice(name: str, ns: str = "default",
                      label_selector: str = "name in ({name})",
                      secrets: Secrets = None):
    """
    Kill a microservice by `name` in the namespace `ns`.

    The microservice is killed by deleting the deployment for it without
    a graceful period to trigger an abrupt termination.

    The selected resources are matched by the given `label_selector`.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)

    v1 = client.AppsV1beta1Api(api)
    ret = v1.list_namespaced_deployment(ns, label_selector=label_selector)
    logger.debug("Found {d} deployments named '{n}'".format(
        d=len(ret.items), n=name))

    body = client.V1DeleteOptions()
    for d in ret.items:
        res = v1.delete_namespaced_deployment(
            d.metadata.name, ns, body)

    v1 = client.ExtensionsV1beta1Api(api)
    ret = v1.list_namespaced_replica_set(ns, label_selector=label_selector)
    logger.debug("Found {d} replica sets named '{n}'".format(
        d=len(ret.items), n=name))

    body = client.V1DeleteOptions()
    for r in ret.items:
        res = v1.delete_namespaced_replica_set(
            r.metadata.name, ns, body)

    v1 = client.CoreV1Api(api)
    ret = v1.list_namespaced_pod(ns, label_selector=label_selector)
    logger.debug("Found {d} pods named '{n}'".format(
        d=len(ret.items), n=name))

    body = client.V1DeleteOptions()
    for p in ret.items:
        res = v1.delete_namespaced_pod(
            p.metadata.name, ns, body)
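# Hedged usage sketch for kill_microservice(): the default label selector is
# "name in ({name})", so the targeted deployment, replica sets and pods are
# expected to carry a matching `name` label.  Service and namespace below are
# illustrative.
kill_microservice("orders", ns="staging")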
def __init__(self):
    """
    Loads authentication and cluster information and initializes the API.

    This is a simple way to load the config file from minikube; use
    configuration.api_key['authorization'] = $token for API-key auth.
    """
    if 'KUBERNETES_PORT' in os.environ:
        config.load_incluster_config()
    else:
        config.load_kube_config()
    self.v1api = client.CoreV1Api()
    self.v1ext = client.ExtensionsV1beta1Api()

    self.conf = ConfigParser.ConfigParser()
    self.conf.read('config.ini')
    self.neoclient = Neo4jClient(self.conf.get('neo4j', 'connect_url'),
                                 self.conf.get('neo4j', 'user'),
                                 self.conf.get('neo4j', 'password'))
def watch_for_policies():
    config.load_kube_config()
    v1 = client.ExtensionsV1beta1Api()
    network_policy = {}
    network_policy_updated = {}
    w = watch.Watch()
    for event in w.stream(v1.list_network_policy_for_all_namespaces):
        print(event)
        print("Event: %s %s %s" % (event['type'],
                                   event['object'].kind,
                                   event['object'].metadata.name))
        if event['type'] == 'ADDED':
            if event['object'].metadata.uid not in network_policy:
                network_policy[event['object'].metadata.uid] = event['raw_object']
                result = yield gen.Task(create_new_policy_rules,
                                        network_policy,
                                        event['object'].metadata.uid)
        elif event['type'] == 'UPDATED':
            network_policy_updated[event['object'].metadata.uid] = event['raw_object']
            result = yield gen.Task(create_updated_policy_rules,
                                    network_policy_updated)
    IOLoop.instance().stop()
def update(self):
    """
    Update an ingress rule.
    """
    # 1. Retrieve the spec
    ingress = self.specifications()

    # 2. Apply
    try:
        v1beta1 = client.ExtensionsV1beta1Api()
        v1beta1.patch_namespaced_ingress(
            name=self.definition['repo'],
            namespace=self.definition['namespace'],
            body=ingress
        )
    except:
        LOGGER.exception('Cannot update the ingress rule')
        return False
    return True
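# Hedged sketch of the kind of body update() might pass to
# patch_namespaced_ingress; the client also accepts a plain dict body.  The
# host, service name and port below are illustrative, written in
# extensions/v1beta1 form.
ingress_patch = {
    "spec": {
        "rules": [{
            "host": "myrepo.example.com",
            "http": {"paths": [{
                "path": "/",
                "backend": {"serviceName": "myrepo", "servicePort": 80},
            }]},
        }]
    }
}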
def install_gpu_drivers(api_client):
    """Install GPU drivers on the cluster."""
    logging.info("Install GPU Drivers.")
    # Fetch the daemonset to install the drivers.
    link = "https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v1.11/nvidia-device-plugin.yml"  # pylint: disable=line-too-long
    logging.info("Using daemonset file: %s", link)
    f = urllib.urlopen(link)
    daemonset_spec = yaml.load(f)
    ext_client = k8s_client.ExtensionsV1beta1Api(api_client)
    try:
        namespace = daemonset_spec["metadata"]["namespace"]
        ext_client.create_namespaced_daemon_set(namespace, daemonset_spec)
    except rest.ApiException as e:
        # Status appears to be a string.
        if e.status == 409:
            logging.info("GPU driver daemon set has already been installed")
        else:
            raise
def main():
    # Load the client configurations
    config.load_kube_config(
        os.path.join(os.environ["HOME"], '/home/osboxes/.kube/config'))

    # Get network policies from all namespaces
    try:
        v1 = client.ExtensionsV1beta1Api()
        net_pol_list = v1.list_network_policy_for_all_namespaces()
        pprint(net_pol_list)
        for net_pol in net_pol_list.items:
            print("Name: %s" % (net_pol.metadata.name))
            print("Ingress Rules: %s" % (net_pol.spec.ingress))
            print("Egress Rules: %s" % (net_pol.spec.egress))
    except ApiException as e:
        print(
            "Exception occurred when calling list_network_policy_for_all_namespaces: %s\n"
            % e)