def create_nlu_hpa(hpa, namespace):
    api_instance = client.AutoscalingV1Api()
    api_response = api_instance.create_namespaced_horizontal_pod_autoscaler(
        body=hpa, namespace=namespace)
    log.debug("Horizontal pod autoscaler created %s. status='%s'"
              % (hpa.metadata.name, str(api_response.status)))
    return api_response
def main():
    parser = argparse.ArgumentParser(description="deployment scaling replicas")
    parser.add_argument(
        "--namespace", "-n", type=str, required=True, help="hpa namespace"
    )
    parser.add_argument("--hpa", type=str, required=True, help="hpa name")
    parser.add_argument(
        "--min_replicas", type=int, help="minReplicas number", default=-1
    )
    parser.add_argument(
        "--max_replicas", type=int, help="maxReplicas number", default=-1
    )
    args = parser.parse_args()

    # jitter
    time.sleep(random.uniform(1, 10))

    if os.getenv("KUBERNETES_SERVICE_HOST"):
        config.load_incluster_config()
    else:
        config.load_kube_config()

    v1 = client.AutoscalingV1Api()
    patch_hpa(
        client=v1,
        hpa=args.hpa,
        namespace=args.namespace,
        max_replicas=args.max_replicas,
        min_replicas=args.min_replicas,
    )
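# Hedged sketch (added for illustration, not from the original source): main()
# above delegates to a patch_hpa() helper that is not shown. Assuming the -1
# defaults mean "leave that bound unchanged", a minimal implementation using
# only documented AutoscalingV1Api calls could look like this.
def patch_hpa(client, hpa, namespace, max_replicas=-1, min_replicas=-1):
    # Read the current HPA, adjust only the bounds that were supplied,
    # then patch the object back.
    current = client.read_namespaced_horizontal_pod_autoscaler(
        name=hpa, namespace=namespace)
    if min_replicas >= 0:
        current.spec.min_replicas = min_replicas
    if max_replicas >= 0:
        current.spec.max_replicas = max_replicas
    return client.patch_namespaced_horizontal_pod_autoscaler(
        name=hpa, namespace=namespace, body=current)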
def handle_autoscaler(self, name: str):
    if self.resources and self.resources.get('auto_scale', False):
        if self.find_autoscaler(name):
            self.LOG.debug('Removing old autoscaler: {}'.format(name))
            k8s_client.AutoscalingV1Api(self.api_client). \
                delete_namespaced_horizontal_pod_autoscaler(name=name,
                                                            namespace=self.namespace)
        self.LOG.info("Creating horizontal Pod autoscaler")
        template = dict(
            apiVersion='autoscaling/v1',
            kind='HorizontalPodAutoscaler',
            metadata=dict(name=name, namespace=self.namespace),
            spec=dict(minReplicas=self.resources.get('minReplicas', 1),
                      maxReplicas=self.resources.get('maxReplicas', 10),
                      targetCPUUtilizationPercentage=self.resources.get(
                          'targetCPUUtilizationPercentage', 50),
                      scaleTargetRef=dict(apiVersion='apps/v1',
                                          name=name,
                                          kind='Deployment')))
        try:
            k8s_utils.create_from_dict(self.api_client, template)
            self.LOG.debug(template)
        except Exception as e:
            self.LOG.debug("Failed to create autoscaler: {}".format(name))
            self.LOG.debug(repr(e))
def _detect_api_object(self, api_version):
    # Due to https://github.com/kubernetes-client/python/issues/387
    if api_version == 'apps/v1beta1':
        return client.AppsV1beta1Api()
    if api_version == 'v1':
        return client.CoreV1Api()
    if api_version == 'extensions/v1beta1':
        return client.ExtensionsV1beta1Api()
    if api_version == 'batch/v1':
        return client.BatchV1Api()
    if api_version == 'batch/v2alpha1':
        return client.BatchV2alpha1Api()
    if api_version == 'batch/v1beta1':
        return client.BatchV1beta1Api()
    if api_version == 'policy/v1beta1':
        return client.PolicyV1beta1Api()
    if api_version == 'storage.k8s.io/v1':
        return client.StorageV1Api()
    if api_version == 'apps/v1':
        return client.AppsV1Api()
    if api_version == 'autoscaling/v1':
        return client.AutoscalingV1Api()
    if api_version == 'rbac.authorization.k8s.io/v1':
        return client.RbacAuthorizationV1Api()
    if api_version == 'scheduling.k8s.io/v1alpha1':
        return client.SchedulingV1alpha1Api()
    if api_version == 'scheduling.k8s.io/v1beta1':
        return client.SchedulingV1beta1Api()
    if api_version == 'test/test':
        return K8sClientMock(self.name)
def discover_hpa():
    data = {"data": []}
    new_client = client.ApiClient(conf)
    api_instance = client.AutoscalingV1Api(new_client)
    try:
        api_response = api_instance.list_horizontal_pod_autoscaler_for_all_namespaces(
            watch=False)
        # pprint(api_response)
        for i in api_response.items:
            # print("%s\t%s\t%s" % (i.metadata.name, i.spec.scale_target_ref.name, i.status.current_replicas))
            tmp = {
                "{#HPA_NAME}": i.metadata.name,
                "{#HPA_NAMESPACE}": i.metadata.namespace,
                "{#HPA_TARGET_NAME}": i.spec.scale_target_ref.name,
                "{#HPA_MIN_POD}": i.spec.min_replicas,
                "{#HPA_MAX_POD}": i.spec.max_replicas,
                "{#HPA_TARGET_CPU_PERCENTAGE}": i.spec.target_cpu_utilization_percentage
            }
            data['data'].append(tmp)
        print(json.dumps(data, sort_keys=True, indent=4))
    except ApiException as e:
        print("Exception when calling AutoscalingV1Api->list_horizontal_pod_autoscaler_for_all_namespaces: %s\n" % e)
def modify_k8s_hpa():
    infos = None
    status = None
    try:
        Infos = request.get_json()
        api_instance = client.AutoscalingV1Api()
        if request.method == 'POST':
            try:
                api_instance.patch_namespaced_horizontal_pod_autoscaler(
                    name=Infos['name'],
                    namespace=namespace,
                    body=client.V1HorizontalPodAutoscaler(
                        spec=client.V1HorizontalPodAutoscalerSpec(
                            max_replicas=int(Infos['max_update']),
                            target_cpu_utilization_percentage=int(Infos['cpu_update'].replace('%', '')),
                            scale_target_ref=client.V1CrossVersionObjectReference(
                                kind='Deployment',
                                name=Infos['target_ref']))))
            except Exception as e:
                logging.error(e)
                infos = 'Failed to modify parameters!'
            else:
                status = 'ok'
                infos = 'Parameters modified successfully!'
        if request.method == 'DELETE':
            try:
                api_instance.delete_namespaced_horizontal_pod_autoscaler(
                    name=Infos['name'],
                    namespace=namespace,
                    body=client.V1DeleteOptions())
            except Exception as e:
                logging.error(e)
                infos = 'Failed to delete %s!' % Infos['name']
            else:
                status = 'ok'
                infos = 'Deleted %s successfully!' % Infos['name']
    except Exception as e:
        logging.error(e)
    finally:
        return jsonify({'status': status, 'infos': infos})
def delete_nlu_hpa(hpa_name, namespace):
    api_instance = client.AutoscalingV1Api()
    api_response = api_instance.delete_namespaced_horizontal_pod_autoscaler(
        name=hpa_name, namespace=namespace)
    log.debug("Horizontal pod autoscaler deleted %s. status='%s'"
              % (hpa_name, str(api_response.status)))
    return api_response
def addHPA(rev_data):
    optDict = {}
    for i in rev_data.split('&'):
        temp = i.split('=')
        opt = {temp[0]: temp[1].strip()}
        optDict.update(opt)
    # print(optDict)
    api_instance = client.AutoscalingV1Api()
    hpa_target_ref = client.V1CrossVersionObjectReference(
        name=optDict['current_hpa_deployment'], kind=kind)
    hpa_spec = client.V1HorizontalPodAutoscalerSpec(
        max_replicas=int(optDict['hpa_max_qant']),
        scale_target_ref=hpa_target_ref,
        min_replicas=int(optDict['hpa_min_qant']),
        target_cpu_utilization_percentage=int(optDict['cpu_threshold']))
    hpa_metadata = client.V1ObjectMeta(name=optDict['current_hpa_deployment'],
                                       namespace=optDict['current_ns'])
    body = client.V1HorizontalPodAutoscaler(metadata=hpa_metadata, spec=hpa_spec)
    namespace = optDict['current_ns']  # str | object name and auth scope, such as for teams and projects
    pretty = 'true'  # str | If 'true', then the output is pretty printed. (optional)
    api_status = {}
    try:
        api_response = api_instance.create_namespaced_horizontal_pod_autoscaler(
            namespace=namespace, body=body, pretty=pretty)
        pprint(api_response)
        hpa_status = api_response.status
        api_status['current_replicas'] = hpa_status.current_replicas
        api_status['cpu_util'] = hpa_status.current_cpu_utilization_percentage
        api_status['last_scale_time'] = hpa_status.last_scale_time
        api_status['status_code'] = 200
        return api_status
    except ApiException as e:
        print("Exception when calling AutoscalingV1Api->create_namespaced_horizontal_pod_autoscaler: %s\n" % e)
def __init__(self, auth_dict):
    '''
    set kubernetes configuration strings
    :param auth_dict:
    '''
    try:
        self.configuration = client.Configuration()
        # kube cluster host
        self.configuration.host = auth_dict['host']
        self.configuration.verify_ssl = True
        # certificate-authority-data
        self.configuration.ssl_ca_cert = auth_dict['ssl_ca_cert_file']
        # client-certificate-data
        self.configuration.cert_file = auth_dict['ssl_cert_file']
        # client-key-data
        self.configuration.key_file = auth_dict['ssl_key_file']
        # http proxy
        self.configuration.proxy = auth_dict['proxy']
        # check if debug env value is True/False to enable debugging
        self.configuration.debug = os.environ.get('KUBE_DEBUG', False)
        # kube cluster context
        self.configuration.api_key['context'] = auth_dict['context']
        self.v1 = client.CoreV1Api(client.ApiClient(self.configuration))
        self.v1_extension = client.ExtensionsV1beta1Api(
            client.ApiClient(self.configuration))
        self.storage = client.StorageV1Api(
            client.ApiClient(self.configuration))
        self.scale = client.AutoscalingV1Api(
            client.ApiClient(self.configuration))
    except BaseException:
        logger_settings.logger.info(
            'There is some generic problem parsing the config file')
def hpa_apply():
    try:
        reload(MyForm)
        form = MyForm.FormK8sHpa()
        if form.submit.data:
            context = form.contexts.data
            deployment = form.deployment.data
            max_replica = form.max_replica.data
            min_replica = form.min_replica.data
            cpu_value = form.cpu_value.data
            if max_replica and min_replica and cpu_value:
                exist_hpa = []
                config.load_kube_config(config_file, context)
                api_instance = client.AutoscalingV1Api()
                try:
                    ret = api_instance.list_horizontal_pod_autoscaler_for_all_namespaces()
                    for i in ret.items:
                        exist_hpa.append(i.spec.scale_target_ref.name)
                    if deployment in exist_hpa:
                        # The HPA already exists, so patch it.
                        api_instance.patch_namespaced_horizontal_pod_autoscaler(
                            name='%s-hpa' % deployment,
                            namespace=namespace,
                            body=client.V1HorizontalPodAutoscaler(
                                spec=client.V1HorizontalPodAutoscalerSpec(
                                    max_replicas=int(max_replica),
                                    min_replicas=int(min_replica),
                                    target_cpu_utilization_percentage=int(cpu_value),
                                    scale_target_ref=client.V1CrossVersionObjectReference(
                                        api_version='extensions/v1beta1',
                                        kind='Deployment',
                                        name=deployment))))
                    else:
                        # The HPA does not exist yet, so create it.
                        api_instance.create_namespaced_horizontal_pod_autoscaler(
                            namespace=namespace,
                            body=client.V1HorizontalPodAutoscaler(
                                metadata=client.V1ObjectMeta(
                                    name='%s-hpa' % deployment,
                                    namespace=namespace),
                                spec=client.V1HorizontalPodAutoscalerSpec(
                                    max_replicas=int(max_replica),
                                    min_replicas=int(min_replica),
                                    target_cpu_utilization_percentage=int(cpu_value),
                                    scale_target_ref=client.V1CrossVersionObjectReference(
                                        api_version='extensions/v1beta1',
                                        kind='Deployment',
                                        name=deployment))))
                    return redirect(url_for('k8s.hpa'))
                except Exception as e:
                    logging.error(e)
    except Exception as e:
        logging.error(e)
        return redirect(url_for('error'))
    return render_template('k8s_hpa.html', form=form)
def dump_horizontalpodautoscaler(self, name, namespace):
    api_instance = client.AutoscalingV1Api(self.new_client)
    try:
        # print("dump hpa:" + name)
        api_response = api_instance.read_namespaced_horizontal_pod_autoscaler(
            name, namespace, async_req=False)
        data = filter_dict(api_response.to_dict())
        self.write_yaml_file(data, "horizontal_pod_autoscaler", name, namespace)
    except ApiException as e:
        print("Exception when calling AutoscalingV1Api->read_namespaced_horizontal_pod_autoscaler: %s\n" % e)
def get_resource_api(
    api_client: client.ApiClient = None, **kwargs
) -> "client.AutoscalingV1Api":
    """
    Returns an instance of the kubernetes API client associated with
    this object.
    """
    if api_client:
        kwargs["api_client"] = api_client
    return client.AutoscalingV1Api(**kwargs)
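# Hedged usage sketch for get_resource_api() above. It assumes a kubeconfig is
# available to load; the listing call is the standard AutoscalingV1Api method.
from kubernetes import client, config

config.load_kube_config()
hpa_api = get_resource_api(client.ApiClient())
for hpa in hpa_api.list_horizontal_pod_autoscaler_for_all_namespaces(watch=False).items:
    print(hpa.metadata.namespace, hpa.metadata.name, hpa.spec.max_replicas)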
def hpa():
    try:
        td = time.strftime("%Y-%m-%d", time.localtime())
        valus = []
        db_k8s_deploy = db_op.k8s_deploy
        db_project = db_op.project_list
        Key = 'op_k8s_ingress_log'
        keys = tables = ('name', 'deployment', 'max replicas', 'min replicas',
                         'current replicas', 'CPU threshold', 'current CPU',
                         'current QPS', 'manage')
        v1 = client.AutoscalingV1Api()
        ret = v1.list_horizontal_pod_autoscaler_for_all_namespaces()
        for i in ret.items:
            try:
                rps = 0
                RPS = []
                project = db_k8s_deploy.query.with_entities(db_k8s_deploy.project).filter(
                    db_k8s_deploy.deployment == i.spec.scale_target_ref.name).limit(1).all()
                if project:
                    domains = db_project.query.with_entities(db_project.domain).filter(
                        db_project.project == project[0][0]).limit(1).all()
                    if domains[0][0]:
                        for domain in domains[0][0].split(','):
                            vals = RC.hgetall('%s_%s_%s' % (Key, domain, td))
                            vals = sorted(vals.items(), key=lambda item: item[0])
                            if vals:
                                RPS.append(int(int(vals[-1][-1]) / 60))
                        if RPS:
                            rps = RPS[0]
                            if len(RPS) > 1:
                                rps = reduce(lambda x, y: x + y, RPS)
                valus.append([i.metadata.name,
                              i.spec.scale_target_ref.name,
                              i.spec.max_replicas,
                              i.spec.min_replicas,
                              i.status.current_replicas,
                              '{0}%'.format(i.spec.target_cpu_utilization_percentage),
                              '{0}%'.format(i.status.current_cpu_utilization_percentage),
                              rps])
            except Exception as e:
                logging.error(e)
        td = time.strftime('%Y-%m-%d', time.localtime())
        Key = 'op_hpa_chart_%s_%s' % (g.context, td)
        infos = RC.hgetall(Key)
        infos = sorted(infos.items(), key=lambda item: item[0].split('_')[-1])
        line = Line('Real-time HPA autoscaling status', width='110%', height='250px',
                    title_pos='8%', title_text_size=14)
        for project in valus:
            attr = []
            vals = []
            for info in infos:
                if project[0] in info[0]:
                    attr.append(str(info[0].split('_')[-1]))
                    vals.append(int(info[1]))
            line.add(project[0], attr, vals, is_toolbox_show=False, is_smooth=True,
                     mark_point=["max", "min"], mark_point_symbolsize=60,
                     legend_pos='40%', is_datazoom_show=True,
                     datazoom_range=[v for v in range(100, 10)],
                     datazoom_type='both')
        return render_template('k8s-resource.html', valus=valus, tables=tables,
                               keys=keys, line=line, resource='HPA')
    except Exception as e:
        logging.error(e)
def find_autoscaler(self, name: str):
    api_response = k8s_client.AutoscalingV1Api(self.api_client). \
        list_namespaced_horizontal_pod_autoscaler(namespace=self.namespace).to_dict()
    for i in api_response.get('items', []):
        if i.get('metadata', {}).get('name', '') == name:
            return i
    return None
def backup_kubernetes_deployment(kubernetes_model: KubernetesModel,
                                 application_model: ApplicationModel,
                                 service_model: ServiceModel):
    """
    Backup Kubernetes deployment.
    :param kubernetes_model:
    :param application_model:
    :param service_model:
    :return:
    """
    full_config_path = get_full_config_path(kubernetes_model.config_path)
    from kubernetes import client, config
    config.load_kube_config(full_config_path)

    save_dir = Path(api.dashboard_config.DIR_KUBE_CONFIG,
                    application_model.application_name)
    save_dir.mkdir(parents=True, exist_ok=True)
    api_client = client.ApiClient()

    """Deployment"""
    apps_v1_api = client.AppsV1Api()
    v1_deployment = apps_v1_api.read_namespaced_deployment(
        name="deploy-{0}".format(service_model.service_id),
        namespace=service_model.service_level,
        exact=True,
        export=True)
    json.dump(api_client.sanitize_for_serialization(v1_deployment),
              Path(save_dir, "deploy-{0}.json".format(service_model.service_id)).open("w", encoding='utf-8'),
              ensure_ascii=False,
              indent=2)

    """Service"""
    core_vi_api = client.CoreV1Api()
    v1_service = core_vi_api.read_namespaced_service(
        name="svc-{0}".format(service_model.service_id),
        namespace=service_model.service_level,
        exact=True,
        export=True)
    json.dump(api_client.sanitize_for_serialization(v1_service),
              Path(save_dir, "svc-{0}.json".format(service_model.service_id)).open("w", encoding='utf-8'),
              ensure_ascii=False,
              indent=2)

    """Autoscaler"""
    autoscaling_v1_api = client.AutoscalingV1Api()
    v1_horizontal_pod_autoscaler = autoscaling_v1_api.read_namespaced_horizontal_pod_autoscaler(
        name="hpa-{0}".format(service_model.service_id),
        namespace=service_model.service_level,
        exact=True,
        export=True)
    json.dump(api_client.sanitize_for_serialization(v1_horizontal_pod_autoscaler),
              Path(save_dir, "hpa-{0}.json".format(service_model.service_id)).open("w", encoding='utf-8'),
              ensure_ascii=False,
              indent=2)
    return
def delete(self, application_id: int, service_id: int):
    """delete_service"""
    aobj = db.session.query(Application).filter(
        Application.application_id == application_id).one_or_none()
    if aobj is None:
        raise Exception("No such application_id.")
    sobj = db.session.query(Service).filter(
        Service.application_id == application_id,
        Service.service_id == service_id).one_or_none()
    if sobj is None:
        raise Exception("No such service_id.")

    if aobj.kubernetes_id is None:
        db.session.query(Service).filter(
            Service.application_id == application_id,
            Service.service_id == service_id).delete()
        db.session.flush()
    else:
        kubernetes_id = aobj.kubernetes_id
        kobj = db.session.query(Kubernetes).filter(
            Kubernetes.kubernetes_id == kubernetes_id).one_or_none()
        if kobj is None:
            raise Exception("No such kubernetes_id.")
        config_path = kobj.config_path
        from kubernetes import client, config
        config.load_kube_config(config_path)

        apps_v1 = client.AppsV1Api()
        v1_deployment = apps_v1.delete_namespaced_deployment(
            name="{0}-deployment".format(sobj.service_name),
            namespace=sobj.service_level,
            body=client.V1DeleteOptions())
        core_vi = client.CoreV1Api()
        v1_service = core_vi.delete_namespaced_service(
            name="{0}-service".format(sobj.service_name),
            namespace=sobj.service_level,
            # body=client.V1DeleteOptions()  # FIXME add this after v6.0.0
        )
        extensions_v1_beta = client.ExtensionsV1beta1Api()
        v1_beta1_ingress = extensions_v1_beta.delete_namespaced_ingress(
            name="{0}-ingress".format(sobj.service_name),
            namespace=sobj.service_level,
            body=client.V1DeleteOptions())
        autoscaling_v1 = client.AutoscalingV1Api()
        v1_horizontal_pod_autoscaler = autoscaling_v1.delete_namespaced_horizontal_pod_autoscaler(
            name="{0}-autoscaling".format(sobj.service_name),
            namespace=sobj.service_level,
            body=client.V1DeleteOptions())

        applist = set((aobj.application_name, ))
        update_dbs_kubernetes(kubernetes_id, applist)

    response_body = {"status": True, "message": "Success."}
    db.session.commit()
    db.session.close()
    return response_body
def task_run():
    try:
        # Fetch per-business traffic data
        db_business = db_op.business
        db_project = db_op.project_list
        business = db_business.query.with_entities(db_business.id, db_business.business).all()
        year = time.strftime('%Y', time.localtime())
        ot = datetime.datetime.now() - datetime.timedelta(days=0)
        ot = ot.strftime('%Y-%m-%dT00:00:00Z')
        Key = 'op_business_pv_%s' % ot.split('T')[0]
        Influx_cli = InfluxDBClient(influxdb_host, influxdb_port, influxdb_user,
                                    influxdb_pw, 'analysis_logs')
        business = {busi[0]: busi[1] for busi in business if busi}
        for id in business:
            business_domain = db_project.query.with_entities(distinct(db_project.domain)).filter(and_(
                db_project.business_id == int(id), db_project.domain != '')).all()
            if business_domain:
                pv_sum = []
                try:
                    for hosts in [domain[0] for domain in business_domain if domain]:
                        if ',' in hosts:
                            hosts = hosts.split(',')
                        else:
                            hosts = [hosts]
                        for host in hosts:
                            try:
                                cmd = 'select sum(mean_pv) from ' + 'analysis%s' % year + \
                                      " where time >='%s' and host = '%s';" % (ot, host)
                                result = Influx_cli.query(cmd)
                                if result:
                                    for infos in result.get_points():
                                        if infos:
                                            pv_sum.append(infos['sum'] * 60)
                            except Exception as e:
                                logging.error(e)
                except Exception as e:
                    logging.error(e)
                if pv_sum:
                    pv_sum = reduce(lambda x, y: x + y, pv_sum)
                    RC_CLUSTER.hset(Key, business[id], pv_sum)
    except Exception as e:
        logging.error(e)
    # Fetch the current HPA replica counts from k8s
    try:
        td = time.strftime('%Y-%m-%d', time.localtime())
        th = time.strftime('%H:%M', time.localtime())
        for context in contexts:
            config.load_kube_config(config_file, context=context)
            v1 = client.AutoscalingV1Api()
            ret = v1.list_horizontal_pod_autoscaler_for_all_namespaces()
            Key = 'op_hpa_chart_%s_%s' % (context, td)
            for i in ret.items:
                RC.hset(Key, '%s_%s' % (i.metadata.name, th), i.status.current_replicas)
    except Exception as e:
        logging.error(e)
def dump_horizontalpodautoscalers(self):
    print("start dump horizontal pod autoscalers...")
    api_instance = client.AutoscalingV1Api(self.new_client)
    try:
        for ns in self.namespaces:
            api_response = api_instance.list_namespaced_horizontal_pod_autoscaler(ns, watch=False)
            for i in api_response.items:
                self.dump_horizontalpodautoscaler(i.metadata.name, ns)
    except ApiException as e:
        print("Exception when calling AutoscalingV1Api->list_namespaced_horizontal_pod_autoscaler: %s\n" % e)
    print("dump horizontal pod autoscaler done.\n")
def confirm_horizontal_pod_autoscaler(context, namespace):
    if context is None:
        raise SystemExit(
            "invalid empty context for HorizontalPodAutoscaler given")
    if namespace is None:
        raise SystemExit(
            "invalid empty namespace for HorizontalPodAutoscaler given")
    load_kube(context)
    api = client.AutoscalingV1Api()
    return general_confirm(
        "HorizontalPodAutoscaler",
        lambda: api.list_namespaced_horizontal_pod_autoscaler(namespace),
        lambda i: i.metadata.name)
def scale_down():
    # Called at night
    # 1. Check whether we should scale down.
    core_client = client.CoreV1Api()
    pods: client.V1PodList = core_client.list_namespaced_pod('default')
    pod: client.V1Pod
    for pod in pods.items:
        pod_name: str = pod.metadata.name
        if pod.status.phase != 'Running':
            logging.info(f'Ignoring {pod_name} with phase {pod.status.phase}')
            continue
        if not any(
                [pod_name.startswith(app_name) for app_name in PLATFORM_APPS]):
            logging.info(f'Non-platform {pod_name} is still running,'
                         f' not scaling down the cluster')
            return

    # 2. Scale down apps
    logging.info('Deleting all autoscalers')
    auto_client = client.AutoscalingV1Api()
    autoscalers: client.V1HorizontalPodAutoscalerList = \
        auto_client.list_namespaced_horizontal_pod_autoscaler('default')
    a: client.V1HorizontalPodAutoscaler
    for a in autoscalers.items:
        name = a.metadata.name
        logging.info(f'Deleting autoscaler {name}')
        auto_client.delete_namespaced_horizontal_pod_autoscaler(name, 'default')

    logging.info('Scaling down platform apps')
    apps_client = client.AppsV1Api()
    zero_spec = {
        'spec': {
            'replicas': 0,
        }
    }
    for app in PLATFORM_APPS:
        if app in NONSCALABLE_APPS:
            continue
        try:
            logging.info(f'Scaling down {app}')
            apps_client.patch_namespaced_deployment_scale(app, 'default', zero_spec)
        except ApiException as e:
            logging.info(e)  # Likely app doesn't exist

    logging.info('Sleeping for 10s to allow nodes to terminate first')
    time.sleep(10)
def __init__(self):
    self.logger = logging.getLogger(__name__)
    try:
        config.load_kube_config()
    except:
        self.logger.warning("unable to load kube-config")

    self.v1 = client.CoreV1Api()
    self.v1Beta1 = client.AppsV1beta1Api()
    self.extensionsV1Beta1 = client.ExtensionsV1beta1Api()
    self.autoscalingV1Api = client.AutoscalingV1Api()
    self.rbacApi = client.RbacAuthorizationV1beta1Api()
    self.batchV1Api = client.BatchV1Api()
    self.batchV2Api = client.BatchV2alpha1Api()
def __init__(self, kubeconfig, namespace, hpa_name, **kwargs):
    config.load_kube_config(kubeconfig)
    self.namespace = namespace
    self.hpa_name = hpa_name
    self.target_api_version = kwargs.get('target_api_version')
    self.target_kind = kwargs.get('target_kind')
    self.target_name = kwargs.get('target_name')
    self.min_replicas = kwargs.get('min_replicas')
    self.max_replicas = kwargs.get('max_replicas')
    self.target_cpu_utilization_percentage = kwargs.get(
        'target_cpu_utilization_percentage')
    self.autoscaling_v1 = client.AutoscalingV1Api()
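# Hedged sketch (assumption, not part of the original class): given the
# attributes captured by __init__ above, a companion method that applies them
# to the named HPA might look like this. The method name "patch" and the use
# of a full V1HorizontalPodAutoscaler body are assumptions.
def patch(self):
    body = client.V1HorizontalPodAutoscaler(
        metadata=client.V1ObjectMeta(name=self.hpa_name, namespace=self.namespace),
        spec=client.V1HorizontalPodAutoscalerSpec(
            min_replicas=self.min_replicas,
            max_replicas=self.max_replicas,
            target_cpu_utilization_percentage=self.target_cpu_utilization_percentage,
            scale_target_ref=client.V1CrossVersionObjectReference(
                api_version=self.target_api_version,
                kind=self.target_kind,
                name=self.target_name)))
    return self.autoscaling_v1.patch_namespaced_horizontal_pod_autoscaler(
        name=self.hpa_name, namespace=self.namespace, body=body)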
def deploy(self):
    load_kube_config()
    self.api = client.CoreV1Api(client.ApiClient(config=kube_api_configuration))
    self.api_beta1 = client.AppsV1beta1Api(client.ApiClient(config=kube_api_configuration))
    self.api_extensions_beta1 = client.ExtensionsV1beta1Api(client.ApiClient(config=kube_api_configuration))
    self.api_autoscaling = client.AutoscalingV1Api(client.ApiClient(config=kube_api_configuration))
    self.create_service()
    self.create_deployment()
    self.create_hpa()
    if self.exposed:
        self.create_ingress()
def horizontal_scale(self, name, pod_name, pod_kind, max, metric):
    """Create horizontal pod autoscaler, based on metric."""
    api = client.AutoscalingV1Api()
    pd_scale = client.V1HorizontalPodAutoscaler()
    pd_scale.metadata = client.V1ObjectMeta(name=name)
    target = client.V1CrossVersionObjectReference(name=pod_name, kind=pod_kind)
    spec = client.V1HorizontalPodAutoscalerSpec(min_replicas=1,
                                                max_replicas=max,
                                                scale_target_ref=target)
    status = client.V1HorizontalPodAutoscalerStatus(current_replicas=1,
                                                    desired_replicas=2)
    pd_scale.spec = spec
    pd_scale.status = status
    try:
        api.create_namespaced_horizontal_pod_autoscaler(namespace=self._namespace,
                                                        body=pd_scale)
        LOG.info(f'Horizontal pod autoscaler: {name} created.')
    except ApiException as e:
        LOG.error(f'Exception message: {e}')
def fake_k8s_client_dict():
    k8s_client_dict = {
        'v1': client.CoreV1Api(),
        'apiregistration.k8s.io/v1': client.ApiregistrationV1Api(),
        'apps/v1': client.AppsV1Api(),
        'authentication.k8s.io/v1': client.AuthenticationV1Api(),
        'authorization.k8s.io/v1': client.AuthorizationV1Api(),
        'autoscaling/v1': client.AutoscalingV1Api(),
        'batch/v1': client.BatchV1Api(),
        'coordination.k8s.io/v1': client.CoordinationV1Api(),
        'networking.k8s.io/v1': client.NetworkingV1Api(),
        'rbac.authorization.k8s.io/v1': client.RbacAuthorizationV1Api(),
        'scheduling.k8s.io/v1': client.SchedulingV1Api(),
        'storage.k8s.io/v1': client.StorageV1Api()
    }
    return k8s_client_dict
def remove_horizontal_pod_autoscaler(context, namespace, name):
    if context is None:
        raise SystemExit(
            "invalid empty context for HorizontalPodAutoscaler given")
    if namespace is None:
        raise SystemExit(
            "invalid empty namespace for HorizontalPodAutoscaler given")
    if name is None:
        raise SystemExit(
            "invalid empty name for HorizontalPodAutoscaler given")
    load_kube(context)
    api = client.AutoscalingV1Api()
    ret, status, _ = api.delete_namespaced_horizontal_pod_autoscaler_with_http_info(
        name, namespace=namespace)
    handle_status(ret, status, "HorizontalPodAutoscaler", namespace, name)
def wait_for_horizontal_pod_autoscaler_is_up(context, namespace, name):
    if name is None:
        raise SystemExit(
            "invalid empty name for HorizontalPodAutoscaler given")
    if context is None:
        raise SystemExit("invalid empty context given")
    if namespace is None:
        raise SystemExit("invalid empty namespace given")
    load_kube(context)
    print("check availability of", "HorizontalPodAutoscaler", name,
          "in namespace", namespace)
    api = client.AutoscalingV1Api()
    return general_up_check(
        namespace, "HorizontalPodAutoscaler", name,
        lambda: api.read_namespaced_horizontal_pod_autoscaler_with_http_info(
            name, namespace=namespace))
def get_hpa_by_deployment_name(namespace, deploy_name):
    # data = json.loads(request.get_data().decode("utf-8"))
    # current_app.logger.debug("Received data: {}".format(data))
    # namespace = handle_input(data.get("namespace"))
    # deploy_name = handle_input(data.get("name"))
    myclient = client.AutoscalingV1Api()
    hpas = myclient.list_namespaced_horizontal_pod_autoscaler(namespace=namespace)
    hpa = None
    for item in hpas.items:
        scaleTargetRef = item.spec.scale_target_ref
        kind = scaleTargetRef.kind
        name = scaleTargetRef.name
        if kind == 'Deployment' and name == deploy_name:
            hpa = item
            break
    return hpa
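# Hedged usage sketch for get_hpa_by_deployment_name() above. The namespace and
# deployment name are illustrative placeholders; the kube config is assumed to
# have been loaded elsewhere in the application.
hpa = get_hpa_by_deployment_name("default", "web-frontend")
if hpa is not None:
    print(hpa.metadata.name, hpa.spec.min_replicas, hpa.spec.max_replicas)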
def delete_hpa():
    data = json.loads(request.get_data().decode('utf-8'))
    name = handle_input(data.get('name'))
    namespace = handle_input(data.get('namespace'))
    myclient = client.AutoscalingV1Api()
    try:
        # body=client.V1DeleteOptions(propagation_policy='Foreground', grace_period_seconds=5)
        result = myclient.delete_namespaced_horizontal_pod_autoscaler(
            namespace=namespace, name=name)
    except Exception as e:
        body = json.loads(e.body)
        msg = {
            "status": e.status,
            "reason": e.reason,
            "message": body['message']
        }
        # return simple_error_handle(msg)
        return jsonify({'error': 'Exception while deleting hpa', "msg": msg})
    return jsonify({"ok": "hpa deleted successfully"})
def __init__(self):
    self.logger = logging.getLogger(__name__)
    try:
        config_file = os.path.expanduser(kubeconfig_filepath)
        config.load_kube_config(config_file=config_file)
    except:
        self.logger.warning("unable to load kube-config")

    self.v1 = client.CoreV1Api()
    self.v1Beta1 = client.AppsV1beta1Api()
    self.extensionsV1Beta1 = client.ExtensionsV1beta1Api()
    self.autoscalingV1Api = client.AutoscalingV1Api()
    self.rbacApi = client.RbacAuthorizationV1beta1Api()
    self.batchV1Api = client.BatchV1Api()
    self.batchV2Api = client.BatchV2alpha1Api()
    self._namespace = self._parse_namespace()
    self._cached_resources: Dict[str, List[str]] = {}
    self._init_cached_resources(self._namespace)