def __init__(self):
    """Build a pykube ``HTTPClient``, preferring in-cluster service-account
    credentials and falling back to a proxy URL taken from the environment.

    On total failure ``self.api`` is left as ``None`` — callers must check
    before use. ``self.kube_objects`` starts empty either way.
    """
    logging.info('Getting Kubernetes HTTPClient')
    try:
        # In-cluster path: mounted service-account token/CA.
        self.api = HTTPClient(KubeConfig.from_service_account())
    except Exception:
        logging.exception(
            'Error Getting Kubernetes HTTPClient using ServiceAccount')
        logging.info('Trying with URL')
        # Fallback: talk to a (kubectl) proxy endpoint; defaults match the
        # conventional local proxy address.
        kube_host = os.getenv('KUBERNETES_PROXY_API_HOST', '127.0.0.1')
        kube_port = os.getenv('KUBERNETES_PROXY_API_PORT', '8080')
        # Lazy %-style args instead of eager str.format in the log call.
        logging.info('Kubernetes Host: %s, Kubernetes Port: %s',
                     kube_host, kube_port)
        try:
            self.api = HTTPClient(
                KubeConfig.from_url('http://{}:{}'.format(
                    kube_host, kube_port)))
        except Exception:
            logging.exception(
                'Error Getting Kubernetes HTTPClient using URL')
            # Sentinel so callers can detect that no client is available.
            self.api = None
    self.kube_objects = {}
def __init__(self, api_server_urls: list):
    """Discover clusters.

    With explicit ``api_server_urls`` one Cluster is created per URL.
    With an empty list, in-cluster service-account credentials are tried
    first; outside a cluster the default kubectl-proxy URL is used.
    """
    self._clusters = []
    if api_server_urls:
        for url in api_server_urls:
            http_client = HTTPClient(KubeConfig.from_url(url))
            cluster_id = generate_cluster_id(url)
            self._clusters.append(
                Cluster(cluster_id, cluster_id, url, http_client)
            )
    else:
        try:
            config = KubeConfig.from_service_account()
        except FileNotFoundError:
            # we are not running inside a cluster
            # => assume default kubectl proxy URL
            server = DEFAULT_CLUSTERS
            config = KubeConfig.from_url(server)
        else:
            server = config.cluster["server"]
        self._clusters.append(
            Cluster(
                generate_cluster_id(server),
                DEFAULT_CLUSTER_NAME,
                server,
                HTTPClient(config),
            )
        )
def main():
    """CLI entry point: set up logging, build the Cassandra resource
    controller from the KUBECONFIG env var, and dispatch the action."""
    args = parse_args()

    # Highest-priority verbosity flag wins: debug > verbose > quiet.
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    elif args.quiet:
        log_level = logging.CRITICAL
    else:
        log_level = logging.WARNING
    setup_logging(log_level)

    api = HTTPClient(KubeConfig.from_file(os.environ['KUBECONFIG']))
    resource_map = (
        (CassandraDaemonSet, 'cassandra/k8s/daemonset.yaml'),
        (CassandraService, 'cassandra/k8s/service.yaml'),
        (CassandraService, 'cassandra/k8s/peer-service.yaml'),
    )
    pool = CassandraResourceFactory(api, resource_map)
    ctl = CassandraResourceController(pool)

    # Actions are mutually exclusive: use one consistent elif chain
    # (the original had a stray bare `if` for 'update').
    if args.action == 'create':
        ctl.create()
    elif args.action == 'update':
        ctl.update()
    elif args.action == 'delete':
        ctl.delete()
    elif args.action == 'validate':
        ctl.validate()
def refresh(self):
    """Reload the cluster list from the cluster-registry endpoint.

    Only clusters whose ``lifecycle_status`` is ``"ready"`` (the default
    when the field is absent) are kept. On any failure the previous
    ``self._clusters`` is left untouched and the error is logged.
    """
    try:
        response = self._session.get(
            urljoin(self._url, "/kubernetes-clusters"), timeout=10)
        response.raise_for_status()
        clusters = []
        for row in response.json()["items"]:
            # only consider "ready" clusters
            if row.get("lifecycle_status", "ready") == "ready":
                config = KubeConfig.from_url(row["api_server_url"])
                client = HTTPClient(config)
                client.session.auth = OAuth2BearerTokenAuth(
                    self._oauth2_bearer_token_path)
                # Registry fields become dash-separated cluster labels.
                labels = {}
                for key in (
                    "id",
                    "channel",
                    "environment",
                    "infrastructure_account",
                    "region",
                ):
                    if key in row:
                        labels[key.replace("_", "-")] = row[key]
                clusters.append(Cluster(row["alias"], client, labels))
        self._clusters = clusters
        self._last_cache_refresh = time.time()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; stale data is kept on failure.
        logger.exception(
            f"Failed to refresh from cluster registry {self._url}")
def uninstall(spec, logger, **kwargs):
    """Tear down the Tekton installation: controller deployments/services,
    the kaniko/git-clone tasks, and the operator-managed namespace.

    Missing objects are tolerated (``ObjectDoesNotExist`` is ignored and
    the task deletes are best-effort).
    """
    logger.info('uninstall')
    try:
        delete('tekton-pipelines',
               ["tekton-pipelines-controller",
                "tekton-pipelines-webhook",
                "tekton-triggers-controller",
                "tekton-triggers-webhook",
                "tekton-dashboard"],
               logger)

        ns = spec.get('namespace', 'default')
        # Best-effort deletes (check=False): an absent task is not an error.
        # List-form argv avoids shell interpolation of the namespace value.
        # NOTE: the original wrapped these in `except CalledProcessError`,
        # which was dead code — check=False never raises it.
        for task in ('task.tekton.dev/kaniko', 'task.tekton.dev/git-clone'):
            subprocess.run(['kubectl', 'delete', task, '-n', ns],
                           check=False, env=osenv)

        api = HTTPClient(KubeConfig.from_file())
        obj = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                # Mirrors the original: no default here, unlike the task
                # deletes above.
                'name': spec.get('namespace'),
            },
        }
        Namespace(api, obj).delete()
    except ObjectDoesNotExist:
        pass
def main():
    """Reconcile the current set of games against the cluster, forever,
    polling every five seconds."""
    logging.basicConfig(level=logging.DEBUG)
    api = HTTPClient(KubeConfig.from_service_account())
    poll_seconds = 5
    while True:
        maintain_games(api, get_games())
        time.sleep(poll_seconds)
def __init__(self, clusters: dict):
    """Create one Cluster handle per entry of *clusters*
    (a mapping of cluster name -> API server URL)."""
    self._clusters = [
        Cluster(name, HTTPClient(KubeConfig.from_url(server_url)))
        for name, server_url in clusters.items()
    ]
def create_from_secrets(cls):
    """Alternate constructor: build a ``Kubernetes`` wrapper from the
    in-cluster service-account credentials and namespace secret."""
    logging.info("Using service account from kubernetes secrets")
    config = KubeConfig.from_service_account()
    return Kubernetes(
        api_client=HTTPClient(config),
        kube_config=config,
        namespace=_get_namespace_from_secrets(),
    )
def delete(namespace, names, logger):
    """Delete the Deployment and the Service with each given name in
    *namespace*, logging every deletion."""
    api = HTTPClient(KubeConfig.from_file())
    for name in names:
        deployment = Deployment.objects(api, namespace=namespace).get(name=name)
        deployment.delete()
        logger.info(f'delete Deployment: {str(deployment)}')

        svc = Service.objects(api, namespace=namespace).get(name=name)
        svc.delete()
        logger.info(f'delete Service: {str(svc)}')
def __init__(self):
    """Register the single local (in-cluster) Kubernetes cluster.

    Raises:
        ServiceAccountNotFound: when no service-account credentials are
            mounted, i.e. we are not running inside a cluster.
    """
    self._clusters = []
    try:
        config = KubeConfig.from_service_account()
    except FileNotFoundError as exc:
        # we are not running inside a cluster; chain the cause explicitly
        raise ServiceAccountNotFound() from exc
    client = HTTPClient(config)
    self._clusters.append(Cluster("local", client))
def get_clusters(self):
    """Yield a Cluster for every context in the kubeconfig file,
    skipping contexts not listed in ``self._contexts`` (when set)."""
    # Kubernetes Python client expects "vintage" string path
    config_file = str(self._path) if self._path else None
    config = KubeConfig.from_file(config_file)
    wanted = self._contexts
    for context in config.contexts:
        # filter out unselected contexts
        if wanted and context not in wanted:
            continue
        # create a new KubeConfig with new "current context"
        per_context_config = KubeConfig(config.doc, context)
        yield Cluster(context, HTTPClient(per_context_config))
def refresh(self):
    """Reload the cluster list from the cluster-registry endpoint using
    read-only OAuth credentials.

    Only "ready" clusters (default when the field is absent) are kept;
    on failure the previous list is retained and the error is logged.
    """
    try:
        response = self._session.get(
            urljoin(self._url, "/kubernetes-clusters"), timeout=10
        )
        response.raise_for_status()
        clusters = []
        for row in response.json()["items"]:
            # only consider "ready" clusters
            if row.get("lifecycle_status", "ready") == "ready":
                config = KubeConfig.from_url(row["api_server_url"])
                client = HTTPClient(config)
                client.session.auth = OAuthTokenAuth("read-only")
                clusters.append(
                    Cluster(row["id"], row["alias"], row["api_server_url"], client)
                )
        self._clusters = clusters
        self._last_cache_refresh = time.time()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being logged and swallowed.
        logger.exception(f"Failed to refresh from cluster registry {self._url}")
def namespace(spec, old, new, logger, **kwargs):
    """Kopf field handler: create the namespace named *new*, or delete the
    namespace named *old* when the field was removed. No-op when both are
    falsy."""
    logger.info(f'namespace: {old=}, {new=}')
    api = HTTPClient(KubeConfig.from_file())

    def _manifest(name):
        # Minimal v1/Namespace manifest (deduplicated from the two
        # identical inline dicts in the original).
        return {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': name,
            },
        }

    if new:
        Namespace(api, _manifest(new)).create()
    elif old:
        Namespace(api, _manifest(old)).delete()
def create(self) -> HTTPClient:
    """Load the kubeconfig at ``self.kube_config_path``, cache the
    resulting client on the instance, and return it."""
    config = KubeConfig.from_file(self.kube_config_path)
    client = HTTPClient(config)
    self._kube_client = client
    return client
# Write the rendered HAProxy configuration and reload the service.
# `with` guarantees the file is flushed/closed before the reload runs
# (the original open()/close() pair leaked on a write error).
with open('/etc/haproxy/haproxy.cfg', 'w') as file_conf:
    file_conf.write(template_render)
system('chown haproxy:haproxy /etc/haproxy/haproxy.cfg')
system('service haproxy reload')
system('echo [UPDATE] Changes in replicas of %s' % (name_set))

# RUN!
system('echo "Start Slug Load Balancer v.0.0.1"')

# python main.py namespace="default" url_heapster="http://heapster/api/v1/model" autoscaler_count="5" time_query="10"
patch_exec = path.dirname(path.realpath(__file__)) + "/"
api = HTTPClient(KubeConfig.from_service_account())

# Arguments: every CLI argument must look like key="value".
list_argv = []
argv.remove(argv[0])  # drop the script name; the rest are key=value pairs
for elements in argv:
    # Split on the FIRST '=' only so values (e.g. URLs with query
    # strings) may themselves contain '='.
    variable_entrada = elements.split("=", 1)
    if len(variable_entrada) == 1 or variable_entrada[1] == '':
        raise NameError(
            '[ERROR] Invalid Arguments [python example.py var="text"]')
    list_argv.append(variable_entrada)
dic_argv = argument_to_dic(list_argv)

namespace = dic_argv["namespace"]
# url_heapster = dic_argv["url_heapster"]
time_query = int(dic_argv["time_query"])
def __init__(self, *args, **kwargs):
    """Set up the in-cluster API client, read the game identity from the
    GAME_ID / GAME_URL environment variables, then delegate to the base
    worker manager."""
    self.api = HTTPClient(KubeConfig.from_service_account())
    environment = os.environ
    self.game_id = environment['GAME_ID']
    self.game_url = environment['GAME_URL']
    super().__init__(*args, **kwargs)
def from_env():
    """Return an HTTPClient configured from the process environment."""
    config = KubeConfig.from_env()
    return HTTPClient(config)
def api(kubeconfig):
    """Return an HTTPClient for the kubeconfig file at *kubeconfig*
    (coerced to a plain string path)."""
    return HTTPClient(KubeConfig.from_file(str(kubeconfig)))
import yaml import json from datetime import datetime from time import sleep import kopf from pykube import KubeConfig, HTTPClient, Secret, Deployment, all from pykube.exceptions import PyKubeError, HTTPError, KubernetesError from pprint import pprint try: cfg = KubeConfig.from_service_account() except FileNotFoundError: cfg = KubeConfig.from_file() api = HTTPClient(cfg) @kopf.on.create('bazinga.io', 'v1', 'secretz') def create_secret(body, meta, spec, status, logger, **kwargs): secretName = spec.get('secretName') print(f"Create Secret.... {secretName}") data = _render_yaml(spec, meta) obj = Secret(api, data) try: obj.create() except HTTPError as e: obj.update() if e.code == 409: