def main():
    """Poll this node's pods forever; cordon the node and delete pods stuck
    with events matching MATCH_EVENTS. Runs until killed."""
    if NODENAME is None:
        raise Exception("NODENAME cannot be None")
    api = pykube.HTTPClient(pykube.KubeConfig.from_service_account())
    log("Der node doktor ist in für %s, checkup every %s seconds..." % (NODENAME, POLL_INTERVAL))
    while True:
        try:
            bad_pods = []
            # NOTE(review): "ContainerCreating" is not a Pod phase (phases are
            # Pending/Running/Succeeded/Failed/Unknown), so this field selector
            # may never match — confirm against the target cluster version.
            for pod in pykube.Pod.objects(api).filter(
                    field_selector={"spec.nodeName": NODENAME,
                                    "status.phase": "ContainerCreating"}):
                matched_event = False
                # Scan this pod's events for any message matching a pattern
                # in MATCH_EVENTS (case-insensitive regex search).
                for pod_event in pykube.Event.objects(api).filter(
                        field_selector={"involvedObject.name": pod.obj['metadata']['name']}):
                    for event in MATCH_EVENTS:
                        if re.search(event, pod_event.obj['message'], re.IGNORECASE):
                            log("Matched event '%s' for pod %s" % (pod_event.obj['message'], pod.obj['metadata']['name']))
                            matched_event = True
                            break
                    if matched_event:
                        # One matching event is enough; stop scanning events.
                        bad_pods.append(pod)
                        break
            if bad_pods:
                # Cordon first so replacements don't land back on this node,
                # then delete the stuck pods.
                log("Cordoning node and deleting pods...")
                pykube.Node.objects(api).get(name=NODENAME).cordon()
                for pod in bad_pods:
                    pod.delete()
        except (ProtocolError, ConnectionResetError, ConnectionError,
                ReqConnectionError, ReqReadTimeout):
            # Transient network failure: rebuild the API client and keep going.
            log("Connection error. Resetting API connection...")
            api = pykube.HTTPClient(pykube.KubeConfig.from_service_account())
        time.sleep(POLL_INTERVAL)
def __init__(self):
    """Prepare a Node query against the cluster chosen by ``args.kube_conf``
    (explicit kubeconfig file) or, failing that, the in-pod service account."""
    if args.kube_conf:
        config = pykube.KubeConfig.from_file(args.kube_conf)
    else:
        config = pykube.KubeConfig.from_service_account()
    self._nodes_query = pykube.Node.objects(pykube.HTTPClient(config))
def login(self):
    """Authenticate against Azure, download the ACS deployment's ARM template
    and parameters, and construct the Kubernetes API client."""
    subscriptions = login(self.service_principal_app_id,
                          self.service_principal_secret,
                          self.service_principal_tenant_id,
                          self.subscription_id)
    self.arm_template = download_template(self.resource_group, self.acs_deployment)
    self.arm_parameters = download_parameters(self.resource_group, self.acs_deployment)
    # downloaded parameters do not include SecureStrings parameters, so we need to fill them manually
    self.fill_parameters_secure_strings()
    # firstConsecutiveStaticIP parameter is used as the private IP for the master
    os.environ["PYKUBE_KUBERNETES_SERVICE_HOST"] = self.arm_parameters[
        'firstConsecutiveStaticIP']['value']
    if self.kubeconfig:
        # for using locally
        logger.debug('Using kubeconfig %s', self.kubeconfig)
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file(self.kubeconfig))
    else:
        # for using on kube
        logger.debug('Using kube service account')
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_service_account())
def __init__(self, regions, aws_access_key, aws_secret_key, kubeconfig,
             pod_namespace, idle_threshold, type_idle_threshold,
             instance_init_time, cluster_name, notifier,
             scale_up=True, maintainance=True, datadog_api_key=None,
             over_provision=5, dry_run=False):
    """AWS cluster autoscaler controller: builds the Kubernetes client and
    boto3 session, discovers auto-scaling groups, and stores tuning knobs."""
    if kubeconfig:
        # for using locally
        logger.debug('Using kubeconfig %s', kubeconfig)
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file(kubeconfig))
    else:
        # for using on kube
        logger.debug('Using kube service account')
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_service_account())
    # None means "watch every namespace" via the pykube.all sentinel.
    if pod_namespace is None:
        self.pod_namespace = pykube.all
    else:
        self.pod_namespace = pod_namespace
    # Bookkeeping for drained nodes (shape defined by usage elsewhere).
    self._drained = {}
    self.session = boto3.session.Session(
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key,
        region_name=regions[0])  # provide a default region
    self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
        session=self.session, regions=regions, cluster_name=cluster_name)
    self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
        self.session)
    # config
    self.regions = regions
    self.idle_threshold = idle_threshold
    self.instance_init_time = instance_init_time
    self.type_idle_threshold = type_idle_threshold
    self.over_provision = over_provision
    self.scale_up = scale_up
    self.maintainance = maintainance
    self.notifier = notifier
    if datadog_api_key:
        datadog.initialize(api_key=datadog_api_key)
        logger.info('Datadog initialized')
        # NOTE(review): self.stats only exists when a datadog key is given —
        # confirm readers guard for that.
        self.stats = datadog.ThreadStats()
        self.stats.start()
    self.dry_run = dry_run
def __init__(self, regions, aws_access_key, aws_secret_key, kubeconfig,
             idle_threshold, type_idle_threshold, instance_init_time,
             cluster_name, over_provision=5, slack_hook=None, dry_run=False):
    """AWS autoscaler (Slack-notifying variant): builds the Kubernetes client
    and boto3 session, discovers auto-scaling groups, and stores config."""
    if kubeconfig:
        # for using locally
        logger.debug('Using kubeconfig %s', kubeconfig)
        self.api = pykube.HTTPClient(pykube.KubeConfig.from_file(kubeconfig))
    else:
        # for using on kube
        logger.debug('Using kube service account')
        self.api = pykube.HTTPClient(pykube.KubeConfig.from_service_account())
    # Bookkeeping for drained nodes (shape defined by usage elsewhere).
    self._drained = {}
    self.session = boto3.session.Session(
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key,
        region_name=regions[0])  # provide a default region
    self.autoscaling_groups = autoscaling_groups.AutoScalingGroups(
        session=self.session, regions=regions, cluster_name=cluster_name)
    # config
    self.regions = regions
    self.idle_threshold = idle_threshold
    self.instance_init_time = instance_init_time
    self.type_idle_threshold = type_idle_threshold
    self.over_provision = over_provision
    self.slack_hook = slack_hook
    self.dry_run = dry_run
def __init__(self, service_principal_app_id, service_principal_secret,
             service_principal_tenant_id, kubeconfig, template_file,
             parameters_file, idle_threshold, spare_agents,
             instance_init_time, container_service_name, resource_group,
             notifier, scale_up=True, maintainance=True,
             datadog_api_key=None, over_provision=5, dry_run=False):
    """Azure ACS autoscaler: builds the Kubernetes client, logs in to Azure
    with the service principal, and records ARM template paths and tuning knobs."""
    if kubeconfig:
        # for using locally
        logger.debug('Using kubeconfig %s', kubeconfig)
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file(kubeconfig))
    else:
        # for using on kube
        logger.debug('Using kube service account')
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_service_account())
    # Bookkeeping for drained nodes (shape defined by usage elsewhere).
    self._drained = {}
    self.container_service_name = container_service_name
    self.template_file = template_file
    self.parameters_file = parameters_file
    self.resource_group = resource_group
    # Agent-pool bookkeeping (filled in outside this block).
    self.agent_pools = {}
    self.pools_instance_type = {}
    azure_login.login(service_principal_app_id,
                      service_principal_secret,
                      service_principal_tenant_id)
    # config
    self.idle_threshold = idle_threshold
    self.instance_init_time = instance_init_time
    self.spare_agents = spare_agents
    self.over_provision = over_provision
    self.scale_up = scale_up
    self.maintainance = maintainance
    self.notifier = notifier
    if datadog_api_key:
        datadog.initialize(api_key=datadog_api_key)
        logger.info('Datadog initialized')
        # NOTE(review): self.stats only exists when a datadog key is given.
        self.stats = datadog.ThreadStats()
        self.stats.start()
    self.dry_run = dry_run
def get_api(self):
    """Return an HTTPClient: the local root kubeconfig when present,
    otherwise the in-pod service account credentials."""
    kubeconfig = '/root/.kube/config'
    if not os.path.isfile(kubeconfig):
        # from a pod
        return pykube.HTTPClient(
            pykube.KubeConfig.from_service_account())
    # locally
    return pykube.HTTPClient(
        pykube.KubeConfig.from_file(kubeconfig))
def __init__(self, service_principal_app_id, service_principal_secret,
             service_principal_tenant_id, kubeconfig, idle_threshold,
             reserve_idle_threshold, instance_init_time,
             container_service_name, resource_group, notifier,
             scale_up=True, maintainance=True, datadog_api_key=None,
             over_provision=5, dry_run=False):
    """Azure ACS autoscaler: builds the Kubernetes client, logs in to Azure,
    wraps the target container service, and records tuning knobs."""
    if kubeconfig:
        # for using locally
        logger.debug('Using kubeconfig %s', kubeconfig)
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_file(kubeconfig))
    else:
        # for using on kube
        logger.debug('Using kube service account')
        self.api = pykube.HTTPClient(
            pykube.KubeConfig.from_service_account())
    # Bookkeeping for drained nodes (shape defined by usage elsewhere).
    self._drained = {}
    # BUG FIX: original passed `service_principal,tenant` — two undefined
    # names — instead of the tenant id parameter (cf. the sibling Azure
    # autoscaler __init__, which calls azure_login.login with the tenant id).
    azure_login.login(
        service_principal_app_id,
        service_principal_secret,
        service_principal_tenant_id)
    # Create container service
    self.container_service = ContainerService(
        get_mgmt_service_client(ComputeManagementClient).container_services,
        container_service_name, resource_group)
    # self.autoscaling_timeouts = autoscaling_groups.AutoScalingTimeouts(
    #     self.session)
    # config
    self.idle_threshold = idle_threshold
    self.instance_init_time = instance_init_time
    self.reserve_idle_threshold = reserve_idle_threshold
    self.over_provision = over_provision
    self.scale_up = scale_up
    self.maintainance = maintainance
    self.notifier = notifier
    if datadog_api_key:
        datadog.initialize(api_key=datadog_api_key)
        logger.info('Datadog initialized')
        # NOTE(review): self.stats only exists when a datadog key is given.
        self.stats = datadog.ThreadStats()
        self.stats.start()
    self.dry_run = dry_run
def process(self):
    """Run one reconciliation pass: handle reset/upgrade flags, reload config,
    apply changed sections, and run cluster housekeeping when kube is up."""
    # Factory-reset when the flag file exists on the boot partition.
    if os.path.exists("/boot/klot-io/reset"):
        self.reset()
    # Restart this daemon if an updated library is staged on /boot.
    if os.path.exists("/boot/klot-io/lib/config.py"):
        self.restart()
    self.reload()
    self.load()
    self.uninitialized()
    # Apply only the config sections that changed since the last pass.
    if "account" in self.modified:
        self.account()
    if "network" in self.modified:
        self.network()
    if "kubernetes" in self.modified:
        self.kubernetes()
    # Lazily build the kube client once a kubeconfig appears on disk.
    if not self.kube and os.path.exists("/home/pi/.kube/config"):
        self.kube = pykube.HTTPClient(
            pykube.KubeConfig.from_file("/home/pi/.kube/config"))
    if self.kube:
        # NOTE(review): reconstructed nesting — assuming apps() is master-only
        # while services()/clean() run whenever a client exists; confirm
        # against the original formatting.
        if self.config["kubernetes"]["role"] == "master":
            self.apps()
        self.services()
        self.clean()
def login():
    """Build an authenticated pykube HTTP client from the environment's
    kubeconfig/context and return it."""
    kube_config = pykube.KubeConfig.from_env()
    api = pykube.HTTPClient(
        config=kube_config,
        timeout=settings.OSCTL_PYKUBE_HTTP_REQUEST_TIMEOUT,
    )
    LOG.info(f"Created k8s api client from context {kube_config.current_context}")
    return api
def setUp(self):
    """Load dummy kube fixtures, stand up a mocked EC2/ASG environment,
    and construct the Cluster under test."""
    # load dummy kube specs
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # FIX: yaml.load without an explicit Loader is deprecated (and a
    # TypeError in PyYAML >= 6); these fixtures are plain data, so
    # safe_load is both sufficient and safe.
    with open(os.path.join(dir_path, 'data/busybox.yaml'), 'r') as f:
        self.dummy_pod = yaml.safe_load(f.read())
    with open(os.path.join(dir_path, 'data/ds-pod.yaml'), 'r') as f:
        self.dummy_ds_pod = yaml.safe_load(f.read())
    with open(os.path.join(dir_path, 'data/rc-pod.yaml'), 'r') as f:
        self.dummy_rc_pod = yaml.safe_load(f.read())
    with open(os.path.join(dir_path, 'data/node.yaml'), 'r') as f:
        self.dummy_node = yaml.safe_load(f.read())
    # this isn't actually used here
    # only needed to create the KubePod object...
    self.api = pykube.HTTPClient(
        pykube.KubeConfig.from_file('~/.kube/config'))
    # start creating our mock ec2 environment
    self.mocks = [moto.mock_ec2(), moto.mock_autoscaling()]
    for moto_mock in self.mocks:
        moto_mock.start()
    client = boto3.client('autoscaling', region_name='us-west-2')
    self.asg_client = client
    client.create_launch_configuration(
        LaunchConfigurationName='dummy-lc',
        ImageId='ami-deadbeef',
        KeyName='dummy-key',
        SecurityGroups=['sg-cafebeef'],
        InstanceType='t2.medium')
    client.create_auto_scaling_group(
        AutoScalingGroupName='dummy-asg',
        LaunchConfigurationName='dummy-lc',
        MinSize=0,
        MaxSize=10,
        VPCZoneIdentifier='subnet-beefbeef',
        Tags=[{
            'Key': 'KubernetesCluster',
            'Value': 'dummy-cluster',
            'PropagateAtLaunch': True
        }, {
            'Key': 'KubernetesRole',
            'Value': 'worker',
            'PropagateAtLaunch': True
        }])
    # finally our cluster
    self.cluster = Cluster(
        aws_access_key='',
        aws_secret_key='',
        regions=['us-west-2', 'us-east-1', 'us-west-1'],
        kubeconfig='~/.kube/config',
        pod_namespace=None,
        idle_threshold=60,
        instance_init_time=60,
        type_idle_threshold=60,
        cluster_name='dummy-cluster',
        notifier=Notifier(),
        dry_run=False)
def execute(self, apiversion=None, kind=None, filters=None, foreach=None, returns=None):
    """Query Kubernetes objects of the subject's (or the given) kind.

    Applies *filters* (defaulting the namespace from the subject), calls
    *foreach* on each match, and returns ``returns(qobjs)`` when a reducer
    is supplied, otherwise the number of matched objects.
    """
    # BUG FIX: the original signature used a mutable default (`filters={}`)
    # and then mutated it below, leaking the injected namespace across calls
    # and into callers' dicts. Normalize to a private copy instead.
    filters = dict(filters) if filters else {}
    api = self.payload.get("_k8s_api_client")
    if api is None:
        config = pykube.KubeConfig.from_service_account()
        api = pykube.HTTPClient(config)
        # self.payload["_k8s_api_client"] = api
    if apiversion is None:
        apiversion = self.subject.get_ext("apiversion")
        # Prefix the API group when the subject declares one (group/version).
        if "group" in self.subject.get_ext_props():
            apiversion = "{}/{}".format(self.subject.get_ext("group"), apiversion)
    if kind is None:
        kind = self.subject.get_ext("kind")
    # Renamed from `obj` to avoid shadowing by the loop variable below.
    factory = pykube.object_factory(api, apiversion, kind)
    if "namespace" not in filters:
        filters["namespace"] = self.subject.get_ext("namespace")
    qobjs = factory.objects(api).filter(**filters)
    if foreach is not None:
        for obj in qobjs:
            foreach(obj)
    if returns is not None:
        return returns(qobjs)
    return len(qobjs)
def main():
    """CLI entry point: delete expired Jobs and Pods, honoring an optional
    single-namespace scope and an ignore list."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--seconds', type=int, default=3600,
                        help='Delete all finished jobs older than ..')
    parser.add_argument('--timeout-seconds', type=int, default=-1,
                        help='Kill all jobs older than ..')
    parser.add_argument('--dry-run', action='store_true', help='Dry run mode')
    parser.add_argument('--namespace', type=str, default=None,
                        help='Only search for completed jobs in a single namespace')
    parser.add_argument('--ignore-namespaces', type=str, default=None,
                        help='Ignore objects in this namespaces(--ignore-namespaces hoge,fuga,...)')
    args = parser.parse_args()
    # Prefer in-cluster credentials; fall back to the local kubeconfig.
    try:
        config = pykube.KubeConfig.from_service_account()
    except FileNotFoundError:
        # local testing
        config = pykube.KubeConfig.from_file(os.path.expanduser('~/.kube/config'))
    api = pykube.HTTPClient(config)
    namespace = args.namespace or pykube.all
    ignore_ns = args.ignore_namespaces.split(',') if args.ignore_namespaces else []
    for job in pykube.Job.objects(api, namespace=namespace):
        if job.namespace not in ignore_ns:
            delete_if_expired(args.dry_run, job,
                              job_expired(args.seconds, args.timeout_seconds, job))
    for pod in pykube.Pod.objects(api, namespace=namespace):
        # BUG FIX: the original tested `job.namespace` here — the stale loop
        # variable from the job loop above — so the ignore list was applied
        # to the wrong object when filtering pods.
        if pod.namespace not in ignore_ns:
            delete_if_expired(args.dry_run, pod, pod_expired(args.seconds, pod))
def _wait_for_cluster(self):
    """Wait until we can confirm all nodes have registered with cluster."""
    api = pykube.HTTPClient(self._get_kops_config())
    # We poll the cluster until the number of nodes it returns matches what we expect
    total_nodes = self._get_total_nodes()
    logging.info('Waiting for %d nodes', total_nodes)
    logging.info('This can take several minutes to complete')
    while True:
        try:
            # FIX: evaluate the node query once per poll instead of
            # re-running `nodes.all()` up to three times per iteration.
            node_count = len(pykube.Node.objects(api).all())
            if node_count == 0:
                logging.info('No nodes available yet')
                time.sleep(10)
            elif node_count < total_nodes:
                logging.info('Some nodes available: %d of %d',
                             node_count, total_nodes)
                time.sleep(2)
            else:
                # FIX: the original only returned on exact equality; a count
                # greater than expected fell through every branch and spun
                # the loop without sleeping. >= terminates in that case too.
                logging.info('All nodes available: %d', node_count)
                return
        # FIX: `except Exception, err` is Python-2-only syntax; `as` works
        # on Python 2.6+ and Python 3.
        except Exception as err:  # pylint: disable=W0703
            logging.info('Failed getting nodes')
            logging.info(err)
            time.sleep(10)
def login(self):
    """Create the pykube API client, preferring an explicit kubeconfig over
    the in-cluster service account."""
    # firstConsecutiveStaticIP parameter is used as the private IP for the master
    # os.environ["PYKUBE_KUBERNETES_SERVICE_HOST"] = self.arm_parameters['firstConsecutiveStaticIP']['value']
    if self.kubeconfig:
        # for using locally
        logger.debug('Using kubeconfig %s', self.kubeconfig)
        config = pykube.KubeConfig.from_file(self.kubeconfig)
        # config.set_current_context(self.kubecontext)
    else:
        # for using on kube
        logger.debug('Using kube service account')
        config = pykube.KubeConfig.from_service_account()
    self.api = pykube.HTTPClient(config)
def check_for_active_services(self):
    """Checks current cluster for active services"""
    logging.info('Checking for active services')
    api = pykube.HTTPClient(self._get_kops_config())
    found = False
    for namespace in pykube.Namespace.objects(api).all():
        logging.info('Checking namespace %s for service', namespace.name)
        if namespace.name == 'kube-system':
            logging.info('Skipping kube-system')
            continue
        services = pykube.Service.objects(api).filter(namespace='%s' % namespace.name)
        if len(services) > 0:
            logging.info('Active services: %d', len(services))
            for service in services:
                # The built-in "kubernetes" service in default doesn't count.
                if namespace.name == 'default' and service.name == 'kubernetes':
                    continue
                logging.info('Service: %s', service.name)
                found = True
    return found
def api(self):
    """Return an HTTPClient for the user's default kubeconfig, switching to
    self.context when one is configured."""
    path = os.path.join(os.environ['HOME'], ".kube/config")
    config = pykube.KubeConfig.from_file(path)
    if self.context:
        config.set_current_context(self.context)
    return pykube.HTTPClient(config)
def check_for_running_pods(self):
    """Checks current cluster for running pods"""
    logging.info('Checking for running pods')
    api = pykube.HTTPClient(self._get_kops_config())
    found = False
    for ns in pykube.Namespace.objects(api).all():
        logging.info('Checking namespace %s for pods', ns.name)
        if ns.name == 'kube-system':
            logging.info('Skipping kube-system')
            continue
        pods = pykube.Pod.objects(api).filter(namespace='%s' % ns.name)
        if len(pods) > 0:
            logging.info('Pods found running: %d', len(pods))
            for pod in pods:
                logging.info('Pod: %s', pod.name)
            found = True
    return found
def test_no_auth_with_empty_user(self):
    """
    Cluster does not require any authentication--so no credentials
    are provided in the user info
    """
    # Minimal kubeconfig document: one plain-HTTP cluster, a user entry with
    # an empty credential dict, and a context tying the two together.
    config = {
        "clusters": [{
            "name": "no-auth-cluster",
            "cluster": {
                "server": "http://localhost:8080"
            },
        }],
        "users": [{
            "name": "no-auth-cluster",
            "user": {}
        }],
        "contexts": [{
            "name": "no-auth-cluster",
            "context": {
                "cluster": "no-auth-cluster",
                "user": "******",
            },
        }],
        "current-context": "no-auth-cluster",
    }
    client = pykube.HTTPClient(pykube.KubeConfig(doc=config))
    # Helper (defined on this test class) verifies the client carries no auth.
    self.ensure_no_auth(client)
def get_kubernetes_api():
    """Build and return a pykube client from the user's default kubeconfig."""
    kubeconfig_path = os.path.expanduser('~/.kube/config')
    return pykube.HTTPClient(pykube.KubeConfig.from_file(kubeconfig_path))
def auth_pykube(**_kw):
    """
    Create an authenticated pykube API client at startup.
    """
    # Store the config and client as module-level singletons so they can be
    # reused after startup (consumers are outside this block).
    global _kcfg, _kapi
    _kcfg = pykube.KubeConfig.from_env()
    _kapi = pykube.HTTPClient(_kcfg)
def __init__(self, name, kubeconfig_path):
    """Initialize with *name* and build the API client from the kubeconfig
    file at *kubeconfig_path*."""
    super().__init__(name)
    self.kubeconfig_path = kubeconfig_path
    self.api = pykube.HTTPClient(
        pykube.KubeConfig.from_file(self.kubeconfig_path))
def _get_api_client(self):
    """Build an HTTPClient, honoring an explicit API URL override when set,
    otherwise falling back to environment-based discovery."""
    if self._override_api_url:
        kube_config = pykube.KubeConfig.from_url(self._override_api_url)
    else:
        kube_config = pykube.KubeConfig.from_env()
    return pykube.HTTPClient(kube_config)
def get_client(kube_apiserver=None, key_file=None, cert_file=None, ca_cert=None):
    """Return an HTTPClient built from an ad-hoc single-cluster ("ccp")
    kubeconfig document, with optional CA and client-cert authentication."""
    cluster = {"server": kube_apiserver}
    if ca_cert:
        cluster["certificate-authority"] = ca_cert
    user = {}
    if cert_file and key_file:
        user["client-certificate"] = cert_file
        user["client-key"] = key_file
    doc = {
        "clusters": [{"name": "ccp", "cluster": cluster}],
        "users": [{"name": "ccp", "user": user}],
        "contexts": [{
            "name": "ccp",
            "context": {"cluster": "ccp", "user": "******"},
        }],
        "current-context": "ccp",
    }
    return pykube.HTTPClient(pykube.KubeConfig(doc))
def main():
    """CLI entry point: delete expired Jobs and Pods across all namespaces."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--seconds', type=int, default=3600,
                        help='Delete all finished jobs older than ..')
    parser.add_argument('--timeout-seconds', type=int, default=-1,
                        help='Kill all jobs older than ..')
    parser.add_argument('--dry-run', action='store_true', help='Dry run mode')
    args = parser.parse_args()
    # Prefer in-cluster credentials; fall back to the local kubeconfig.
    try:
        config = pykube.KubeConfig.from_service_account()
    except FileNotFoundError:
        # local testing
        config = pykube.KubeConfig.from_file(
            os.path.expanduser('~/.kube/config'))
    api = pykube.HTTPClient(config)
    for job in pykube.Job.objects(api, namespace=pykube.all):
        delete_if_expired(args.dry_run, job,
                          job_expired(args.seconds, args.timeout_seconds, job))
    for pod in pykube.Pod.objects(api, namespace=pykube.all):
        delete_if_expired(args.dry_run, pod, pod_expired(args.seconds, pod))
def execute(self, apiversion=None, kind=None, filters=None, foreach=None, returns=None):
    """Query Kubernetes objects, deriving apiVersion/kind from the subject's
    live resource when no explicit selection is given.

    Calls *foreach* on each match; returns ``returns(qobjs)`` when a reducer
    is supplied, otherwise the number of matched objects.
    """
    # BUG FIX: the original used a mutable default (`filters={}`) and mutated
    # it below, leaking the injected namespace across calls and into callers'
    # dicts. Normalize to a private copy instead.
    filters = dict(filters) if filters else {}
    api = self.payload.get("_k8s_api_client")
    if api is None:
        config = pykube.KubeConfig.from_service_account()
        api = pykube.HTTPClient(config)
        self.payload["_k8s_api_client"] = api
    # With no explicit selection, derive apiVersion/kind from the payload,
    # fetching the subject's resource from the API when the payload lacks it.
    use_context = apiversion is None and kind is None and len(filters) == 0
    context = self.payload
    if use_context and context.get("metadata", {}).get("name") is None:
        resp = api.session.get(url=f"{api.url}{self.subject.name[len('k8s:'):]}")
        resp.raise_for_status()
        context = resp.json()
    if use_context:
        # BUG FIX: was context["apiversione"] (typo) — Kubernetes API objects
        # carry their version under the "apiVersion" key.
        apiversion = context["apiVersion"]
        kind = context["kind"]
    # Renamed from `obj` to avoid shadowing by the loop variable below.
    factory = pykube.object_factory(api, apiversion, kind)
    if "namespace" not in filters:
        filters["namespace"] = self.subject.get_ext("namespace")
    qobjs = factory.objects(api).filter(**filters)
    if foreach is not None:
        for obj in qobjs:
            foreach(obj)
    if returns is not None:
        return returns(qobjs)
    return len(qobjs)
def create_fn(spec, **kwargs):
    """kopf creation handler: render a child Pod from the CR's spec, create
    it via the Kubernetes API, and report the child's uid in status."""
    # Render the pod yaml with some spec fields used in the template.
    # NOTE(review): spec values are interpolated unquoted into YAML; a value
    # containing YAML syntax would alter the document — confirm inputs are
    # validated upstream. ($FIELD is expanded by the container's shell, not here.)
    doc = yaml.safe_load(f"""
        apiVersion: v1
        kind: Pod
        spec:
          containers:
          - name: the-only-one
            image: busybox
            command: ["sh", "-x", "-c"]
            args:
            - |
              echo "FIELD=$FIELD"
              sleep {spec.get('duration', 0)}
            env:
            - name: FIELD
              value: {spec.get('field', 'default-value')}
    """)
    # Make it our child: assign the namespace, name, labels, owner references, etc.
    kopf.adopt(doc)
    # Actually create an object by requesting the Kubernetes API.
    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    pod = pykube.Pod(api, doc)
    pod.create()
    api.session.close()
    # Update the parent's status.
    return {'children': [pod.metadata['uid']]}
def __init__(self, **kwargs):
    """Kubernetes input: synthesize a temporary kubeconfig from the cluster
    and user sections of self.config, then build the pykube client."""
    self.kind = 'kubernetes'
    self.scope = kwargs.get('scope', 'local')
    super(KubernetesInput, self).__init__(**kwargs)
    fd, filename = tempfile.mkstemp()
    config_content = {
        'apiVersion': 'v1',
        'clusters': [{
            'cluster': self.config['cluster'],
            'name': self.name,
        }],
        'contexts': [{
            'context': {
                'cluster': self.name,
                'user': self.name,
            },
            'name': self.name,
        }],
        'current-context': self.name,
        'kind': 'Config',
        'preferences': {},
        'users': [{
            'name': self.name,
            'user': self.config['user']
        }]
    }
    # FIX: the original leaked the temp file (and its fd) if writing or
    # parsing raised; the finally blocks guarantee cleanup on every path.
    try:
        try:
            os.write(fd, yaml.safe_dump(config_content).encode())
        finally:
            os.close(fd)
        self.config_wrapper = pykube.KubeConfig.from_file(filename)
    finally:
        os.remove(filename)
    self.api = pykube.HTTPClient(self.config_wrapper)
def create_kubernetes_secret(doc: dict) -> pykube.Secret:
    """Create the Secret described by *doc* via a short-lived API client and
    return the pykube object."""
    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    try:
        secret = pykube.Secret(api, doc)
        secret.create()
    finally:
        # FIX: always release the underlying requests session — the original
        # leaked it when create() raised.
        api.session.close()
    return secret
def create_or_update_deployment(manifest):
    """Create the Deployment described by *manifest* and return it.

    NOTE(review): despite the name, only create() is issued here — an
    existing Deployment is not updated; confirm whether callers rely on that.
    """
    # Actually create an object by requesting the Kubernetes API.
    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    try:
        deployment = pykube.Deployment(api, manifest)
        deployment.create()
    finally:
        # FIX: always release the underlying requests session — the original
        # leaked it when create() raised.
        api.session.close()
    return deployment