def test_create_delete_deployment(api, requests_mock):
    """Exercise the README example: create a Deployment, then delete it,
    with both API calls served by the mocked responses registry."""
    # example from README
    with requests_mock as rsps:
        # Stub the POST endpoint the create() call will hit.
        rsps.add(
            responses.POST,
            "https://localhost:9443/apis/apps/v1/namespaces/gondor-system/deployments",
            json={},
        )
        manifest = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "name": "my-deploy",
                "namespace": "gondor-system",
            },
            "spec": {
                "replicas": 3,
                "selector": {
                    "matchLabels": {
                        "app": "nginx",
                    },
                },
                "template": {
                    "metadata": {
                        "labels": {
                            "app": "nginx",
                        },
                    },
                    "spec": {
                        "containers": [{
                            "name": "nginx",
                            "image": "nginx",
                            "ports": [{
                                "containerPort": 80,
                            }],
                        }],
                    },
                },
            },
        }
        pykube.Deployment(api, manifest).create()

        # Stub the DELETE endpoint for the same object.
        rsps.add(
            responses.DELETE,
            "https://localhost:9443/apis/apps/v1/namespaces/gondor-system/deployments/my-deploy",
            json={},
        )
        manifest = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "name": "my-deploy",
                "namespace": "gondor-system",
            },
        }
        pykube.Deployment(api, manifest).delete()
def _trigger_deployment(kube_connection, namespace, timestamp=None, checksum=None):
    """Touch every Deployment in *namespace* so Kubernetes rolls its pods.

    Stamps a ``secretupdater.ffx.io/last_update`` annotation (and optionally a
    ``secretupdater.ffx.io/hash`` annotation) on each deployment's pod template,
    which changes the template and triggers a rollout.

    :param kube_connection: pykube HTTP client.
    :param namespace: namespace whose deployments should be reloaded.
    :param timestamp: datetime used for the last_update annotation; defaults to
        the current UTC time *at call time*.
    :param checksum: if given, deployments whose stored hash already equals it
        are skipped, and the hash annotation is updated on the rest.
    :raises NamespaceNoDeploymentError: when the namespace has no deployments (404).
    """
    # BUG FIX: the original default was ``timestamp=datetime.datetime.utcnow()``,
    # which is evaluated ONCE at import time — every later call would stamp the
    # process-start time instead of "now". Use a None sentinel instead.
    if timestamp is None:
        timestamp = datetime.datetime.utcnow()
    res = pykube.Deployment.objects(kube_connection).filter(namespace=namespace).all()
    response = {"items": []}
    try:
        response = res.response
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 404:
            raise NamespaceNoDeploymentError("no deployments found in namespace '%s'" % (namespace))
        raise
    for deployment in response['items']:
        pod_meta = deployment['spec']['template']['metadata']
        if 'annotations' not in pod_meta:
            pod_meta['annotations'] = {}
        # An object-level (not pod-template) annotation opts a deployment out.
        if 'annotations' in deployment['metadata'] and 'secretupdater.ffx.io/skip_reload' in deployment['metadata']['annotations']:
            app.logger.debug('Skipping deployment reload due to "skip_reload" annotation')
            continue
        pod_annotations = pod_meta['annotations']
        # Unchanged hash means the secret content didn't change; no rollout needed.
        if checksum and pod_annotations.get('secretupdater.ffx.io/hash') == checksum:
            app.logger.debug('Skipping deployment due to no changes')
            continue
        # Naive UTC datetime, so append "Z" to make the annotation RFC3339-ish.
        pod_annotations['secretupdater.ffx.io/last_update'] = timestamp.isoformat() + "Z"
        if checksum:
            pod_annotations['secretupdater.ffx.io/hash'] = checksum
        pykube.Deployment(kube_connection, deployment).update()
        app.logger.debug(pod_meta)
def kubernetes_deployment_api_object(self, deployment):
    """Build and return the pykube.Deployment API object for *deployment*."""
    metadata = {
        "namespace": self.kubernetes_namespace_name,
        "name": self.kubernetes_deployment_name,
        "labels": self.kubernetes_labels(),
    }
    selector = {
        "matchLabels": self.kubernetes_labels(),
    }
    pod_template = {
        "metadata": {
            "labels": self.kubernetes_labels(),
        },
        "spec": {
            "containers": self.kubernetes_containers(deployment),
        },
    }
    manifest = {
        "kind": "Deployment",
        "apiVersion": "extensions/v1beta1",
        "metadata": metadata,
        "spec": {
            "replicas": deployment.replicas,
            "selector": selector,
            "template": pod_template,
        },
    }
    return pykube.Deployment(self.kubernetes_api, manifest)
def destroy(api, spec):
    """Delete the Kubernetes resource described by *spec* (Deployment,
    Service, or Pod). Other kinds are silently ignored."""
    resource_types = {
        'Deployment': pykube.Deployment,
        'Service': pykube.Service,
        'Pod': pykube.Pod,
    }
    resource_cls = resource_types.get(spec["kind"])
    if resource_cls is not None:
        resource_cls(api, spec).delete()
def constroy(api, spec):
    """Create the Kubernetes resource described by *spec* (Deployment,
    Service, or Pod). Other kinds are silently ignored.

    NOTE(review): the name looks like a typo for "construct"/"create", but it
    is the public name and callers may depend on it, so it is kept as-is.
    """
    resource_types = {
        'Deployment': pykube.Deployment,
        'Service': pykube.Service,
        'Pod': pykube.Pod,
    }
    resource_cls = resource_types.get(spec["kind"])
    if resource_cls is not None:
        resource_cls(api, spec).create()
def create_or_update_deployment(manifest):
    """Create a Deployment object by requesting the Kubernetes API.

    NOTE(review): despite the name, this only ever calls ``create()`` — there
    is no update path; an existing object will make ``create()`` fail.

    :param manifest: dict manifest for the Deployment.
    :returns: the created pykube.Deployment.
    """
    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    # BUG FIX: the original closed the session only on success, leaking the
    # HTTP session whenever create() raised. Close it unconditionally.
    try:
        deployment = pykube.Deployment(api, manifest)
        deployment.create()
    finally:
        api.session.close()
    return deployment
def deploy(self, obj):
    """Create the Kubernetes resource described by *obj*, logging what is
    being started. Handles Deployment, Service, and Pod kinds."""
    kind = obj['kind']
    if kind == 'Deployment':
        logging.info(f"Starting Deployment... {obj['metadata']['name']}")
        pykube.Deployment(self.api, obj).create()
    elif kind == 'Service':
        logging.info(f"Starting Service... {obj['metadata']['name']}")
        pykube.Service(self.api, obj).create()
    elif kind == 'Pod':
        image = obj['spec']['containers'][0]['image']
        logging.info(f"Starting Pod... {obj['metadata']['name']}({image})")
        pykube.Pod(self.api, obj).create()
def deploy(configs_json):
    """Render the per-tenant deployment manifest and create it in the cluster.

    Loads the base manifest for ``configs_json['service']``, suffixes the
    deployment/container names with the (dash-normalized) tenant, injects the
    tenant label and the serialized config as the first container env var,
    then creates the Deployment via the module-level ``api`` client.

    NOTE(review): a large commented-out ReplicationController manifest was
    removed here — dead code superseded by the Deployment path.
    """
    tenant = configs_json.get('tenant')
    # Kubernetes names may not contain underscores.
    replaced = tenant.replace('_', '-')
    configs_str = dumps(configs_json)
    service_name = configs_json.get('service')
    deployment_obj = load_from_file(service_name)
    deployment_obj['metadata']['name'] += "-" + replaced
    deployment_obj['spec']['template']['metadata']['labels']['tenant'] = tenant
    container = deployment_obj['spec']['template']['spec']['containers'][0]
    container['name'] += "-" + replaced
    # First env var carries the full config payload for the collector.
    container['env'][0]['value'] = configs_str
    print(deployment_obj)
    pykube.Deployment(api, deployment_obj).create()
def request_start(self):
    """Ensure the webservice's Deployment and Service exist, creating
    whichever of the two is missing."""
    self.webservice.check()
    selector = self.webservice_label_selector
    if self._find_obj(pykube.Deployment, selector) is None:
        pykube.Deployment(self.api, self._get_deployment()).create()
    if self._find_obj(pykube.Service, selector) is None:
        pykube.Service(self.api, self._get_svc()).create()
def create_fn(spec, **kwargs):
    """kopf creation handler: build a Deployment serving the custom
    resource's website_url and create it as a child of the CR.

    :param spec: CR spec; must contain ``website_url``.
    :returns: dict with the child Deployment's uid, stored in the CR status.
    """
    url = spec["website_url"]
    name = kwargs["body"]["metadata"]["name"]
    doc = get_yaml(url, name)
    print(f"Serving html from: {url}")

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    # When the custom resource is deleted, its children are also deleted.
    kopf.adopt(doc)

    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    # BUG FIX: close the session even when create() raises — the original
    # leaked the HTTP session on any API error.
    try:
        dep = pykube.Deployment(api, doc)
        dep.create()
    finally:
        api.session.close()

    return {'children': [dep.metadata['uid']]}
def deploy_deployment(api, manifest, version, timeout, update):
    """Deploy Deployment.

    Creates the deployment if absent; updates it if present and *update* is
    truthy; otherwise returns without touching it. After create/update, waits
    for the rollout revision to complete.

    :param api: pykube HTTP client.
    :param manifest: Deployment manifest dict (must carry labels['app']).
    :param version: version string recorded in the change-cause annotation.
    :param timeout: seconds to wait for the rollout.
    :param update: whether an existing deployment should be updated.
    :returns: the pykube.Deployment, or None when nothing was changed.
    """
    logging.info('Deploying deployment')
    deployment = pykube.Deployment(api, manifest)
    deployment.annotations[
        'kubernetes.io/change-cause'] = 'Deploying version %s' % version
    # FIX: the original duplicated this metadata/namespace check in two
    # places; resolve the namespace exactly once.
    if 'metadata' in deployment.obj and 'namespace' in deployment.obj['metadata']:
        namespace = deployment.obj['metadata']['namespace']
        check_namespace(api, namespace)
    else:
        namespace = 'default'
    if not deployment.exists():
        logging.info('Creating deployment')
        deployment.create()
    elif update:
        logging.info('Updating deployment')
        deployment.update()
    else:
        logging.info('Not updating deployment')
        return
    # We wait for deployments finish
    app_label = deployment.obj['metadata']['labels']['app']
    revision = get_revision(api, app_label, version, timeout, namespace)
    # Hack to make sure deployment has a chance to start - Need a better way to detect this
    time.sleep(3)
    wait_for_deployment(deployment, revision, timeout)
    return deployment
def _deploy_addon(self, url):
    """Deploy addon.

    Fetches a multi-document manifest from *url*, splits it into individual
    documents (pykube cannot handle multi-doc files the way kubectl does),
    and creates each Deployment/Service it contains.

    :raises SystemError: on any manifest kind other than Deployment/Service.
    """
    # Deploy dashboard addon
    addon_yaml = requests.get(url)
    # The above yaml file has the deployment and service manifest in same file, kubectl can handle that but
    # pykube can't
    addon = []
    for doc in addon_yaml.text.split('---'):
        # SECURITY FIX: yaml.load without a Loader can construct arbitrary
        # Python objects from network-fetched content; use safe_load.
        addon.append(yaml.safe_load(doc))
    api = pykube.HTTPClient(self._get_kops_config())
    for manifest in addon:
        if manifest['kind'] == 'Deployment':
            logging.info('Creating addon deployment')
            # Add node selector for utility instance group.
            # BUG FIX: nodeSelector belongs on the pod template spec
            # (spec.template.spec); at the Deployment spec level the API
            # server rejects/ignores it and pods are never pinned.
            manifest['spec']['template']['spec']['nodeSelector'] = {'close.io/ig': 'utility'}
            pykube.Deployment(api, manifest).create()
        elif manifest['kind'] == 'Service':
            logging.info('Creating addon service')
            pykube.Service(api, manifest).create()
        else:
            raise SystemError('Unknown manifest type')
def createDeploy(self):
    """Build the deployment manifest for this pod and create it."""
    manifest = createDeployObject(self.pod, False)
    pykube.Deployment(self.api, manifest).create()
    log.info('creating deploy')
def vmcreate(self, obj_attr_list) : ''' TBD ''' try : _status = 100 _fmsg = "An error has occurred, but no error message was captured" self.determine_instance_name(obj_attr_list) obj_attr_list["cloud_vm_name"] = obj_attr_list["cloud_vm_name"].lower() obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].lower() obj_attr_list["cloud_rs_name"] = obj_attr_list["cloud_vm_name"] obj_attr_list["cloud_d_name"] = obj_attr_list["cloud_vm_name"] obj_attr_list["cloud_rs_uuid"] = "NA" obj_attr_list["cloud_d_uuid"] = "NA" self.determine_key_name(obj_attr_list) self.take_action_if_requested("VM", obj_attr_list, "provision_originated") _mark_a = time() self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \ obj_attr_list["vmc_name"], obj_attr_list["name"]) self.annotate_time_breakdown(obj_attr_list, "authenticate_time", _mark_a) _mark_a = time() if self.is_vm_running(obj_attr_list) : _msg = "An instance named \"" + obj_attr_list["cloud_vm_name"] _msg += " is already running. It needs to be destroyed first." 
_status = 187 cberr(_msg) raise CldOpsException(_msg, _status) self.annotate_time_breakdown(obj_attr_list, "check_existing_instance_time", _mark_a) _env = [ { "name": "CB_SSH_PUB_KEY", "value" : obj_attr_list["pubkey_contents"]}, {"name": "CB_LOGIN", "value" : obj_attr_list["login"]} ] if str(obj_attr_list["ports_base"]).lower() != "false" : obj_attr_list["prov_cloud_port"] = int(obj_attr_list["ports_base"]) + int(obj_attr_list["name"].replace("vm_",'')) _ports = [ { "hostPort": obj_attr_list["prov_cloud_port"], "containerPort": int(obj_attr_list["run_cloud_port"])} ] if obj_attr_list["check_boot_complete"] == "tcp_on_22": obj_attr_list["check_boot_complete"] = "tcp_on_" + str(obj_attr_list["prov_cloud_port"]) else : _ports = [] if obj_attr_list["abstraction"] == "pod" : _obj = { "apiVersion": "v1", \ "kind": "Pod", \ "id": obj_attr_list["cloud_vm_name"], \ "metadata": { "name": obj_attr_list["cloud_vm_name"], "namespace": obj_attr_list["namespace"] }, \ "spec": { "containers": \ [ { "env": _env, \ "name": obj_attr_list["cloud_vm_name"], \ "image": obj_attr_list["imageid1"], \ "ports": _ports } ] } } if obj_attr_list["abstraction"] == "replicaset" : _obj = { "apiVersion": "extensions/v1beta1", \ "kind": "ReplicaSet", \ "id": obj_attr_list["cloud_rs_name"], \ "metadata": { "name": obj_attr_list["cloud_rs_name"], "namespace": obj_attr_list["namespace"]}, \ "spec": { "replicas": int(obj_attr_list["replicas"]), \ "template": { "metadata": { "labels": { "app": obj_attr_list["cloud_vm_name"], "role": "master", "tier": "backend" } },\ "spec": { "containers": \ [ { "env": _env, \ "name": obj_attr_list["cloud_vm_name"], \ "image": obj_attr_list["imageid1"], \ "ports": _ports } ] } } } } obj_attr_list["selector"] = "app:" + obj_attr_list["cloud_rs_name"] + ',' + "role:master,tier:backend" if obj_attr_list["abstraction"] == "deployment" : _obj = { "apiVersion": "extensions/v1beta1", \ "kind": "Deployment", \ "id": obj_attr_list["cloud_d_name"], \ "metadata": { "name": 
obj_attr_list["cloud_d_name"], "namespace": obj_attr_list["namespace"]}, \ "spec": { "replicas": int(obj_attr_list["replicas"]), \ "template": { "metadata": { "labels": { "app": obj_attr_list["cloud_vm_name"], "role": "master", "tier": "backend" } },\ "spec": { "containers": \ [ { "env": _env, \ "name": obj_attr_list["cloud_vm_name"], \ "image": obj_attr_list["imageid1"], \ "ports": _ports } ] } } } } obj_attr_list["selector"] = "app:" + obj_attr_list["cloud_d_name"] + ',' + "role:master,tier:backend" self.vm_placement(obj_attr_list) _cpu, _memory = obj_attr_list["size"].split('-') if "userdata" not in obj_attr_list : obj_attr_list["userdata"] = "auto" if obj_attr_list["userdata"] != "none" : obj_attr_list["config_drive"] = True else : obj_attr_list["config_drive"] = False _time_mark_prs = int(time()) obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"]) obj_attr_list["last_known_state"] = "about to send create request" self.get_images(obj_attr_list) self.get_networks(obj_attr_list) self.pre_vmcreate_process(obj_attr_list) self.vvcreate(obj_attr_list) self.common_messages("VM", obj_attr_list, "creating", 0, '') _mark_a = time() if obj_attr_list["abstraction"] == "pod" : pykube.Pod(self.kubeconn, _obj).create() if obj_attr_list["abstraction"] == "replicaset" : pykube.ReplicaSet(self.kubeconn, _obj).create() if obj_attr_list["abstraction"] == "deployment" : pykube.Deployment(self.kubeconn, _obj).create() self.annotate_time_breakdown(obj_attr_list, "instance_scheduling_time", _mark_a) self.take_action_if_requested("VM", obj_attr_list, "provision_started") _time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs) self.wait_for_instance_boot(obj_attr_list, _time_mark_prc) obj_attr_list["arrival"] = int(time()) _status = 0 if obj_attr_list["force_failure"].lower() == "true" : _fmsg = "Forced failure (option FORCE_FAILURE set \"true\")" _status = 916 except CldOpsException, 
obj : _status = obj.status _fmsg = str(obj.msg)
def idea_collector(body, **kwargs):
    """Create the idea-collector web and celery Deployments from the CR spec.

    Replica counts come from ``body['spec']``; both deployments share the same
    image and secrets, differing only in name, replicas, and command.
    """
    deployment_configs = [
        {
            'name': 'web',
            'replicas': body['spec']['web_app_replicas'],
            'command': '["pipenv run python manage.py makemigrations && pipenv run python manage.py migrate && pipenv run daphne -p 8080 -b 0.0.0.0 idea_collector.asgi:application"]'
        },
        {
            'name': 'celery',
            'replicas': body['spec']['celery_worker_replicas'],
            'command': '["pipenv run celery -A idea_collector worker --loglevel=INFO"]'
        },
    ]
    api = pykube.HTTPClient(pykube.KubeConfig.from_file())
    # BUG FIX: close the HTTP session when done (the original leaked it,
    # including when a create() call raised).
    try:
        for deployment_config in deployment_configs:
            deployment_data = yaml.full_load(f"""
apiVersion: apps/v1
kind: Deployment
metadata:
  name: idea-collector-{deployment_config['name']}
  labels:
    app: idea-collector-{deployment_config['name']}
spec:
  replicas: {deployment_config['replicas']}
  selector:
    matchLabels:
      app: idea-collector-{deployment_config['name']}
  template:
    metadata:
      labels:
        app: idea-collector-{deployment_config['name']}
    spec:
      containers:
      - name: idea-collector-{deployment_config['name']}
        image: maorc112/idea_collector_web:latest
        command:
        args: {deployment_config['command']}
        ports:
        - containerPort: 8080
        env:
        - name: POSTGRESQLUSERNAME
          valueFrom:
            secretKeyRef:
              name: postgres-redis-credentials
              key: username
        - name: POSTGRESQLPASSWORD
          valueFrom:
            secretKeyRef:
              name: postgres-redis-credentials
              key: password
        - name: POSTGRESQLHOST
          valueFrom:
            secretKeyRef:
              name: postgres-redis-credentials
              key: host
        - name: REDIS_URL
          valueFrom:
            secretKeyRef:
              name: postgres-redis-credentials
              key: redis_url
""")
            deployment = pykube.Deployment(api, deployment_data)
            deployment.create()
    finally:
        api.session.close()
def updateDeployment(self):
    """Re-sync the deployment by pushing the current manifest as an update."""
    # we simply need to call an update function
    manifest = createDeployObject(self.pod)
    pykube.Deployment(self.api, manifest).update()
    log.info('Resyncing deployment')
def deleteDeploy(self):
    """Tear down the deployment: first scale it down via update, then delete."""
    # we need to scale the pods to 0, since "delete" doesnt delete pods
    manifest = createDeployObject(self.pod, True)
    pykube.Deployment(self.api, manifest).update()
    pykube.Deployment(self.api, manifest).delete()
    log.info('deleting deploy')