Example #1
def create_fn(spec, name, namespace, logger, **kwargs):

    size = spec.get('size')
    if not size:
        raise kopf.PermanentError(f"Size must be set. Got {size!r}.")

    path = os.path.join(os.path.dirname(__file__), 'pvc.yaml')
    print("path: ", path)
    tmpl = open(path, 'rt').read()
    print("templ type: ", type(tmpl))
    text = tmpl.format(name=name, size=size)
    data = yaml.safe_load(text)
    print("Data: ", str(data))

    # Set the hierarchy so the PVC is owned by the EVC: when the EVC is deleted, the PVC is deleted too.
    kopf.adopt(data)

    #ilogger(f"Data:\n%s",str(data))
    kubernetes.config.load_kube_config(config_file="/root/.kube/config",
                                       context="k8sb1")
    api = kubernetes.client.CoreV1Api()
    print("********")
    obj = api.create_namespaced_persistent_volume_claim(
        namespace=namespace,
        body=data,
    )

    logger.info("PVC child is created: %s", obj)
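
All of the handler snippets in this section omit their imports and the kopf registration decorator. A minimal, self-contained sketch of the boilerplate they assume might look like the following (the CRD group/version/plural and the child ConfigMap are illustrative assumptions, not taken from any of the projects above):

import kopf
import kubernetes
import yaml

@kopf.on.create('zalando.org', 'v1', 'ephemeralvolumeclaims')
def create_fn(spec, name, namespace, logger, **kwargs):
    # Render a child object, adopt it so it is garbage-collected together with
    # the parent custom resource, then create it through the Kubernetes API --
    # the same pattern the examples in this section follow.
    data = yaml.safe_load(f"""
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: {name}-child
        data:
          size: "{spec.get('size', '1Gi')}"
    """)
    kopf.adopt(data)

    kubernetes.config.load_incluster_config()
    api = kubernetes.client.CoreV1Api()
    obj = api.create_namespaced_config_map(namespace=namespace, body=data)
    logger.info("Child ConfigMap created: %s", obj.metadata.name)
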
Example #2
def create_fn(spec, **kwargs):

    # Render the pod yaml with some spec fields used in the template.
    doc = yaml.safe_load(f"""
        apiVersion: v1
        kind: Pod
        spec:
          containers:
          - name: the-only-one
            image: busybox
            command: ["sh", "-x", "-c"]
            args: 
            - |
              echo "FIELD=$FIELD"
              sleep {spec.get('duration', 0)}
            env:
            - name: FIELD
              value: {spec.get('field', 'default-value')}
    """)

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    kopf.adopt(doc)

    # Actually create an object by requesting the Kubernetes API.
    api = kubernetes.client.CoreV1Api()
    pod = api.create_namespaced_pod(namespace=doc['metadata']['namespace'],
                                    body=doc)

    # Update the parent's status.
    return {'children': [pod.metadata.uid]}
Example #3
def _create_pod(namespace, complex_job, pod_def, logger):
    api = client.CoreV1Api()
    complex_job_name = complex_job['metadata']['name']

    pod_name = pod_def.get('name')
    if not pod_name:
        raise kopf.PermanentError(f"'spec.pods.pod.name' must be set.")

    podSpec = pod_def.get('spec')
    if not podSpec:
        raise kopf.PermanentError("'spec.pods.pod.spec' must be set.")

    pod = client.V1Pod()
    pod.metadata = client.V1ObjectMeta(name=f'{complex_job_name}-{pod_name}')
    pod.spec = podSpec

    pod.metadata.labels = {
        "mytracks4mac.info/complexJob": complex_job_name,
        "mytracks4mac.info/podName": pod_name
    }

    # Create resource in cluster
    pod_dict = pod.to_dict()
    kopf.adopt(pod_dict, owner=complex_job)

    pod_obj = api.create_namespaced_pod(namespace=namespace, body=pod_dict)

    logger.info(f"POD child is created: %s", pod)
Example #4
def test_adopting(mocker, forced, strict, nested):
    # These methods are tested in their own tests.
    # We just check that they are called at all.
    append_owner_ref = mocker.patch(
        'kopf.toolkits.hierarchies.append_owner_reference')
    harmonize_naming = mocker.patch(
        'kopf.toolkits.hierarchies.harmonize_naming')
    adjust_namespace = mocker.patch(
        'kopf.toolkits.hierarchies.adjust_namespace')
    label = mocker.patch('kopf.toolkits.hierarchies.label')

    obj = {}
    kopf.adopt(obj,
               owner=Body(OWNER),
               forced=forced,
               strict=strict,
               nested=nested)

    assert append_owner_ref.called
    assert harmonize_naming.called
    assert adjust_namespace.called
    assert label.called

    assert append_owner_ref.call_args == call(obj, owner=Body(OWNER))
    assert harmonize_naming.call_args == call(obj,
                                              name=OWNER_NAME,
                                              forced=forced,
                                              strict=strict)
    assert adjust_namespace.call_args == call(obj,
                                              namespace=OWNER_NAMESPACE,
                                              forced=forced)
    assert label.call_args == call(obj,
                                   labels=OWNER_LABELS,
                                   nested=nested,
                                   forced=forced)
Example #5
def create_tunnel(meta, spec, namespace, logger, **kwargs):

    name = meta['name']
    namespace = meta['namespace']

    default_port = spec['ports'][0]['port']
    forward_port = meta['annotations'].get(f'{ann_ns}/forward-port',
                                           default_port)
    remote_port = meta['annotations'].get(f'{ann_ns}/remote-port',
                                          default_port)

    path = os.path.join(os.path.dirname(__file__), 'tunnel-deployment.yaml')
    with open(path, 'rt') as tmpl_file:
        tmpl = tmpl_file.read()

    text = tmpl.format(name=name,
                       namespace=namespace,
                       forward_port=forward_port,
                       remote_port=remote_port)
    data = yaml.safe_load(text)

    kopf.adopt(data)

    api = kubernetes.client.AppsV1Api()
    api.create_namespaced_deployment(namespace="tunnel", body=data)
Example #6
def test_adopting(mocker):
    # These methods are tested in their own tests.
    # We just check that they are called at all.
    append_owner_ref = mocker.patch(
        'kopf.structs.hierarchies.append_owner_reference')
    harmonize_naming = mocker.patch(
        'kopf.structs.hierarchies.harmonize_naming')
    adjust_namespace = mocker.patch(
        'kopf.structs.hierarchies.adjust_namespace')
    label = mocker.patch('kopf.structs.hierarchies.label')

    obj = Mock()
    kopf.adopt(obj, owner=OWNER)

    assert append_owner_ref.called
    assert harmonize_naming.called
    assert adjust_namespace.called
    assert label.called

    assert append_owner_ref.call_args_list == [call(obj, owner=OWNER)]
    assert harmonize_naming.call_args_list == [call(obj, name=OWNER_NAME)]
    assert adjust_namespace.call_args_list == [
        call(obj, namespace=OWNER_NAMESPACE)
    ]
    assert label.call_args_list == [call(obj, labels=OWNER_LABELS)]
Example #7
File: example.py Project: nnazeer/kopf
def create_fn(spec, **kwargs):

    # Render the pod yaml with some spec fields used in the template.
    doc = yaml.safe_load(f"""
        apiVersion: v1
        kind: Pod
        spec:
          containers:
          - name: the-only-one
            image: busybox
            command: ["sh", "-x", "-c"]
            args: 
            - |
              echo "FIELD=$FIELD"
              sleep {spec.get('duration', 0)}
            env:
            - name: FIELD
              value: {spec.get('field', 'default-value')}
    """)

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    kopf.adopt(doc)

    # Actually create an object by requesting the Kubernetes API.
    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    pod = pykube.Pod(api, doc)
    pod.create()
    api.session.close()

    # Update the parent's status.
    return {'children': [pod.metadata['uid']]}
Example #8
def kafka_secret_create(body, namespace, name, logger, **kwargs):
    new_obj = _copy_object(body)

    source_namespace = _source_namespace_for_secret(namespace, name, logger)

    if not _should_copy(name, namespace, source_namespace, body, logger):
        return

    new_secret = _create_new_secret(name, namespace, source_namespace, new_obj)

    # What we do here is to find the KafkaUser in the source namespace, and then let that adopt
    # the newly created secret. This gives us automatic deletion of the secret in the source namespace
    # if the KafkaUser is removed in the source namespace.
    corresponding_kafkauser = _load_kafkauser(
        new_secret.metadata["namespace"], name[len(source_namespace) + 1 :]
    )

    kopf.adopt([new_secret.obj], corresponding_kafkauser.obj)

    logger.info(
        f"Creating {new_secret.metadata['namespace']}/{new_secret} with a kafka-client.properties with SCRAM-SHA-256 configuration"
    )
    _update_or_create(new_secret)

    return {"copied_to": f"{new_secret.metadata['namespace']}/{new_secret}"}
Example #9
async def MonitorTriadSets(spec, meta, **kwargs):
    logger = NHDCommon.GetLogger(__name__)
    logger.debug(f'Kicking off controller timer for {meta["namespace"]}/{meta["name"]}')
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config()

    v1 = client.CoreV1Api()

    for ord in range(spec['replicas']):
        podname = f'{spec["serviceName"]}-{ord}'
        try:
            p = v1.read_namespaced_pod(name = podname, namespace = meta["namespace"])
        except ApiException as e:
            logger.info(f'Triad pod {podname} not found in namespace {meta["namespace"]}, but TriadSet is still active. Restarting pod')
            podspec = yaml.dump(spec["template"])
            
            # Prepend the apiVersion/kind header so the dumped template becomes a complete Pod manifest
            podspec = f"apiVersion: v1\nkind: Pod\n{podspec}"

            # Reload the yaml to patch some fields
            podyaml = yaml.safe_load(podspec)                
            podyaml['metadata']['name'] = podname # Give it the canonical statefulset-type name

            # Patch in the hostname and subdomain to create a DNS record like a statefulset
            podyaml['spec']['hostname']  = podname
            podyaml['spec']['subdomain'] = meta["name"]
            kopf.adopt(podyaml)
            obj = v1.create_namespaced_pod(namespace = meta['namespace'], body = podyaml)
Example #10
async def sleepy(s, logger, event, namespace, name, body, spec, **kwargs):
    logger.info(f"Handler {s} for event {event} with field {spec['field']}")
    # snooze = 10 * random.choice((0, 1))
    if event == "update" and s == 0 and spec[
            'field'] == 'value1' and random.choice((True, False)):
        # logger.info(f"Will sleep for {snooze}s")
        # await asyncio.sleep(snooze)
        # time.sleep(snooze)
        raise kopf.TemporaryError("BOOM!")

    child_def = {
        "kind": "KopfChild",
        "apiVersion": "zalando.org/v1",
        "spec": body["spec"],
        "metadata": {
            "name": f"{name}.{s}",
        },
    }
    logger.info(f"Applying spec with value {spec['field']}")
    try:
        child = KopfChild.objects(api).filter(namespace=namespace).get(
            name=child_def["metadata"]["name"])
        kopf.adopt(child_def, body)
        child.set_obj(child_def)
        child.update()
    except pykube.ObjectDoesNotExist:
        child = KopfChild(api, child_def)
        kopf.adopt(child_def, body)
        child.create()
Example #11
def create_function(body, spec, **kwargs):
    # The pod definition launched by our SimpleDeployment
    simple_pod = yaml.safe_load(f"""
        apiVersion: v1
        kind: Pod
        spec:
          containers:
          - name: simple
            image: busybox
            command: ["sh", "-x", "-c"]
            args: 
            - |
              echo "I'm so very useful!"
              sleep {spec.get('sleepytime', 0)}
    """)

    # Make the pod a child of our custom resource object by adding namespace, labels etc to the pod definition
    kopf.adopt(simple_pod, owner=body)

    # Create the pod in kubernetes
    api = kubernetes.client.CoreV1Api()
    pod = api.create_namespaced_pod(
        namespace=simple_pod['metadata']['namespace'], body=simple_pod)

    # Now "register" the pod with our SimpleDeployment
    return {'children': [pod.metadata.uid]}
Example #12
def create(body, meta, spec, status, **kwargs):
    api = K8SApi.from_env()

    logger.info("Creating deployment ...")
    deployment_data = create_deployment_data(meta, spec)
    kopf.adopt(deployment_data)
    # kopf.label(pod_data, {'application': 'kopf-example-10'})

    deployment = Deployment(api, deployment_data)
    if deployment.exists():
        deployment.update()
    else:
        deployment.create()
    logger.info("Creating deployment ... done!")

    logger.info("Creating service ...")
    service_data = create_service_data(meta, spec)
    kopf.adopt(service_data)

    service = Service(api, service_data)
    if service.exists():
        service.update()
    else:
        service.create()
    logger.info("Creating service ... done!")

    api.session.close()
    return {'job1-status': 100}
Example #13
def create_fn(body, spec, **kwargs):
    name = body['metadata']['name']
    namespace = body['metadata']['namespace']

    v1_client = _get_kube_v1_client()

    NGINX_JSON_TEMPLATE = {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": name,
            "nameapce": namespace
        },
        "spec": {
            "containers": [{
                "name": "nginx",
                "image": "nginx",
                "ports": [{
                    "containerPort": 80
                }]
            }]
        }
    }

    # Make the Pod a child of the operator's resource
    kopf.adopt(NGINX_JSON_TEMPLATE, owner=body)

    # Create Pod
    obj = v1_client.create_namespaced_pod(namespace, NGINX_JSON_TEMPLATE)
    logger.info("%s pod created", obj.metadata.name)

    return {'message': "NGINX pod created"}
Example #14
def reconcile(spec, name, namespace, logger, **kwargs):
    # Render the pod yaml with some spec fields used in the template.
    # TODO(asmacdo) read this from the CR
    size = 1
    # size = spec.get('size')
    # if not size:
    #     raise kopf.PermanentError(f"Size must be set on the Memcached resource.")

    name = "huzzah"
    # TODO(asmacdo) relative path isn't working? Should not need to hardcode this path
    # with open("memcached-deployment.yaml.j2", 'r') as template_file:
    with open("/src/memcached-deployment.yaml.j2", 'r') as template_file:
        template = Template(template_file.read())
        try:
            manifest_str = template.render(name=name, size=size)
            manifest = yaml.safe_load(manifest_str)
            # TODO(asmacdo) remove this
            print(manifest)
        except yaml.YAMLError as exc:
            print(exc)
            sys.exit(1)

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    # TODO(asmacdo) research this
    kopf.adopt(manifest)
    deployment = create_or_update_deployment(manifest)

    # Update the parent's status.
    return {'children': [deployment.metadata['uid']]}
Example #15
def on_ingress_update(name: str, namespace: str, annotations: dict, spec: dict,
                      old: dict, logger, **_):
    if config.DISABLE_INGRESS_HANDLING:
        logger.debug('handling of Ingress resources has been disabled')
        return

    monitor_prefix = f'{GROUP}/monitor.'
    monitor_spec = {
        k.replace(monitor_prefix, ''): v
        for k, v in annotations.items() if k.startswith(monitor_prefix)
    }

    previous_rule_count = len(old['spec']['rules'])
    index = 0

    for rule in spec['rules']:
        if 'host' not in rule:
            continue

        if rule['host'].startswith('*'):  # filter out wildcard domains
            continue

        host = rule['host']

        # we default to a ping check
        if 'type' not in monitor_spec:
            monitor_spec['type'] = MonitorType.PING.name

        if monitor_spec['type'] == 'HTTP':
            monitor_spec['url'] = f"http://{host}"
        elif monitor_spec['type'] == 'HTTPS':
            monitor_spec['url'] = f"https://{host}"
        else:
            monitor_spec['url'] = host

        monitor_name = f"{name}-{index}"

        monitor_body = MonitorV1Beta1.construct_k8s_ur_monitor_body(
            namespace,
            name=monitor_name,
            **MonitorV1Beta1.annotations_to_spec_dict(monitor_spec))
        kopf.adopt(monitor_body)

        if index >= previous_rule_count:  # create new UptimeRobotMonitors for rules added since the last update
            k8s.create_k8s_crd_obj_with_body(MonitorV1Beta1, namespace,
                                             monitor_body)
            logger.info(
                f'created new UptimeRobotMonitor object for URL {host}')
        else:  # update existing UptimeRobotMonitors; we currently don't check if there's actually a change
            k8s.update_k8s_crd_obj_with_body(MonitorV1Beta1, namespace,
                                             monitor_name, monitor_body)
            logger.info(f'updated UptimeRobotMonitor object for URL {host}')

        index += 1

    while index < previous_rule_count:  # make sure to clean up remaining UptimeRobotMonitors
        k8s.delete_k8s_crd_obj(MonitorV1Beta1, namespace, f"{name}-{index}")
        logger.info('deleted obsolete UptimeRobotMonitor object')
        index += 1
Example #16
def create_fn(spec, **kwargs):
    name = kwargs["body"]["metadata"]["name"]
    print("Name is %s\n" % name)
    # Create the deployment spec
    doc = yaml.safe_load(f"""
        apiVersion: apps/v1
        kind: Deployment
        metadata:
          name: {name}-deployment
          labels:
            app: {name}
        spec:
          replicas: {spec.get('replicas', 1)}
          selector:
            matchLabels:
              app: {name}
          template:
            metadata:
              labels:
                app: {name}
            spec:
              containers:
              - name: nginx
                image: nginx
                ports:
                - containerPort: 80
                volumeMounts:
                - name: workdir
                  mountPath: /usr/share/nginx/html
              initContainers:
              - name: install
                image: alpine/git
                command:
                - git
                - clone
                - {spec.get('gitrepo', 'https://github.com/gbaeke/static-web.git')}
                - /work-dir
                volumeMounts:
                - name: workdir
                  mountPath: /work-dir
              dnsPolicy: Default
              volumes:
              - name: workdir
                emptyDir: {{}}
    """)

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    kopf.adopt(doc)

    # Actually create an object by requesting the Kubernetes API.
    api = kubernetes.client.AppsV1Api()
    try:
        depl = api.create_namespaced_deployment(
            namespace=doc['metadata']['namespace'], body=doc)
        # Update the parent's status.
        return {'children': [depl.metadata.uid]}
    except ApiException as e:
        print("Exception when calling AppsV1Api->create_namespaced_deployment: %s\n" % e)
Example #17
    def to_dict(self, adopt=True):
        resource = {
            "apiVersion": self.apiVersion,
            "kind": self.kind,
            "metadata": self.get_metadata(),
            "spec": self.get_spec()
        }
        if adopt:
            kopf.adopt(resource)
        return resource
Example #18
    def run_item(self, item_name) -> dict:
        """
        run_item

        Execute an item job Pod with the spec details from the appropriate
        OaatType object.
        """
        # TODO: check oaatType
        spec = self.oaattype.podspec()
        contspec = spec['container']
        del spec['container']
        contspec.setdefault('env', []).append({
            'name': 'OAAT_ITEM',
            'value': item_name
        })
        for idx in range(len(contspec.get('command', []))):
            contspec['command'][idx] = (contspec['command'][idx].replace(
                '%%oaat_item%%', item_name))
        for idx in range(len(contspec.get('args', []))):
            contspec['args'][idx] = (contspec['args'][idx].replace(
                '%%oaat_item%%', item_name))
        for env in contspec['env']:
            env['value'] = (env.get('value',
                                    '').replace('%%oaat_item%%', item_name))

        # TODO: currently only supports a single container. Do we want
        # multi-container?
        doc = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'generateName': self.name + '-' + item_name + '-',
                'labels': {
                    'parent-name': self.name,
                    'oaat-name': item_name,
                    'app': 'oaat-operator'
                }
            },
            'spec': {
                'containers': [contspec],
                **spec, 'restartPolicy': 'Never'
            },
        }

        kopf.adopt(doc)
        pod = Pod(self.api, doc)

        try:
            pod.create()
        except pykube.exceptions.KubernetesError as exc:
            self.items.mark_failed(item_name)
            raise ProcessingComplete(
                error=f'could not create pod {doc}: {exc}',
                message=f'error creating pod for {item_name}')
        return pod
Example #19
def estissuer_certrequest_handler(namespace, spec, meta, body, patch, **_):
    """reconcile CertificateRequests"""
    # gather resources
    issuer = get_issuer_from_resource(body)
    cert = get_owner_by_kind(body, ["Certificate"])
    cert_secret = get_secret_from_resource(cert)
    # Create an EstOrder for it in request namespace
    renewal = (False if cert_secret is None else
               (cert_secret.type == "kubernetes.io/tls"))
    resource = ESTORDER_TEMPLATE.format(
        ordername=meta["name"] + "-order",
        issuername=issuer["metadata"]["name"],
        issuerkind=issuer["kind"],
        request=spec["request"],
        renewal=renewal,
    )
    resource = yaml.safe_load(resource)
    # Set EstOrder owner to CertificateRequest
    kopf.adopt(resource)
    # create the resource
    try:
        api = k8s.CustomObjectsApi()
        _ = api.create_namespaced_custom_object(
            group=GROUP,
            version=VERSION,
            namespace=namespace,
            plural="estorders",
            body=resource,
        )
    except k8s.exceptions.OpenApiException as err:
        raise kopf.TemporaryError(json.loads(err.body)["message"]) from err
    # log event
    message = f"Created new EstOrder {resource['metadata']['name']}"
    kopf.info(
        body,
        reason="Ordered",
        message=message,
    )
    # set certificate request status to False,Pending
    # utcnow()+"Z" b/c python datetime doesn't do Zulu
    # timepec='seconds' b/c cert-manager webhook will trim to seconds
    # (causing the API to warn about the inconsistency)
    condition = dict(
        lastTransitionTime=
        f"{datetime.utcnow().isoformat(timespec='seconds')}Z",
        type="Ready",
        status="False",
        reason="Pending",
        message=message,
    )
    if patch.status.get("conditions") is None:
        patch.status["conditions"] = []
    patch.status["conditions"].append(condition)
async def create_chaos_experiment(
        meta: ResourceChunk, body: Dict[str, Any], spec: ResourceChunk,
        namespace: str, logger: logging.Logger, **kwargs) -> NoReturn:
    """
    Create a new pod running a Chaos Toolkit instance until it terminates.
    """
    v1 = client.CoreV1Api()
    v1rbac = client.RbacAuthorizationV1Api()

    cm_pod_spec_name = spec.get("template", {}).get(
        "name", "chaostoolkit-resources-templates")
    cm = v1.read_namespaced_config_map(
        namespace=namespace, name=cm_pod_spec_name)

    keep_resources_on_delete = spec.get("keep_resources_on_delete", False)
    if keep_resources_on_delete:
        logger.info("Resources will be kept even when the CRO is deleted")

    ns, ns_tpl = create_ns(v1, cm, spec, logger=logger)
    if not keep_resources_on_delete:
        kopf.adopt(ns_tpl, owner=body)
    logger.info(f"chaostoolkit resources will be created in namespace '{ns}'")

    name_suffix = generate_name_suffix()
    logger.info(f"Suffix for resource names will be '-{name_suffix}'")

    sa_tpl = create_sa(v1, cm, spec, ns, name_suffix, logger=logger)
    if sa_tpl:
        if not keep_resources_on_delete:
            kopf.adopt(sa_tpl, owner=body)
        logger.info(f"Created service account")

    role_tpl = create_role(v1rbac, cm, spec, ns, name_suffix, logger=logger)
    if role_tpl:
        if not keep_resources_on_delete:
            kopf.adopt(role_tpl, owner=body)
        logger.info(f"Created role")

    role_binding_tpl = create_role_binding(
        v1rbac, cm, spec, ns, name_suffix, logger=logger)
    if role_binding_tpl:
        if not keep_resources_on_delete:
            kopf.adopt(role_binding_tpl, owner=body)
        logger.info(f"Created rolebinding")

    pod_tpl = create_pod(v1, cm, spec, ns, name_suffix)
    if pod_tpl:
        if not keep_resources_on_delete:
            kopf.adopt(pod_tpl, owner=body)
        logger.info("Chaos Toolkit started")
Example #21
def create_proxy(meta, body, logger, namespace, **kwargs):
    to_own = []
    name = meta.get('name')
    spec = proxy.create_proxy_body(name)

    if not pod_exists(name, namespace):
        obj = corev1.create_namespaced_pod(namespace=namespace, body=spec)
        logger.info(f"Successfully created proxy {name}")
        to_own.append(obj)
    else:
        logger.info(f"Pod {name} already exists")

    kopf.adopt(to_own, owner=body)
    return { "message": "Ok" }
Example #22
def create_fn(spec, **kwargs):
    name = kwargs["body"]["metadata"]["name"]
    print("Name is %s\n" % name)
    # Create the deployment spec
    doc = yaml.safe_load(f"""
        apiVersion: apps/v1
        kind: Deployment
        metadata:
          name: {name}-deployment
          labels:
            app: {name}
        spec:
          replicas: {spec.get('replicas', 1)}
          selector:
            matchLabels:
              app: {name}
          template:
            metadata:
              labels:
                app: {name}
            spec:
              containers:
              - name: hello-kubernetes
                image: {spec.get('image', "paulbouwer/hello-kubernetes:1.8")}
                ports:
                - containerPort: {spec.get('port', 8080)}
                resources:
                  limits:
                    memory: "{spec.get('ram', 128)}Mi"
                    cpu: "{spec.get('cpu', 1)}"
    """)

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    kopf.adopt(doc)

    # Actually create an object by requesting the Kubernetes API.
    api = kubernetes.client.AppsV1Api()
    try:
        depl = api.create_namespaced_deployment(
            namespace=doc['metadata']['namespace'], body=doc)
        # Update the parent's status.
        return {'children': [depl.metadata.uid]}
    except ApiException as e:
        print(
            "Exception when calling AppsV1Api->create_namespaced_deployment: %s\n"
            % e)
Example #23
def create_fn(spec, **kwargs):
    url = spec["website_url"]
    name = kwargs["body"]["metadata"]["name"]
    doc = get_yaml(url, name)

    print(f"Serving html from: {url}")

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    # When delete the custom resource, its children are also deleted.
    kopf.adopt(doc)

    api = pykube.HTTPClient(pykube.KubeConfig.from_env())
    dep = pykube.Deployment(api, doc)
    dep.create()
    api.session.close()

    return {'children': [dep.metadata['uid']]}
Example #24
def create_pvc(body, logger):
    size = VolumeConfig.VOLUME_SIZE
    storage_class_name = VolumeConfig.STORAGE_CLASS
    with open("templates/volume-template.yaml", 'r') as stream:
        try:
            volume = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    volume['metadata']['name'] = body['metadata']['name']
    volume['metadata']['namespace'] = body['metadata']['namespace']
    volume['spec']['resources']['requests']['storage'] = size
    volume['spec']['storageClassName'] = storage_class_name
    kopf.adopt(volume, owner=body)

    api = kubernetes.client.CoreV1Api()
    obj = api.create_namespaced_persistent_volume_claim(
        body['metadata']['namespace'], volume)
    logger.info(f"{obj.kind} {obj.metadata.name} created")
Example #25
def create_configmap_workflow(body, logger):
    with open("templates/configmap-template.yaml", 'r') as stream:
        try:
            configmap = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)

    configmap['metadata']['name'] = body['metadata']['name']
    configmap['metadata']['namespace'] = body['metadata']['namespace']
    data_to_dump = body['spec']['metadata']
    configmap['data']['workflow.yaml'] = yaml.dump(data_to_dump)
    configmap['data']['workflow.json'] = json.dumps(data_to_dump)
    kopf.adopt(configmap, owner=body)

    api = kubernetes.client.CoreV1Api()
    obj = api.create_namespaced_config_map(body['metadata']['namespace'],
                                           configmap)
    logger.info(f"{obj.kind} {obj.metadata.name} created")
Example #26
def create_function(body, spec, **kwargs):

    v1 = kubernetes.client.CoreV1Api()

    try:
        service = v1.read_namespaced_service(name=spec.test.serviceName,
                                             namespace=body.metadata.namespace)
    except:
        return {'state': 'failed'}

    port = 80

    for i in service.spec.ports:
        if i.name == spec.test.port:
            port = i.port

    pod = worker_pod(name="test",
                     method=spec.test.method,
                     url=service.metadata.name + service.metadata.namespace +
                     ".svc:" + str(port) + "/" + spec.test.path,
                     repetition=spec.test.repeat)

    # Make the pod a child of our custom resource object by adding namespace, labels etc to the pod definition
    kopf.adopt(pod, owner=body)

    # Create the pod in kubernetes
    if spec.replicas == 1:
        created = v1.create_namespaced_pod(
            namespace=pod['metadata']['namespace'], body=pod)
        return {'children': [created.metadata.uid]}
    elif spec.replicas > 1:
        children = []
        for x in range(0, spec.replicas - 1):
            pod.metadata.name = pod.metadata.name + "-" + str(x)
            created = v1.create_namespaced_pod(
                namespace=pod['metadata']['namespace'], body=pod)
            children.append(created.metadata.uid)
        return {'children': children}
    else:
        return {'state': 'failed'}

    # Now "register" the pod with our SimpleDeployment
    return {'children': [pod.metadata.uid]}
Example #27
def on_ingress_create(name: str, namespace: str, annotations: dict, spec: dict,
                      logger, **_):
    if config.DISABLE_INGRESS_HANDLING:
        logger.debug('handling of Ingress resources has been disabled')
        return

    monitor_prefix = f'{GROUP}/monitor.'
    monitor_spec = {
        k.replace(monitor_prefix, ''): v
        for k, v in annotations.items() if k.startswith(monitor_prefix)
    }

    index = 0
    for rule in spec['rules']:
        if 'host' not in rule:
            continue

        if rule['host'].startswith('*'):  # filter out wildcard domains
            continue

        host = rule['host']

        # we default to a ping check
        if 'type' not in monitor_spec:
            monitor_spec['type'] = MonitorType.PING.name

        if monitor_spec['type'] == 'HTTP':
            monitor_spec['url'] = f"http://{host}"
        elif monitor_spec['type'] == 'HTTPS':
            monitor_spec['url'] = f"https://{host}"
        else:
            monitor_spec['url'] = host

        monitor_body = MonitorV1Beta1.construct_k8s_ur_monitor_body(
            namespace,
            name=f"{name}-{index}",
            **MonitorV1Beta1.annotations_to_spec_dict(monitor_spec))
        kopf.adopt(monitor_body)

        k8s.create_k8s_crd_obj_with_body(MonitorV1Beta1, namespace,
                                         monitor_body)
        logger.info(f'created new UptimeRobotMonitor object for URL {host}')
        index += 1
Example #28
def create_fn(body, spec, **kwargs):
    log.info("Operator create function called")
    print(f"A handler is called with body: {body}")

    # Get info from resource object
    size = spec['size']
    name = body['metadata']['name']
    namespace = body['metadata']['namespace']
    image = 'nginx'

    if not size:
        raise kopf.HandlerFatalError(f"size must be set. Got {size}.")
    # Pod template
    pod = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'name': name,
            'labels': {
                'app': 'deepak'
            }
        },
        'spec': {
            'containers': [{
                'image': image,
                'name': name
            }]
        }
    }

    # Make the Pod for resource object
    kopf.adopt(pod, owner=body)

    # Object used to communicate with the API Server
    api = kubernetes.client.CoreV1Api()

    # Create Pod
    obj = api.create_namespaced_pod(namespace, pod)
    print(f"Pod {obj.metadata.name} created")

    # Update status
    msg = f"Pod created for resource object {name}"
    return {'message': msg}
Example #29
def create_pod(body, **kwargs):

    # Render the pod yaml with some spec fields used in the template.
    pod_data = yaml.safe_load(f"""
        apiVersion: v1
        kind: Pod
        spec:
          containers:
          - name: the-only-one
            image: busybox
            command: ["sh", "-x", "-c", "sleep 1"]
    """)

    # Make it our child: assign the namespace, name, labels, owner references, etc.
    kopf.adopt(pod_data, owner=body)
    kopf.label(pod_data, {'application': 'kopf-example-10'})

    # Actually create an object by requesting the Kubernetes API.
    pod = pykube.Pod(api, pod_data)
    pod.create()
Example #30
def create_fn(spec, name, namespace, logger, **kwargs):

    # CHECK IF POINTED INSTANCE EXISTS
    instance = get_custom_object(name=spec['instance'],
                                 plural='instances',
                                 namespace=operator_namespace)
    if instance:

        # CREATE DATABASE
        query_instance("CREATE DATABASE IF NOT EXISTS %s" % name, instance)
        logger.info("created database named %s" % name)

        # CREATE SERVICE IN CUSTOM RESOURCE DATABASE'S NAMESPACE
        service = yaml.safe_load(f"""
            kind: Service
            apiVersion: v1
            metadata:
              name: {name}
            spec:
              type: ExternalName
              externalName: {instance['spec']['address']}
              ports:
              - port: {instance['spec']['port']}
        """)
        kopf.adopt(service)
        try:
            service = client.CoreV1Api().create_namespaced_service(
                namespace=service['metadata']['namespace'], body=service)
        except ApiException as e:
            raise kopf.PermanentError(e)

        # RUN INITDB JOB IF SPECIFIED
        if 'initDb' in spec:
            logger.info(init_db(spec['initDb'], instance, name, namespace))

        # ADD USERS AND GRANT THEM PRIVILEGES
        for user in spec['users']:
            create_user(user, instance, name, namespace)

    else:
        raise kopf.PermanentError('no such instance %s' % spec['instance'])