Example #1
def delete_all_resources(name, project):
    """Deletes all resources (dc, routes, etc.) for the given name and project

    Args:
        name (str): application name
        project (str): openshift project name
    """
    oc.selector(labels={"app": name}).delete()
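
A minimal usage sketch for the helper above; the project and application names are placeholders, and the context managers are the same ones used throughout these examples:

import openshift as oc

# Placeholder names for illustration only.
with oc.project("my-project"), oc.timeout(60):
    delete_all_resources("my-app", "my-project")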
Example #2
def update(ctx, action, ns, name, release, custom_message, custom_reason,
           execute):
    patch = create_patch(action, custom_message, custom_reason)
    logger.debug(f'Generated oc patch:\n{json.dumps(patch, indent=4)}')

    with oc.options(ctx), oc.tracking(), oc.timeout(15):
        try:
            with oc.project(ns):
                tag = oc.selector(f'imagestreamtag/{name}:{release}').object(
                    ignore_not_found=True)
                if not tag:
                    raise Exception(
                        f'Unable to locate imagestreamtag: {ns}/{name}:{release}'
                    )

                logger.info(f'{action.capitalize()}ing: {ns}/{name}:{release}')
                if execute:
                    backup_file = write_backup_file(name, release,
                                                    tag.model._primitive())

                    tag.patch(patch)

                    logger.info(f'Release {release} updated successfully')
                    logger.info(f'Backup written to: {backup_file}')
                else:
                    logger.info(
                        f'[dry-run] Patching release {release} with patch:\n{json.dumps(patch, indent=4)}'
                    )
                    logger.warning(
                        'You must specify "--execute" to permanently apply these changes'
                    )

        except (ValueError, OpenShiftPythonException, Exception) as e:
            logger.error(f'Unable to update release: "{release}"')
            raise e
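
The create_patch() helper is not shown in this example. A hedged sketch of what such a strategic-merge patch could look like, modeled on the release annotations edited in Examples #21 and #26; the exact annotation keys are an assumption:

def create_patch(action, custom_message=None, custom_reason=None):
    # Hypothetical stand-in for the helper used above: build a metadata/annotations
    # patch for the imagestreamtag, mirroring the later release examples.
    annotations = {'release.openshift.io/phase': action.capitalize()}
    if custom_message:
        annotations['release.openshift.io/message'] = custom_message
    if custom_reason:
        annotations['release.openshift.io/reason'] = custom_reason
    return {'metadata': {'annotations': annotations}}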
Example #3
    def update_deployment(self,
                          name,
                          model_uri=None,
                          flavor=None,
                          config=None):
        """Updates an existing model deployment in openshift. It can update
        the `model_uri` and/or the mandatory config items describing the container image,
        i.e. `image`, `docker_registry`, `tag`.

        Notes:
            In case more configurations need to be changed, consider deleting and creating
            the deployment from scratch.
            Special treatment for different model flavors is not implemented.

        Args:
            name (str): name of the deployment
            model_uri (str): path where to find the mlflow packed model
            flavor (str, optional): mlflow deployment flavor. Defaults to None
            config (dict, optional): config items for the deployment. Defaults to {}

        Raises:
            MlflowException: if the updated deployment leads to an error in openshift

        Returns:
            dict: {'name': <name>, 'flavor': <flavor>}
        """

        if not model_uri and not config:
            raise MlflowException(
                "Provide at least a new *model_uri* or *config*")

        dc_obj = oc.selector("dc", labels={"app": name}).object()
        if config:
            if all(key in config
                   for key in ("image", "docker_registry", "tag")):
                dc_obj = oc_helper.update_container_image(dc_obj, config)
            else:
                raise MlflowException(
                    "Not all of the necessary *config* items for updating are provided. "
                    "You need to provide: image, docker_registry and tag"
                )

        if model_uri:
            dc_obj = oc_helper.update_model_uri(dc_obj, model_uri)

        # hotfix for bug in openshift-client library -> normal apply()
        dc_obj.modify_and_apply(lambda x: True, retries=0)

        route_host = oc_helper.get_route_name(name)
        auth_user, auth_password = oc_helper.get_authentication_info(name)

        try:
            oc_helper.check_succesful_deployment(name, route_host, auth_user,
                                                 auth_password)
        except MlflowException as mlflow_exception:
            self.delete_deployment(name)
            raise mlflow_exception

        return {'name': name, 'flavor': flavor}
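
The `dc_obj.modify_and_apply(lambda x: True, retries=0)` call above only re-applies the object unchanged (the comment marks it as a hotfix). When an actual in-place edit is needed, the same method accepts a function that mutates the model and returns True, as the release examples later on this page do. A hedged sketch with a purely illustrative annotation key:

import time

def add_redeploy_annotation(apiobj):
    # Illustrative annotation key; returning True tells modify_and_apply to apply the edit.
    apiobj.model.metadata.annotations['example.org/redeployed-at'] = str(int(time.time()))
    return True

# dc_obj is the DeploymentConfig APIObject selected in update_deployment() above.
dc_obj.modify_and_apply(add_redeploy_annotation, retries=2)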
Example #4
def get_ca_bundle_from_hub():
    os.environ['KUBECONFIG'] = env_variables['installer_kubeconfig_path']
    with oc.project(env_variables['namespace']):
        ca_config_map_objects = oc.selector('configmap/registry-ca').objects()
        assert len(ca_config_map_objects) > 0
        ca_config_map_object = ca_config_map_objects[0]
        ca_bundle = ca_config_map_object.model.data['ca-bundle.crt']
    return ca_bundle
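
A small hedged follow-up showing one way the returned bundle could be consumed; the output path is a placeholder and not part of the original test code:

# Persist the CA bundle so other tooling (e.g. a requests `verify=` argument) can use it.
ca_bundle = get_ca_bundle_from_hub()
with open("/tmp/registry-ca.crt", "w", encoding="utf-8") as ca_file:
    ca_file.write(ca_bundle)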
Example #5
 def get_ca_bundle_from_hub(cls, spoke_namespace: str) -> str:
     os.environ["KUBECONFIG"] = global_variables.installer_kubeconfig_path
     with oc.project(spoke_namespace):
         ca_config_map_objects = oc.selector("configmap/registry-ca").objects()
         assert len(ca_config_map_objects) > 0
         ca_config_map_object = ca_config_map_objects[0]
         ca_bundle = ca_config_map_object.model.data["ca-bundle.crt"]
     return ca_bundle
Example #6
 def _are_proxy_paramas_defined_in_clusterwide_proxy(
         self, cluster, http_proxy, https_proxy):
     cluster.download_kubeconfig()
     log.info(
         f'Verifying proxy parameters are defined in the cluster-wide proxy object for cluster {cluster.id}'
     )
     proxy_object = oc.selector('proxy/cluster').objects()[0]
     assert proxy_object.model.spec.httpProxy == http_proxy
     assert proxy_object.model.spec.httpsProxy == https_proxy
Example #7
 def _are_proxy_paramas_defined_in_clusterwide_proxy(
         self, cluster_id, api_client, http_proxy, https_proxy):
     api_client.download_kubeconfig(cluster_id,
                                    env_variables['kubeconfig_path'])
     log.info(
         f'Verifying proxy parameters are defined in the cluster-wide proxy object for cluster {cluster_id}'
     )
     proxy_object = oc.selector('proxy/cluster').objects()[0]
     assert proxy_object.model.spec.httpProxy == http_proxy
     assert proxy_object.model.spec.httpsProxy == https_proxy
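
Both variants above assert on two of the three fields of the cluster-wide proxy spec. A hedged sketch that also reads spec.noProxy, assuming the standard config.openshift.io/v1 Proxy schema:

import openshift as oc

def get_clusterwide_proxy_spec():
    # Return the (httpProxy, httpsProxy, noProxy) triple from proxy/cluster.
    spec = oc.selector('proxy/cluster').objects()[0].model.spec
    return spec.httpProxy, spec.httpsProxy, spec.noProxy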
Example #8
def get_raw_pod_info(name):
    """Gets full pod information json

    Args:
        name (str): name of application

    Returns:
        str: pod information encoded in json
    """
    try:
        pod_info = oc.selector("pods", labels={"app": name}).object().as_json()
    except OpenShiftPythonException:
        pod_info = None
    return pod_info
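
A minimal usage sketch; the application name is a placeholder:

import json

raw = get_raw_pod_info("my-app")
if raw is not None:
    pod = json.loads(raw)
    print(pod["metadata"]["name"], pod["status"]["phase"])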
Example #9
    def list_deployments(self):
        """Lists all mlflow deployments in the current openshift project.

        Notes:
            mlflow deployments are recognized by the label `mlflow` that
            is attached to all deployments generated by this plugin.

        Returns:
            list: containing dictionaries for each deployment,
                e.g. [{'name': 'deployment1'}]
        """
        mlflow_deployments = oc.selector("dc", labels={
            "template": "mlflow"
        }).names()
        return mlflow_deployments
Example #10
    def __init__(self, entity_name='system_name', **kwargs):
        if 'spec' in kwargs:
            spec = kwargs.pop('spec')
            crd = kwargs.pop('crd')
            entity = {}
            for key, value in spec.items():
                for cey, walue in constants.KEYS_ACTIVE_DOC.items():
                    if key == walue:
                        entity[cey] = value
            entity['id'] = crd.as_dict().get('status').get('activeDocId')

            # if OAS is referenced by url:
            # 1) OAS is loaded to body
            # 2) when body is updated, secret is created and it replaces reference by url

            # if OAS is referenced by secret:
            # 1) OAS is loaded from secret and stored into body
            # 2) when body is updated, secret is changed

            if 'url' in spec['activeDocOpenAPIRef']:
                url = spec['activeDocOpenAPIRef']['url']
                entity['url'] = url
                res = requests.get(url)
                if url.endswith('.yaml') or url.endswith('.yml'):
                    entity['body'] = json.dumps(yaml.safe_load(res.content))
                else:
                    entity['body'] = res.content
            elif 'secretRef' in spec['activeDocOpenAPIRef']:
                secret_name = spec['activeDocOpenAPIRef']['secretRef']['name']
                secret = ocp.selector('secret/' + secret_name).objects()[0]
                enc_body = list(secret.as_dict()['data'].values())[0]
                entity['body'] = base64.b64decode(enc_body).decode('ascii')

            threescale_api.resources.ActiveDoc.__init__(
                self, entity_name=entity_name, entity=entity)
            DefaultResourceCRD.__init__(self,
                                        crd=crd,
                                        entity_name=entity_name,
                                        entity=entity,
                                        **kwargs)
        else:
            # this branch is not a fallback; we need the option to create an empty
            # object without any data. This is related to "lazy load"
            threescale_api.resources.ActiveDoc.__init__(
                self, entity_name=entity_name, **kwargs)
            DefaultResourceCRD.__init__(self,
                                        entity_name=entity_name,
                                        **kwargs)
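
The secretRef branch above is essentially the inverse of create_secret_if_needed() in Example #13. A standalone hedged sketch of that decode step, using only calls already shown in this example; the function name is an invention for illustration:

import base64
import openshift as ocp

def read_first_secret_value(secret_name):
    # Fetch the secret and base64-decode the first value in its data map.
    secret = ocp.selector('secret/' + secret_name).objects()[0]
    encoded = list(secret.as_dict()['data'].values())[0]
    return base64.b64decode(encoded).decode('ascii')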
Example #11
def run_pods(pod_count=5, *, project_name=None):
    logger.info('Running in namespace: {}'.format(project_name))

    for i in range(pod_count):
        pod_name = 'pod-{}'.format(i)
        logger.info('Creating: {}'.format(pod_name))

        pod_selector = oc.create(
            oc.build_pod_simple(pod_name,
                                image='python:3',
                                command=['tail', '-f', '/dev/null']))
        pod_selector.until_all(1, success_func=oc.status.is_pod_running)

    pods = oc.selector('pods').objects()
    logger.info('Found {} pods'.format(len(pods)))
    assert len(pods) == pod_count
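
A minimal usage sketch; the namespace is a placeholder and is assumed to already exist:

import openshift as oc

with oc.project('scale-test'):
    run_pods(pod_count=3, project_name='scale-test')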
Example #12
def create_resource(yaml, success, tries):
    with oc.tracking() as tracker:
        try:
            oc.create(yaml)
        except oc.OpenShiftPythonException:
            if 'AlreadyExists' in tracker.get_result().err():
                # if 'AlreadyExists' in oc.OpenShiftPythonException.get_result()
                print("Resource already exists")
            else:
                raise Exception(f'Failed: {tracker.get_result().err()}')
        except Exception:
            raise Exception(f'Failed: {tracker.get_result().err()}')
    if success:
        try_count = 0
        while len(success) > 0 and try_count < tries:
            try_count += 1
            print(f'TRY: {try_count} of {tries}')
            for criteria in success:
                resource_type = criteria[0]
                resource_name = criteria[1]
                resource_count = criteria[2]
                found = oc.selector(resource_type)
                count = 0
                for item in found:
                    name = item.qname()
                    print(f'{resource_name} in {name}')
                    if resource_name in name:
                        if 'pod' in resource_type:
                            pod = item.as_dict()
                            status = pod['status']['phase']
                            print(f'Status: {status}')
                            if status == 'Running' or status == 'Succeeded':
                                count += 1
                                print(f'Found {count} of {resource_count}')
                        else:
                            count += 1
                            print(f'Found {count} of {resource_count}')
                        if count >= resource_count:
                            success.remove(criteria)
                            break
            if len(success) == 0:
                return
            time.sleep(10)
        else:
            if try_count >= tries:
                raise Exception('Failed to create resource in time')
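
A usage sketch for create_resource(); the ConfigMap YAML and the success criteria are placeholders, mirroring the local-storage namespace call that appears further down this page:

configmap_yaml = """
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config
data:
  key: value
"""

# Wait (up to 5 tries) until one configmap whose name contains 'example-config' exists.
create_resource(configmap_yaml, [('configmap', 'example-config', 1)], 5)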
Example #13
 def create_secret_if_needed(params, namespace):
     body_ascii = str(params['body']).encode('ascii')
     body_enc = base64.b64encode(body_ascii)
     spec_sec = copy.deepcopy(constants.SPEC_SECRET)
     spec_sec['metadata']['name'] = params['name']
     spec_sec['metadata']['namespace'] = namespace
     spec_sec['data'][params['name']] = body_enc.decode('ascii')
     result = ocp.selector('secret/' + params['name'])
     if result.status() == 0:
         objs = result.objects()
         if objs:
             objs[0].delete()
     result = ocp.create(spec_sec)
     assert result.status() == 0
     if 'url' in params:
         del params['url']
     del params['body']
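
A usage sketch; the parameter values are placeholders. Note that the function mutates params in place, dropping 'url' and 'body' once the secret exists:

params = {'name': 'my-oas', 'body': '{"openapi": "3.0.0", "info": {"title": "demo"}}'}
create_secret_if_needed(params, 'my-namespace')
# params now only carries the name; the body lives in secret/my-oas.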
Example #14
def get_route_name(name):
    """Retrieves the route name of the openshift application.

    Args:
        name (str): name of the openshift application

    Raises:
        MlflowException: route not found

    Returns:
        str: URL of the route associated with model endpoint
    """
    try:
        route_obj = oc.selector('routes', labels={"app": name}).object()
        return route_obj.model.spec.host
    except OpenShiftPythonException:
        raise MlflowException(f"could not find route information for {name}")
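
A minimal usage sketch; the application name and the /invocations path (mlflow's scoring endpoint) are assumptions for illustration:

host = get_route_name("my-model")
endpoint = f"https://{host}/invocations"
print(f"Model endpoint: {endpoint}")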
Example #15
def get_pod_containers_usage(project):
    """
    Yields usage metrics for each container in the project's pods.
    """
    with oc.project(project), oc.timeout(2 * 60):
        for pod_obj in oc.selector('pods').objects():
            metric = get_pod_metrics(pod_obj)
            pod_name = pod_obj.model.metadata.name
            if metric:
                containers = metric.model.containers
                for container in containers:
                    app_name = container['name']
                    usage = get_container_usage(container)
                    containerUsage = ContainerUsage(app_name, pod_name, usage)
                    yield containerUsage
            else:
                msg = 'No metrics for pod {}'.format(pod_name)
                info_msg(msg)
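
The get_pod_metrics(), get_container_usage() and ContainerUsage helpers are not part of this example. A hedged sketch of the latter two, assuming the standard PodMetrics container layout where each entry carries a 'usage' map with 'cpu' and 'memory':

from collections import namedtuple

ContainerUsage = namedtuple('ContainerUsage', ['app_name', 'pod_name', 'usage'])

def get_container_usage(container):
    # Hypothetical helper: pull the cpu/memory usage map off a PodMetrics container entry.
    return {'cpu': container['usage']['cpu'], 'memory': container['usage']['memory']}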
Example #16
def check_online_project_constraints():
    test_project_name = 'imperative-verify-test-project-constraints'

    with temp_project(test_project_name):
        time.sleep(2)

        oc.selector('limitrange').object()
        report_verified('New projects contain limit ranges')

        oc.selector('networkpolicy').objects()
        report_verified('New projects contain network policies')

        oc.selector('resourcequota').objects()
        report_verified('New projects contain resource quotas')

    report_verified("Template based project constraints are being created!")
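
The temp_project() and report_verified() helpers are not shown here. A hedged sketch of what a temp_project() context manager could look like, built only from calls used elsewhere on this page; creating a Namespace directly is an assumption (the real helper may go through a ProjectRequest instead):

import contextlib
import openshift as oc

@contextlib.contextmanager
def temp_project(name):
    # Create the namespace, make it the active project, and clean it up afterwards.
    oc.create({
        'apiVersion': 'v1',
        'kind': 'Namespace',
        'metadata': {'name': name},
    })
    try:
        with oc.project(name):
            yield
    finally:
        oc.selector('namespace/' + name).delete()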
Example #17
    def get_bmc_addr(self, node_name):
        # Addresses in the config get higher priority.
        if self.bm_info is not None and node_name in self.bm_info and "bmc_addr" in self.bm_info[node_name]:
            return self.bm_info[node_name]["bmc_addr"]

        # Get the bmc addr from the BareMetalHost object.
        with oc.project("openshift-machine-api"):
            logging.info("Getting node with name: %s" % (node_name))
            node = self.get_node_object(node_name)
            provider_id = node.model.spec.providerID
            startOfUid = provider_id.rfind("/")  # The / before the uid
            startOfName = provider_id.rfind("/", 0, startOfUid) + 1
            bmh_name = provider_id[startOfName:startOfUid]
            bmh_resource_name = "baremetalhost.metal3.io/" + bmh_name
            bmh_object = oc.selector(bmh_resource_name).object()
            if len(bmh_object.model.spec.bmc.address) == 0:
                logging.error(
                    'BMC addr empty for node "%s". Either fix the BMH object,'
                    " or specify the address in the scenario config" % node_name
                )
                sys.exit(1)
            return bmh_object.model.spec.bmc.address
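
The providerID slicing above extracts the BareMetalHost name, i.e. the segment between the last two slashes of a path-like string. An equivalent hedged sketch using rsplit; the sample value is purely illustrative:

provider_id = "baremetalhost:///openshift-machine-api/worker-0/1234-abcd"  # illustrative
bmh_name = provider_id.rsplit("/", 2)[-2]
print(bmh_name)  # -> worker-0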
Example #18
def get_pod_info_from_app_name(name):
    """Retrieves the newest Pod (by startTime) for the application *name*

    Args:
        name (str): name of the openshift application

    Raises:
        MlflowException: no container was started within the timeout period

    Returns:
        openshift.apiobject.APIObject: containing pod description
    """
    newest_pod = None
    newest_pod_start_time = datetime.datetime(2000, 1, 1, 1, 1, 1)
    retries_left = RETRIES

    while retries_left > 0:
        pod_objs = oc.selector("pods", labels={"app": name}).objects()

        if not pod_objs:
            # no containers for that application, yet
            retries_left -= 1
            time.sleep(SLEEP_TIME)
        else:
            for pod_obj in pod_objs:
                # look for the newest pod if more than one is present
                start_time = pod_obj.model.status.startTime
                start_time_dt = datetime.datetime.strptime(
                    start_time, "%Y-%m-%dT%H:%M:%SZ")
                if start_time_dt > newest_pod_start_time:
                    newest_pod_start_time = start_time_dt
                    newest_pod = pod_obj
            return newest_pod
    else:
        timeout = RETRIES * SLEEP_TIME
        raise MlflowException(
            f"Timeout: No new pod was started for {name} within {timeout} seconds"
        )
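
A minimal usage sketch; the application name is a placeholder:

pod = get_pod_info_from_app_name("my-app")
if pod is not None:
    print(pod.model.metadata.name, pod.model.status.startTime)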
Example #19
#!/usr/bin/env python

from __future__ import absolute_import
import openshift as oc

if __name__ == '__main__':
    with oc.client_host():
        with oc.project('openshift-monitoring'):
            oc.selector(['dc', 'build', 'configmap']).print_report()

Example #20
#!/usr/bin/python

import openshift as oc
from openshift import Missing
import traceback

with oc.tracking() as t:
    with oc.client_host(hostname="18.222.71.125", username="******", auto_add_host=True):  # free-stg
        with oc.project("openshift-monitoring"):
            try:

                result = oc.selector('pod/alertmanager-main-0').object().execute(['cat'],
                                                                                 container_name='alertmanager',
                                                                                 stdin='stdin for cat')
                print(result.out())
                exit(0)

                cr_rules = oc.selector("prometheusrules")
                print("CR has the following rule sets: {}".format(cr_rules.qnames()))

                if cr_rules.object().model.metadata.labels.cr_generated is Missing:
                    print("Rule was not generated by CR")

                oc.selector('pods').annotate(annotations={
                    'cr_annotation_test': None,
                })

                oc.selector('node/pod_ip-172-31-79-85.us-east-2.compute.internal').object().patch({
                    'metadata': {
                        'annotations': {
                            'cr_patch': 'yes',
                        },
                    },
                })
Example #21
def run(arch, release, confirm):
    """
    Sets annotations and deletes prowjobs to restart testing on a release.
    requires:  pip3 install openshift-client
    OR https://github.com/openshift/openshift-client-python must be setup in your PYTHONPATH.

    \b
    If openshift-client-python is in $HOME/projects/openshift-client-python:
    $ export PYTHONPATH=$PYTHONPATH:$HOME/projects/openshift-client-python/packages

    \b
    Example invocation:
    $ ./retest.py -r 4.4.0-rc.3
                  --confirm
    """

    arch_suffix = ''
    if arch != 'amd64' and arch != 'x86_64':
        arch_suffix = f'-{arch}'

    t1 = input('Enter a token for https://api.ci.l2s4.p1.openshiftapps.com: ')

    with oc.api_server(api_url='https://api.ci.l2s4.p1.openshiftapps.com:6443'
                       ), oc.options({'as': 'system:admin'}), oc.token(t1):
        with oc.project('ci'):
            print(f'Searching for prowjobs associated with {release}')
            prowjobs = oc.selector(
                'prowjobs').narrow(lambda obj: obj.model.metadata.annotations[
                    'release.openshift.io/tag'] == release and 'chat-bot'
                                   not in obj.model.metadata.name)
            print(f'Found prowjobs: {prowjobs.qnames()}')
            if confirm:
                print('Deleting associated prowjobs')
                prowjobs.delete()
            else:
                print(WARNING +
                      'Run with --confirm to delete these resources' + ENDC)

    with oc.api_server(api_url='https://api.ci.openshift.org'), oc.options(
        {'as': 'system:admin'}):
        with oc.project(f'ocp{arch_suffix}'):

            istag_qname = f'istag/release{arch_suffix}:{release}'
            istag = oc.selector(istag_qname).object(ignore_not_found=True)
            if not istag:
                raise IOError(f'Could not find {istag_qname}')

            def trigger_retest(obj):
                for annotations in (obj.model.image.metadata.annotations,
                                    obj.model.metadata.annotations,
                                    obj.model.tag.annotations):
                    annotations.pop('release.openshift.io/message', None)
                    annotations.pop('release.openshift.io/phase', None)
                    annotations.pop('release.openshift.io/reason', None)
                    annotations.pop('release.openshift.io/verify', None)

                print(json.dumps(obj.model._primitive(), indent=4))
                if confirm:
                    print('Attempting to apply this object.')
                    return True
                else:
                    print(
                        WARNING +
                        '--confirm was not specified. Run again to apply these changes.'
                        + ENDC)
                    exit(0)

            result, changed = istag.modify_and_apply(trigger_retest,
                                                     retries=10)
            if not changed:
                print(WARNING + 'No change was applied to the object' + ENDC)
                print(f'Details:\n{result.as_json()}')
                exit(1)

        print('Success!')
Example #22
def main():

    configmaps = oc.selector("configmaps")
    configmap_objects = configmaps.objects()
    conf = configmap_objects[2]
    print('Metadata:\n{}\n'.format(conf.model.metadata))
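
A hedged variant that avoids indexing a fixed position in the list and prints just the annotations of every configmap:

import openshift as oc

for cm in oc.selector("configmaps").objects():
    print('{}: {}'.format(cm.model.metadata.name, cm.model.metadata.annotations))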
Example #23
    args = vars(parser.parse_args())

    skip_tls_verify = args['insecure_skip_tls_verify']

    if skip_tls_verify:
        oc.set_default_skip_tls_verify(True)

    bastion_hostname = args['bastion']
    if not bastion_hostname:
        logging.info('Running in local mode. Expecting "oc" in PATH')

    with oc.client_host(hostname=bastion_hostname,
                        username="******",
                        auto_add_host=True,
                        load_system_host_keys=False):
        # Ensure tests complete within 30 minutes and track all oc invocations
        with oc.timeout(60 * 30), oc.tracking() as t:
            try:
                with oc.project('default'):
                    bc = oc.selector('bc/does-not-exist')
                    bc.start_build()
            except (ValueError, OpenShiftPythonException, Exception):
                # Print out exception stack trace via the traceback module
                logger.info('Traceback output:\n{}\n'.format(
                    traceback.format_exc()))

                # Print out all oc interactions and do not redact secret information
                logger.info("OC tracking output:\n{}\n".format(
                    t.get_result().as_json(redact_streams=False)))
Example #24
"""
    nodes=oc.selector('nodes')
    print(f'Nodes: {nodes}')
    print(len(nodes.qnames()))
    count=0
    for node in nodes:
        if node.get_label('node-role.kubernetes.io/worker') != None:
            print(f"Name {node.name()} label:{node.get_label('node-role.kubernetes.io/worker')}")
            count+=1
    print(count)
    # nodecount=len(nodes)
    print(nodes.qnames())
    # print(nodes.count_existing())
"""

worker_count = 0
for node in oc.selector('nodes'):
    if node.get_label('node-role.kubernetes.io/worker') is not None:
        node.label({"cluster.ocs.openshift.io/openshift-storage": ''})
        print(
            f"Name {node.name()} label:{node.get_label('node-role.kubernetes.io/worker')}"
        )
        worker_count += 1
    print("worker_count: {}".format(worker_count))
namespace = """
apiVersion: v1
kind: Namespace
metadata:
  name: local-storage
    """
created = [('namespace', 'local-storage', 1)]
create_resource(namespace, created, 5)
#!/usr/bin/env python

import openshift as oc
from openshift import APIObject


class MyCustomPodClass(APIObject):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def super_cool_awesomeness(self):
        print('Calling: super_cool_awesomeness() on pod: {}/{}'.format(
            self.model.metadata.namespace, self.model.metadata.name))


if __name__ == '__main__':
    with oc.client_host():
        with oc.project('openshift-monitoring'):

            objs = oc.selector('pods', labels={
                'app': 'prometheus'
            }).objects(cls=MyCustomPodClass)

            for obj in objs:
                print(type(obj))
                obj.super_cool_awesomeness()
Example #26
File: accept.py Project: wking/aos-cd-jobs
def run(arch, release, upgrade_url, upgrade_minor_url, confirm):
    """
    Sets annotations to force OpenShift release acceptance.
    Requires https://github.com/openshift/openshift-client-python to be setup in your PYTHONPATH.

    \b
    If openshift-client-python is in $HOME/projects/openshift-client-python:
    $ export PYTHONPATH=$PYTHONPATH:$HOME/projects/openshift-client-python/packages

    \b
    Example invocation:
    $ ./accept.py -r 4.4.0-rc.3
                  -u 'https://prow.svc.ci.openshift.org/view/...origin-installer-e2e-gcp-upgrade/575'
                  -m 'https://prow.svc.ci.openshift.org/view/...origin-installer-e2e-gcp-upgrade/461'
                  --confirm
    """

    if not upgrade_minor_url and not upgrade_url:
        click.echo(
            'One or both upgrade urls must be specified in order to accept the release'
        )
        exit(1)

    arch_suffix = ''
    if arch != 'amd64' and arch != 'x86_64':
        arch_suffix = f'-{arch}'

    with oc.api_server(api_url='https://api.ci.openshift.org'), \
         oc.options({'as': 'system:admin'}), \
         oc.project(f'ocp{arch_suffix}'):

        istag_qname = f'istag/release{arch_suffix}:{release}'
        istag = oc.selector(istag_qname).object(ignore_not_found=True)
        if not istag:
            raise IOError(f'Could not find {istag_qname}')

        ts = int(round(time.time() * 1000))
        backup_filename = f'release{arch_suffix}_{release}.{ts}.json'
        if confirm:
            with open(backup_filename, mode='w+', encoding='utf-8') as backup:
                print(f'Creating backup file: {backup_filename}')
                backup.write(json.dumps(istag.model._primitive(), indent=4))

        def make_release_accepted(obj):
            for annotations in (obj.model.image.metadata.annotations,
                                obj.model.metadata.annotations,
                                obj.model.tag.annotations):
                annotations.pop('release.openshift.io/message', None)
                annotations.pop('release.openshift.io/reason', None)
                annotations['release.openshift.io/phase'] = 'Accepted'

                verify_str = annotations['release.openshift.io/verify']
                verify = oc.Model(json.loads(verify_str))
                verify.upgrade.state = 'Succeeded'
                if upgrade_url:
                    verify.upgrade.url = upgrade_url
                verify['upgrade-minor'].state = 'Succeeded'
                if upgrade_minor_url:
                    verify['upgrade-minor'].url = upgrade_minor_url
                annotations['release.openshift.io/verify'] = json.dumps(
                    verify._primitive(), indent=None)

            print(json.dumps(obj.model._primitive(), indent=4))
            if confirm:
                print('Attempting to apply this object.')
                return True
            else:
                print(
                    WARNING +
                    '--confirm was not specified. Run again to apply these changes.'
                    + ENDC)
                exit(0)

        result, changed = istag.modify_and_apply(make_release_accepted,
                                                 retries=10)
        if not changed:
            print(WARNING + 'No change was applied to the object' + ENDC)
            print(f'Details:\n{result.as_json()}')
            exit(1)

        print('Success!')
        print(f'Backup written to: {backup_filename}')
Example #27
#!/usr/bin/python

from __future__ import print_function
from __future__ import absolute_import

import openshift as oc
from openshift import null, Missing, OpenShiftPythonException

try:

    print(
        "Projects created by users:",
        oc.selector("projects").narrow(lambda prj: prj.metadata.annotations[
            "openshift.io/requester"] is not Missing).qnames())

    oc.selector("projects").narrow(
        # Eliminate any projects created by the system
        lambda prj: prj.metadata.annotations["openshift.io/requester"
                                             ] is not Missing
    ).narrow(
        # Select from user projects any which violate privileged naming convention
        lambda prj: prj.metadata.qname == "openshift" or prj.metadata.qname.
        startswith("openshift-") or prj.metadata.qname == "kubernetes" or prj.
        metadata.qname.startswith("kube-") or prj.metadata.qname.
        startswith("kubernetes-")).for_each(
            lambda prj: oc.error("Invalid project: %s" % prj.metadata.qname))

    with oc.timeout(5):
        success, obj = oc.selector("pods").until_any(
            lambda pod: pod.status.phase == "Succeeded")
        if success:
Example #28
#!/usr/bin/python

import openshift as oc
'''
This example will scan all the templates, on the cluster, and look specifically for the openshift/nginx-example
template.  If the template is located, it clears the namespace (to prevent an error when calling 'oc process'),
updates any template parameter(s), processes the template, and then creates the objects in the current namespace.
'''
if __name__ == '__main__':
    with oc.client_host():
        templates = oc.selector('templates', all_namespaces=True)

        for template in templates.objects():
            if template.model.metadata.namespace == 'openshift' and template.model.metadata.name == 'nginx-example':
                template.model.metadata.namespace = ''

                obj = oc.APIObject(dict_to_model=template.as_dict())

                parameters = {
                    'NAME': 'my-nginx',
                }

                processed_template = obj.process(parameters=parameters)
                obj_sel = oc.create(processed_template)

                for obj in obj_sel.objects():
                    print('Created: {}/{}'.format(obj.model.kind,
                                                  obj.model.metadata.name))
                    print(obj.as_json(indent=4))
Example #29
 def get_node_object(self, node_name):
     with oc.project("openshift-machine-api"):
         return oc.selector("node/" + node_name).object()
Example #30
 def read_crd(self, selector, obj_name=None):
     sel = self.SELECTOR + '.capabilities.3scale.net'
     if obj_name:
         sel += '/' + obj_name
     LOG.info('CRD read ' + sel)
     return ocp.selector(sel).objects()