def update(ctx, action, ns, name, release, custom_message, custom_reason, execute):
    """Patch an imagestreamtag to apply an accept/reject-style release action.

    :param ctx: oc option context passed straight to ``oc.options``
    :param action: verb used to build the patch (also used for log text)
    :param ns: namespace holding the imagestream
    :param name: imagestream name
    :param release: tag (release) within the imagestream
    :param custom_message: optional message embedded in the patch
    :param custom_reason: optional reason embedded in the patch
    :param execute: when falsy, perform a dry run and only log the patch
    :raises Exception: when the imagestreamtag cannot be found, or any
        failure from the underlying oc invocation (re-raised after logging)
    """
    patch = create_patch(action, custom_message, custom_reason)
    logger.debug(f'Generated oc patch:\n{json.dumps(patch, indent=4)}')

    with oc.options(ctx), oc.tracking(), oc.timeout(15):
        try:
            with oc.project(ns):
                tag = oc.selector(f'imagestreamtag/{name}:{release}').object(
                    ignore_not_found=True)
                if not tag:
                    raise Exception(
                        f'Unable to locate imagestreamtag: {ns}/{name}:{release}'
                    )

                logger.info(f'{action.capitalize()}ing: {ns}/{name}:{release}')

                if execute:
                    # Snapshot the object before mutating it so the change
                    # can be reverted by hand if needed.
                    backup_file = write_backup_file(name, release, tag.model._primitive())
                    tag.patch(patch)
                    logger.info(f'Release {release} updated successfully')
                    logger.info(f'Backup written to: {backup_file}')
                else:
                    logger.info(
                        f'[dry-run] Patching release {release} with patch:\n{json.dumps(patch, indent=4)}'
                    )
                    logger.warning(
                        'You must specify "--execute" to permanently apply these changes'
                    )
        except Exception:
            # `Exception` already covers ValueError and OpenShiftPythonException,
            # which the original listed redundantly. Bare `raise` preserves the
            # original traceback for the caller.
            logger.error(f'Unable to update release: "{release}"')
            raise
def get_ca_bundle_from_hub(cls, spoke_namespace: str) -> str:
    """Read the registry CA bundle from the hub cluster.

    Points KUBECONFIG at the installer kubeconfig, then pulls the
    ``ca-bundle.crt`` entry out of the ``registry-ca`` ConfigMap in
    *spoke_namespace*.
    """
    os.environ["KUBECONFIG"] = global_variables.installer_kubeconfig_path
    with oc.project(spoke_namespace):
        config_maps = oc.selector("configmap/registry-ca").objects()
        assert len(config_maps) > 0
        return config_maps[0].model.data["ca-bundle.crt"]
def get_ca_bundle_from_hub():
    """Read the registry CA bundle from the hub cluster.

    Uses the installer kubeconfig from ``env_variables`` and returns the
    ``ca-bundle.crt`` entry of the ``registry-ca`` ConfigMap in the
    configured namespace.
    """
    os.environ['KUBECONFIG'] = env_variables['installer_kubeconfig_path']
    with oc.project(env_variables['namespace']):
        config_maps = oc.selector('configmap/registry-ca').objects()
        assert len(config_maps) > 0
        return config_maps[0].model.data['ca-bundle.crt']
def get_pod_containers_usage(project):
    """Yield a ContainerUsage for every container that reports metrics.

    Iterates all pods in *project* (2-minute oc timeout); pods without
    metrics are only logged via ``info_msg``.
    (Original docstring, pt-BR: "Retorna um iterador para cada container
    com métricas".)
    """
    with oc.project(project), oc.timeout(2 * 60):
        for pod_obj in oc.selector('pods').objects():
            pod_name = pod_obj.model.metadata.name
            metric = get_pod_metrics(pod_obj)
            if not metric:
                # Message text intentionally left in Portuguese (runtime output).
                info_msg('Nenhuma métrica para o pod {}'.format(pod_name))
                continue
            for container in metric.model.containers:
                yield ContainerUsage(container['name'],
                                     pod_name,
                                     get_container_usage(container))
def get_bmc_addr(self, node_name):
    """Return the BMC address for *node_name*.

    Resolution order:
    1. ``self.bm_info`` (scenario config) — wins if it has a ``bmc_addr``
       entry for the node.
    2. The BareMetalHost object backing the node, located via the node's
       ``spec.providerID`` (``.../<bmh-name>/<uid>``).

    Exits the process when the BMH object carries an empty address.
    """
    # Addresses in the config get higher priority.
    if self.bm_info is not None and node_name in self.bm_info and "bmc_addr" in self.bm_info[node_name]:
        return self.bm_info[node_name]["bmc_addr"]

    # Get the bmc addr from the BareMetalHost object.
    with oc.project("openshift-machine-api"):
        logging.info("Getting node with name: %s", node_name)
        node = self.get_node_object(node_name)
        provider_id = node.model.spec.providerID
        # providerID ends with ".../<bmh-name>/<uid>"; slice out the BMH name.
        uid_start = provider_id.rfind("/")  # The / before the uid
        name_start = provider_id.rfind("/", 0, uid_start) + 1
        bmh_name = provider_id[name_start:uid_start]
        bmh_resource_name = "baremetalhost.metal3.io/" + bmh_name
        bmh_object = oc.selector(bmh_resource_name).object()
        # BUG FIX: the original tested `.bmc.addr` (a field that does not
        # exist on the BMH spec — the real field, and the one returned
        # below, is `.bmc.address`), so the emptiness guard never fired.
        if len(bmh_object.model.spec.bmc.address) == 0:
            logging.error(
                'BMC addr empty for node "%s". Either fix the BMH object,'
                " or specify the address in the scenario config",
                node_name,
            )
            sys.exit(1)
        return bmh_object.model.spec.bmc.address
#!/usr/bin/env python
from __future__ import absolute_import

import openshift as oc

if __name__ == '__main__':
    # Print a formatted report of selected resource kinds in the
    # openshift-monitoring namespace of the default cluster/context.
    kinds = ['dc', 'build', 'configmap']
    with oc.client_host():
        with oc.project('openshift-monitoring'):
            oc.selector(kinds).print_report()
#!/usr/bin/python import openshift as oc from openshift import Missing import traceback with oc.tracking() as t: with oc.client_host(hostname="18.222.71.125", username="******", auto_add_host=True): # free-stg with oc.project("openshift-monitoring"): try: result = oc.selector('pod/alertmanager-main-0').object().execute(['cat'], container_name='alertmanager', stdin='stdin for cat') print(result.out()) exit(0) cr_rules = oc.selector("prometheusrules") print("CR has the following rule sets: {}".format(cr_rules.qnames())) if cr_rules.object().model.metadata.labels.cr_generated is Missing: print("Rule was not generated by CR") oc.selector('pods').annotate(annotations={ 'cr_annotation_test': None, }) oc.selector('node/pod_ip-172-31-79-85.us-east-2.compute.internal').object().patch({ 'metadata': { 'annotations': { 'cr_patch': 'yes'
node.label({"cluster.ocs.openshift.io/openshift-storage": ''}) print( f"Name {node.name()} label:{node.get_label('node-role.kubernetes.io/worker')}" ) worker_count += 1 print("worker_count: {}".format(worker_count)) namespace = """ apiVersion: v1 kind: Namespace metadata: name: local-storage """ created = [('namespace', 'local-storage', 1)] create_resource(namespace, created, 5) with oc.project("local-storage") as project: OperatorGroup = """ apiVersion: operators.coreos.com/v1alpha2 kind: OperatorGroup metadata: name: local-operator-group namespace: local-storage spec: targetNamespaces: - local-storage """ created = [('OperatorGroup', 'local-operator-group', 1)] create_resource(OperatorGroup, created, 5) Subscription = """ apiVersion: operators.coreos.com/v1alpha1 kind: Subscription
def run(arch, release, upgrade_url, upgrade_minor_url, confirm):
    """
    Sets annotations to force OpenShift release acceptance.

    Requires https://github.com/openshift/openshift-client-python to be setup
    in your PYTHONPATH.

    \b
    If openshift-client-python is in $HOME/projects/openshift-client-python:
    $ export PYTHONPATH=$PYTHONPATH:$HOME/projects/openshift-client-python/packages

    \b
    Example invocation:
    $ ./accept.py -r 4.4.0-rc.3 -u 'https://prow.svc.ci.openshift.org/view/...origin-installer-e2e-gcp-upgrade/575' -m 'https://prow.svc.ci.openshift.org/view/...origin-installer-e2e-gcp-upgrade/461' --confirm
    """
    # At least one upgrade-test URL is required before forcing acceptance.
    if not upgrade_minor_url and not upgrade_url:
        click.echo(
            'One or both upgrade urls must be specified in order to accept the release'
        )
        exit(1)

    # Non-x86 releases live in arch-suffixed imagestreams/namespaces
    # (e.g. 'ocp-s390x' / 'release-s390x').
    arch_suffix = ''
    if arch != 'amd64' and arch != 'x86_64':
        arch_suffix = f'-{arch}'

    with oc.api_server(api_url='https://api.ci.openshift.org'), \
            oc.options({'as': 'system:admin'}), \
            oc.project(f'ocp{arch_suffix}'):
        istag_qname = f'istag/release{arch_suffix}:{release}'
        istag = oc.selector(istag_qname).object(ignore_not_found=True)
        if not istag:
            raise IOError(f'Could not find {istag_qname}')

        # Snapshot the current imagestreamtag as JSON before mutating it.
        ts = int(round(time.time() * 1000))
        backup_filename = f'release{arch_suffix}_{release}.{ts}.json'
        if confirm:
            with open(backup_filename, mode='w+', encoding='utf-8') as backup:
                print(f'Creating backup file: {backup_filename}')
                backup.write(json.dumps(istag.model._primitive(), indent=4))

        def make_release_accepted(obj):
            # Release-controller metadata is duplicated in three annotation
            # maps on the istag; update all of them consistently.
            for annotations in (obj.model.image.metadata.annotations,
                                obj.model.metadata.annotations,
                                obj.model.tag.annotations):
                annotations.pop('release.openshift.io/message', None)
                annotations.pop('release.openshift.io/reason', None)
                annotations['release.openshift.io/phase'] = 'Accepted'
                # The 'verify' annotation is a JSON-encoded string: decode,
                # mark the upgrade tests Succeeded, then re-encode.
                verify_str = annotations['release.openshift.io/verify']
                verify = oc.Model(json.loads(verify_str))
                verify.upgrade.state = 'Succeeded'
                if upgrade_url:
                    verify.upgrade.url = upgrade_url
                verify['upgrade-minor'].state = 'Succeeded'
                if upgrade_minor_url:
                    verify['upgrade-minor'].url = upgrade_minor_url
                annotations['release.openshift.io/verify'] = json.dumps(
                    verify._primitive(), indent=None)

            # Show the object that would be (or is about to be) applied.
            print(json.dumps(obj.model._primitive(), indent=4))
            if confirm:
                print('Attempting to apply this object.')
                return True  # tell modify_and_apply to proceed
            else:
                print(
                    WARNING +
                    '--confirm was not specified. Run again to apply these changes.'
                    + ENDC)
                exit(0)

        # Retry to tolerate concurrent writers updating the istag.
        result, changed = istag.modify_and_apply(make_release_accepted,
                                                 retries=10)
        if not changed:
            print(WARNING + 'No change was applied to the object' + ENDC)
            print(f'Details:\n{result.as_json()}')
            exit(1)

        print('Success!')
        print(f'Backup written to: {backup_filename}')
def get_node_object(self, node_name):
    """Fetch the Node API object named *node_name*.

    The lookup is performed in the openshift-machine-api project context.
    """
    with oc.project("openshift-machine-api"):
        node_selector = oc.selector("node/" + node_name)
        return node_selector.object()
def deploy(_dcli, project, timeout):
    """Yield the module-level _deploy callable inside an oc project/timeout scope.

    Contract: (Unused) x str x int -> ((str) -> None)
    """
    with oc.project(project):
        with oc.timeout(timeout):
            yield _deploy
def publish(dcli, project, timeout):
    """Yield a _publish callable bound to *dcli* inside an oc project/timeout scope.

    Contract: docker.DockerClient x str x int -> ((str) -> None)
    """
    with oc.project(project):
        with oc.timeout(timeout):
            yield partial(_publish, dcli)
lambda pod: pod.status.phase == "Succeeded") if success: print("Found one pod was successful: " + str(obj)) with oc.timeout(5): success, obj = oc.selector("pods").narrow("pod").until_any( lambda pod: pod.status.conditions.can_match( { "type": "Ready", "status": False, "reason": "PodCompleted" })) if success: print("Found one pod was successful: " + str(obj)) with oc.project("myproject") as project: project.create_if_absent({ "apiVersion": "v1", "kind": "User", "fullName": "Jane Doe", "groups": null, "identities": ["github:19783215"], "metadata": { "name": "jane" } }) project.create_if_absent({ "apiVersion": "v1", "kind": "User",
args = vars(parser.parse_args())

# Optionally disable TLS verification for all subsequent oc invocations.
skip_tls_verify = args['insecure_skip_tls_verify']
if skip_tls_verify:
    oc.set_default_skip_tls_verify(True)

bastion_hostname = args['bastion']
if not bastion_hostname:
    logging.info('Running in local mode. Expecting "oc" in PATH')

with oc.client_host(hostname=bastion_hostname, username="******",
                    auto_add_host=True, load_system_host_keys=False):
    # Ensure tests complete within 30 minutes and track all oc invocations
    with oc.timeout(60 * 30), oc.tracking() as t:
        try:
            with oc.project('default'):
                bc = oc.selector('bc/does-not-exist')
                bc.start_build()
        except Exception:
            # `Exception` already covers ValueError and
            # OpenShiftPythonException; the original tuple listed them
            # redundantly. This is a deliberate demo of failure handling.
            # Print out exception stack trace via the traceback module
            logger.info('Traceback output:\n{}\n'.format(
                traceback.format_exc()))

            # Print out all oc interactions and do not redact secret information
            logger.info("OC tracking output:\n{}\n".format(
                t.get_result().as_json(redact_streams=False)))
def run(arch, release, confirm):
    """
    Sets annotations and deletes prowjobs to restart testing on a release.

    requires:
    pip3 install openshift-client
    OR
    https://github.com/openshift/openshift-client-python must be setup in your PYTHONPATH.

    \b
    If openshift-client-python is in $HOME/projects/openshift-client-python:
    $ export PYTHONPATH=$PYTHONPATH:$HOME/projects/openshift-client-python/packages

    \b
    Example invocation:
    $ ./retest.py -r 4.4.0-rc.3 --confirm
    """
    # Non-x86 releases live in arch-suffixed imagestreams/namespaces.
    arch_suffix = ''
    if arch != 'amd64' and arch != 'x86_64':
        arch_suffix = f'-{arch}'

    # The prowjob cluster requires an interactively supplied token.
    t1 = input('Enter a token for https://api.ci.l2s4.p1.openshiftapps.com: ')
    with oc.api_server(api_url='https://api.ci.l2s4.p1.openshiftapps.com:6443'
                       ), oc.options({'as': 'system:admin'}), oc.token(t1):
        with oc.project('ci'):
            print(f'Searching for prowjobs associated with {release}')
            # Match prowjobs tagged with this release, excluding chat-bot jobs.
            prowjobs = oc.selector(
                'prowjobs').narrow(lambda obj: obj.model.metadata.annotations[
                    'release.openshift.io/tag'] == release and
                    'chat-bot' not in obj.model.metadata.name)
            print(f'Found prowjobs: {prowjobs.qnames()}')

            if confirm:
                print('Deleting associated prowjobs')
                prowjobs.delete()
            else:
                print(WARNING +
                      'Run with --confirm to delete these resources' + ENDC)

    # Separate API server hosts the release imagestreams.
    with oc.api_server(api_url='https://api.ci.openshift.org'), oc.options(
            {'as': 'system:admin'}):
        with oc.project(f'ocp{arch_suffix}'):
            istag_qname = f'istag/release{arch_suffix}:{release}'
            istag = oc.selector(istag_qname).object(ignore_not_found=True)
            if not istag:
                raise IOError(f'Could not find {istag_qname}')

            def trigger_retest(obj):
                # Release-controller metadata is duplicated in three
                # annotation maps; stripping phase/verify from all of them
                # makes the controller re-run verification.
                for annotations in (obj.model.image.metadata.annotations,
                                    obj.model.metadata.annotations,
                                    obj.model.tag.annotations):
                    annotations.pop('release.openshift.io/message', None)
                    annotations.pop('release.openshift.io/phase', None)
                    annotations.pop('release.openshift.io/reason', None)
                    annotations.pop('release.openshift.io/verify', None)

                # Show the object that would be (or is about to be) applied.
                print(json.dumps(obj.model._primitive(), indent=4))
                if confirm:
                    print('Attempting to apply this object.')
                    return True  # tell modify_and_apply to proceed
                else:
                    print(
                        WARNING +
                        '--confirm was not specified. Run again to apply these changes.'
                        + ENDC)
                    exit(0)

            # Retry to tolerate concurrent writers updating the istag.
            result, changed = istag.modify_and_apply(trigger_retest,
                                                     retries=10)
            if not changed:
                print(WARNING + 'No change was applied to the object' + ENDC)
                print(f'Details:\n{result.as_json()}')
                exit(1)

            print('Success!')