def update(ctx, action, ns, name, release, custom_message, custom_reason, execute):
    patch = create_patch(action, custom_message, custom_reason)
    logger.debug(f'Generated oc patch:\n{json.dumps(patch, indent=4)}')

    with oc.options(ctx), oc.tracking(), oc.timeout(15):
        try:
            with oc.project(ns):
                tag = oc.selector(f'imagestreamtag/{name}:{release}').object(ignore_not_found=True)
                if not tag:
                    raise Exception(f'Unable to locate imagestreamtag: {ns}/{name}:{release}')

                logger.info(f'{action.capitalize()}ing: {ns}/{name}:{release}')

                if execute:
                    backup_file = write_backup_file(name, release, tag.model._primitive())
                    tag.patch(patch)
                    logger.info(f'Release {release} updated successfully')
                    logger.info(f'Backup written to: {backup_file}')
                else:
                    logger.info(f'[dry-run] Patching release {release} with patch:\n{json.dumps(patch, indent=4)}')
                    logger.warning('You must specify "--execute" to permanently apply these changes')
        except (ValueError, OpenShiftPythonException, Exception) as e:
            logger.error(f'Unable to update release: "{release}"')
            raise e
def validate_server_connection(ctx):
    with oc.options(ctx), oc.tracking(), oc.timeout(60):
        try:
            username = oc.whoami()
            version = oc.get_server_version()
            logger.debug(f'Connected to APIServer running version: {version}, as: {username}')
        except (ValueError, OpenShiftPythonException, Exception) as e:
            logger.error(f"Unable to verify cluster connection using context: \"{ctx['context']}\"")
            raise e
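
# A minimal usage sketch (not from the original source). It assumes `ctx` is a
# dict of `oc` CLI options consumed by oc.options() above; only the 'context'
# key is referenced by the functions themselves, and the context name,
# namespace, release, and action values below are hypothetical placeholders.
if __name__ == '__main__':
    ctx = {
        'context': 'my-admin-context',        # kubeconfig context to use
        'kubeconfig': '/path/to/kubeconfig',  # assumed additional oc flag
    }
    validate_server_connection(ctx)
    # Dry-run an update; pass execute=True to actually apply the patch.
    update(ctx, action='accept', ns='ocp', name='release', release='4.14.1',
           custom_message=None, custom_reason=None, execute=False)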
def main():
    import openshift as oc

    script = module.params["script"]
    time = module.params["timeout"]

    oc.ansible.reset()
    oc.ansible.vars = module.params["vars"]

    if time is not None:
        time = int(time)  # Allow time to come in as a string

    if module.params["project"] is not None:
        oc.context.default_project = module.params["project"]

    with oc.timeout(time):
        with oc.tracking() as ct:
            try:
                with oc.util.OutputCapture() as capture:
                    exec(script)

                module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result()))
                module.exit_json(rc=ct.get_result().status(),
                                 changed=module.params['changes'],
                                 ansible_facts=oc.ansible.new_facts,
                                 stdout=capture.out.getvalue().decode('UTF-8'),
                                 stderr=capture.err.getvalue().decode('UTF-8'),
                                 result=ct.get_result().as_dict())
            except oc.OpenShiftPythonException as ose:
                module.debug("openshift_client_python module invocation exception: " + str(ose))
                module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result()))
                module.fail_json(msg=ose.msg,
                                 rc=ose.result.status(),
                                 exception_attributes=ose.attributes(),
                                 changed=module.params['changes'] or oc.ansible.changed,
                                 ansible_facts=oc.ansible.new_facts,
                                 stdout=capture.out.getvalue().decode('UTF-8'),
                                 stderr=capture.err.getvalue().decode('UTF-8'),
                                 result=ct.get_result().as_dict())
            except KeyboardInterrupt:
                print('Received KeyboardInterrupt during module', file=sys.stderr)
                pprint.pprint(ct.get_result().as_dict(), stream=sys.stderr)
                raise
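
# A hypothetical illustration (not from the original) of the kind of `script`
# payload the module above exec()s: ordinary openshift-client-python code that
# can publish facts back to Ansible via oc.ansible. The project name and fact
# key are assumptions for the example.
EXAMPLE_SCRIPT = '''
import openshift as oc

with oc.project('openshift-monitoring'):
    oc.ansible.new_facts['monitoring_pods'] = oc.selector('pods').qnames()
'''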
def create_resource(yaml, success, tries):
    with oc.tracking() as tracker:
        try:
            oc.create(yaml)
        except oc.OpenShiftPythonException:
            if 'AlreadyExists' in tracker.get_result().err():
                print("Resource already exists")
            else:
                raise Exception(f'Failed: {tracker.get_result().err()}')
        except Exception:
            raise Exception(f'Failed: {tracker.get_result().err()}')

    if success:
        try_count = 0
        while len(success) > 0 and try_count < tries:
            try_count += 1
            print(f'TRY: {try_count} of {tries}')
            for criteria in success:
                resource_type = criteria[0]
                resource_name = criteria[1]
                resource_count = criteria[2]
                found = oc.selector(resource_type)
                count = 0
                for item in found:
                    name = item.qname()
                    print(f'{resource_name} in {name}')
                    if resource_name in name:
                        if 'pod' in resource_type:
                            # Pods only count toward the goal once they reach a healthy phase
                            pod = item.as_dict()
                            status = pod['status']['phase']
                            print(f'Status: {status}')
                            if status == 'Running' or status == 'Succeeded':
                                count += 1
                                print(f'Found {count} of {resource_count}')
                        else:
                            count += 1
                            print(f'Found {count} of {resource_count}')
                if count >= resource_count:
                    success.remove(criteria)
                    break
            if len(success) == 0:
                return
            time.sleep(10)
        else:
            if try_count >= tries:
                raise Exception('Failed to create resource in time')
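
# A hypothetical invocation sketch (not part of the original). create_resource()
# treats `success` as a list of (resource_type, resource_name, resource_count)
# criteria and polls them every 10 seconds for up to `tries` attempts; the
# manifest and names below are illustrative placeholders.
example_manifest = {
    'apiVersion': 'apps/v1',
    'kind': 'Deployment',
    'metadata': {'name': 'example-app'},
    'spec': {
        'replicas': 2,
        'selector': {'matchLabels': {'app': 'example-app'}},
        'template': {
            'metadata': {'labels': {'app': 'example-app'}},
            'spec': {'containers': [{'name': 'web', 'image': 'registry.example.com/web:latest'}]},
        },
    },
}

create_resource(example_manifest,
                success=[('pods', 'example-app', 2)],  # wait for 2 Running/Succeeded pods
                tries=30)                              # ~5 minutes at 10 seconds per try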
#!/usr/bin/python
import openshift as oc
from openshift import Missing
import traceback

with oc.tracking() as t:
    with oc.client_host(hostname="18.222.71.125", username="******", auto_add_host=True):  # free-stg
        with oc.project("openshift-monitoring"):
            try:
                result = oc.selector('pod/alertmanager-main-0').object().execute(['cat'],
                                                                                 container_name='alertmanager',
                                                                                 stdin='stdin for cat')
                print(result.out())
                exit(0)

                cr_rules = oc.selector("prometheusrules")
                print("CR has the following rule sets: {}".format(cr_rules.qnames()))

                if cr_rules.object().model.metadata.labels.cr_generated is Missing:
                    print("Rule was not generated by CR")

                oc.selector('pods').annotate(annotations={
                    'cr_annotation_test': None,
                })

                oc.selector('node/pod_ip-172-31-79-85.us-east-2.compute.internal').object().patch({
                    'metadata': {
                        'annotations': {
                            'cr_patch': 'yes',
                        }
                    }
                })
            except Exception:
                # Dump any failure for debugging (traceback is imported above for this purpose).
                traceback.print_exc()
#!/usr/bin/env python
import openshift as oc

if __name__ == '__main__':
    with oc.tracking() as tracker:
        try:
            print('Current project: {}'.format(oc.get_project_name()))
            print('Current user: {}'.format(oc.whoami()))
        except Exception:
            print('Error acquiring details about project/user')

        # Print out details about the invocations made within this context.
        print(tracker.get_result())
)
parser.set_defaults(insecure_skip_tls_verify=False)
args = vars(parser.parse_args())

skip_tls_verify = args['insecure_skip_tls_verify']
if skip_tls_verify:
    oc.set_default_skip_tls_verify(True)

bastion_hostname = args['bastion']
if not bastion_hostname:
    print('Running in local mode. Expecting "oc" in PATH')

with oc.client_host(hostname=bastion_hostname, username="******",
                    auto_add_host=True, load_system_host_keys=False):
    # Ensure tests complete within 30 minutes and track all oc invocations
    with oc.timeout(60 * 30), oc.tracking() as t:
        try:
            check_online_network_multitenant()
            check_prevents_cron_jobs()
            check_online_project_constraints()
        except Exception:
            logging.fatal('Error occurred during tests')
            traceback.print_exc()

        # print out all oc interactions and do not redact secret information
        print("Tracking:\n{}\n\n".format(t.get_result().as_json(redact_streams=False)))