def update(ctx, action, ns, name, release, custom_message, custom_reason, execute):
    patch = create_patch(action, custom_message, custom_reason)
    logger.debug(f'Generated oc patch:\n{json.dumps(patch, indent=4)}')

    with oc.options(ctx), oc.tracking(), oc.timeout(15):
        try:
            with oc.project(ns):
                tag = oc.selector(f'imagestreamtag/{name}:{release}').object(ignore_not_found=True)
                if not tag:
                    raise Exception(f'Unable to locate imagestreamtag: {ns}/{name}:{release}')

                logger.info(f'{action.capitalize()}ing: {ns}/{name}:{release}')
                if execute:
                    backup_file = write_backup_file(name, release, tag.model._primitive())
                    tag.patch(patch)
                    logger.info(f'Release {release} updated successfully')
                    logger.info(f'Backup written to: {backup_file}')
                else:
                    logger.info(f'[dry-run] Patching release {release} with patch:\n{json.dumps(patch, indent=4)}')
                    logger.warning('You must specify "--execute" to permanently apply these changes')
        except Exception as e:
            # Exception already covers ValueError and OpenShiftPythonException
            logger.error(f'Unable to update release: "{release}"')
            raise e
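# Hedged usage sketch: create_patch, write_backup_file, and logger are helpers
# defined elsewhere in this tool, and the ctx dict shape is an assumption about
# what oc.options() is given here; all argument values are placeholders.
update(ctx={'context': 'my-cluster'},
       action='accept',
       ns='ocp',
       name='release',
       release='4.14.1',
       custom_message=None,
       custom_reason=None,
       execute=False)  # dry-run; pass execute=True to actually apply the patch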
def validate_server_connection(ctx):
    with oc.options(ctx), oc.tracking(), oc.timeout(60):
        try:
            username = oc.whoami()
            version = oc.get_server_version()
            logger.debug(f'Connected to APIServer running version: {version}, as: {username}')
        except Exception as e:
            # Exception already covers ValueError and OpenShiftPythonException
            logger.error(f"Unable to verify cluster connection using context: \"{ctx['context']}\"")
            raise e
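# Hedged usage sketch: verify connectivity before doing any real work;
# the ctx dict shape is the same assumption as above.
ctx = {'context': 'my-cluster'}
validate_server_connection(ctx)  # raises if the API server is unreachable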
def main():
    import openshift as oc

    script = module.params["script"]
    time = module.params["timeout"]
    oc.ansible.reset()
    oc.ansible.vars = module.params["vars"]

    if time is not None:
        time = int(time)  # Allow time to come in as a string

    if module.params["project"] is not None:
        oc.context.default_project = module.params["project"]

    with oc.timeout(time):
        with oc.tracking() as ct:
            try:
                with oc.util.OutputCapture() as capture:
                    exec(script)

                module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result()))
                module.exit_json(rc=ct.get_result().status(),
                                 changed=module.params['changes'],
                                 ansible_facts=oc.ansible.new_facts,
                                 stdout=capture.out.getvalue().decode('UTF-8'),
                                 stderr=capture.err.getvalue().decode('UTF-8'),
                                 result=ct.get_result().as_dict())
            except oc.OpenShiftPythonException as ose:
                module.debug("openshift_client_python module invocation exception: " + str(ose))
                module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result()))
                module.fail_json(msg=ose.msg,
                                 rc=ose.result.status(),
                                 exception_attributes=ose.attributes(),
                                 changed=module.params['changes'] or oc.ansible.changed,
                                 ansible_facts=oc.ansible.new_facts,
                                 stdout=capture.out.getvalue().decode('UTF-8'),
                                 stderr=capture.err.getvalue().decode('UTF-8'),
                                 result=ct.get_result().as_dict())
            except KeyboardInterrupt:
                print('Received KeyboardInterrupt during module', file=sys.stderr)
                pprint.pprint(ct.get_result().as_dict(), stream=sys.stderr)
                raise
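# Hedged sketch of the boilerplate this module assumes: `module` is an
# AnsibleModule created at import time, and main() runs from the standard
# entry-point guard. The argument_spec below is inferred from the
# module.params keys used above, not copied from the real module source.
from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=dict(
        script=dict(type='str', required=True),
        timeout=dict(type='raw', default=None),  # accepted as int or string
        vars=dict(type='dict', default=None),
        project=dict(type='str', default=None),
        changes=dict(type='bool', default=False),
    ),
)

if __name__ == '__main__':
    main()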
def get_pod_containers_usage(project):
    """Yield a ContainerUsage for each container that reports metrics."""
    with oc.project(project), oc.timeout(2 * 60):
        for pod_obj in oc.selector('pods').objects():
            metric = get_pod_metrics(pod_obj)
            pod_name = pod_obj.model.metadata.name
            if metric:
                containers = metric.model.containers
                for container in containers:
                    app_name = container['name']
                    usage = get_container_usage(container)
                    yield ContainerUsage(app_name, pod_name, usage)
            else:
                msg = 'No metrics for pod {}'.format(pod_name)
                info_msg(msg)
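# Hedged usage sketch: get_pod_metrics, get_container_usage, ContainerUsage,
# and info_msg are project helpers defined elsewhere; 'monitoring' is a
# placeholder project name.
for container_usage in get_pod_containers_usage('monitoring'):
    print(container_usage)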
)
parser.set_defaults(insecure_skip_tls_verify=False)
args = vars(parser.parse_args())

skip_tls_verify = args['insecure_skip_tls_verify']
if skip_tls_verify:
    oc.set_default_skip_tls_verify(True)

bastion_hostname = args['bastion']
if not bastion_hostname:
    print('Running in local mode. Expecting "oc" in PATH')

with oc.client_host(hostname=bastion_hostname, username="******",
                    auto_add_host=True, load_system_host_keys=False):
    # Ensure tests complete within 30 minutes and track all oc invocations
    with oc.timeout(60 * 30), oc.tracking() as t:
        try:
            check_online_network_multitenant()
            check_prevents_cron_jobs()
            check_online_project_constraints()  # was missing the call parentheses
        except Exception:
            logging.fatal('Error occurred during tests')
            traceback.print_exc()

        # Print out all oc interactions and do not redact secret information
        print("Tracking:\n{}\n\n".format(t.get_result().as_json(redact_streams=False)))
def deploy(_dcli, project, timeout):
    ''' (Unused) x str x int -> ((str) -> None) '''
    with oc.project(project), oc.timeout(timeout):
        yield _deploy
def publish(dcli, project, timeout):
    ''' docker.DockerClient x str x int -> ((str) -> None) '''
    with oc.project(project), oc.timeout(timeout):
        yield partial(_publish, dcli)
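# Hedged usage sketch: the yield style above suggests these are pytest
# fixtures (or contextlib context managers); fixture registration and the
# _deploy/_publish helpers are assumed to live elsewhere in the project.
def test_publish_then_deploy(publish, deploy):
    publish('myapp:latest')  # partial(_publish, dcli) yielded by the fixture
    deploy('myapp')          # _deploy runs inside the oc.project/oc.timeout scope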
oc.selector("projects").narrow(lambda prj: prj.metadata.annotations[ "openshift.io/requester"] is not Missing).qnames()) oc.selector("projects").narrow( # Eliminate any projects created by the system lambda prj: prj.metadata.annotations["openshift.io/requester" ] is not Missing ).narrow( # Select from user projects any which violate privileged naming convention lambda prj: prj.metadata.qname == "openshift" or prj.metadata.qname. startswith("openshift-") or prj.metadata.qname == "kubernetes" or prj. metadata.qname.startswith("kube-") or prj.metadata.qname. startswith("kubernetes-")).for_each( lambda prj: oc.error("Invalid project: %s" % prj.metadata.qname)) with oc.timeout(5): success, obj = oc.selector("pods").until_any( lambda pod: pod.status.phase == "Succeeded") if success: print("Found one pod was successful: " + str(obj)) with oc.timeout(5): success, obj = oc.selector("pods").narrow("pod").until_any( lambda pod: pod.status.conditions.can_match( { "type": "Ready", "status": False, "reason": "PodCompleted" })) if success: print("Found one pod was successful: " + str(obj))