def create_test_project(suffix, port):
    project_name = 'imperative-verify-test-project-network-{}'.format(suffix)

    # Delete any existing resources
    oc.delete_project(project_name, ignore_not_found=True, grace_period=1)

    server_name = 'server-{}'.format(suffix)
    client_name = 'client-{}'.format(suffix)

    with oc.new_project(project_name):

        # Create a simple http server running in project-A.
        # It will be exposed by a service and route of the same name.
        report_progress("Creating server in: " + project_name)
        server_sel = oc.create(
            simple_http_server_resources(server_name, port, create_service=True, create_route=True)
        )
        report_progress("Created: {}".format(server_sel.qnames()))

        report_progress("Waiting for resources readiness...")
        server_sel.narrow('pod').until_all(1, success_func=oc.status.is_pod_running)
        server_sel.narrow('route').until_all(1, success_func=oc.status.is_route_admitted)

        # Create a passive pod that blocks forever so we can exec commands within it
        client_sel = oc.create(
            oc.build_pod_simple(client_name, image='python:3', command=['tail', '-f', '/dev/null']))
        client_sel.until_all(1, success_func=oc.status.is_pod_running)

        server_pod = server_sel.narrow('pod').object()
        service = server_sel.narrow('service').object()
        route = server_sel.narrow('route').object()
        client_pod = client_sel.narrow('pod').object()

        report_progress('Ensure client pod can communicate to server pod IP in same namespace')
        client_pod.execute(cmd_to_exec=['curl', 'http://{}:{}'.format(server_pod.model.status.podIP, port)],
                           auto_raise=True)

        report_progress('Ensure client pod can communicate to server service IP in same namespace')
        client_pod.execute(cmd_to_exec=['curl', 'http://{}:{}'.format(service.model.spec.clusterIP, port)],
                           auto_raise=True)

        report_progress('Ensure client pod can communicate to server service DNS in same namespace')
        client_pod.execute(cmd_to_exec=['curl', 'http://{}:{}'.format(server_name, port)],
                           auto_raise=True)

        report_progress('Ensure client pod can communicate to server route in same namespace')
        client_pod.execute(cmd_to_exec=['curl', 'http://{}'.format(route.model.spec.host)],
                           auto_raise=True)

        # Return the project name along with the server and client resource objects
        return project_name, server_pod, service, route, client_pod
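# A hedged sketch of how create_test_project might be driven from a test entry
# point; the suffix/port values and the cleanup step here are illustrative, not
# part of the original test harness.
import openshift as oc

if __name__ == '__main__':
    with oc.client_host():
        project_name, server_pod, service, route, client_pod = create_test_project('a', 8080)
        try:
            print('Server pod IP: {}'.format(server_pod.model.status.podIP))
            print('Route host: {}'.format(route.model.spec.host))
        finally:
            # Remove everything the helper created.
            oc.delete_project(project_name, ignore_not_found=True)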
def check_prevents_cron_jobs():
    """
    In our cluster configuration, cronjobs can only be created in privileged
    projects. Validate this.
    """
    cronjob = {
        'apiVersion': 'batch/v1beta1',
        'kind': 'CronJob',
        'metadata': {
            'name': 'prohibited-cron',
        },
        'spec': {
            'schedule': '@weekly',
            'jobTemplate': {
                'spec': {
                    'template': {
                        'spec': {
                            'containers': [
                                {
                                    'name': 'container0',
                                    'image': 'busybox',
                                }
                            ],
                            'restartPolicy': 'Never',
                        }
                    }
                }
            }
        }
    }

    user_project_name = 'imperative-verify-test-project-scheduled-jobs'
    with temp_project(user_project_name):
        try:
            report_progress('Creating cron job in normal project')
            oc.create(cronjob)
        except oc.OpenShiftPythonException:
            report_verified('Could not create cronjob in user project')
        else:
            assert False, 'Cronjob created but should have been prohibited'

    priv_project_name = 'openshift-imperative-verify-test-project-scheduled-jobs'
    with temp_project(priv_project_name, adm=True):
        # In openshift-*, we should be able to create the cronjob.
        report_progress('Creating cron job in privileged project')
        oc.create(cronjob)
        report_verified('Able to create cronjob in privileged project')
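# temp_project is a helper defined elsewhere in this test suite. A minimal
# sketch of what it is assumed to do: create a throwaway project, yield it,
# then delete it; adm=True presumably uses 'oc adm new-project' so that names
# in privileged prefixes can be created.
from contextlib import contextmanager
import openshift as oc

@contextmanager
def temp_project(name, adm=False):
    oc.delete_project(name, ignore_not_found=True)
    if adm:
        oc.invoke('adm', ['new-project', name])
    else:
        oc.invoke('new-project', [name])
    try:
        with oc.project(name):
            yield name
    finally:
        oc.delete_project(name, ignore_not_found=True)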
def create_resource(yaml, success, tries):
    with oc.tracking() as tracker:
        try:
            oc.create(yaml)
        except oc.OpenShiftPythonException:
            if 'AlreadyExists' in tracker.get_result().err():
                print("Resource already exists")
            else:
                raise Exception(f'Failed: {tracker.get_result().err()}')
        except Exception:
            raise Exception(f'Failed: {tracker.get_result().err()}')

    if success:
        try_count = 0
        while len(success) > 0 and try_count < tries:
            try_count += 1
            print(f'TRY: {try_count} of {tries}')
            for criteria in success:
                resource_type = criteria[0]
                resource_name = criteria[1]
                resource_count = criteria[2]
                found = oc.selector(resource_type)
                count = 0
                for item in found.objects():
                    name = item.qname()
                    print(f'{resource_name} in {name}')
                    if resource_name in name:
                        if 'pod' in resource_type:
                            pod = item.as_dict()
                            status = pod['status']['phase']
                            print(f'Status: {status}')
                            if status == 'Running' or status == 'Succeeded':
                                count += 1
                                print(f'Found {count} of {resource_count}')
                        else:
                            count += 1
                            print(f'Found {count} of {resource_count}')
                if count >= resource_count:
                    success.remove(criteria)
                    break
            if len(success) == 0:
                return
            time.sleep(10)
        else:
            if try_count >= tries:
                raise Exception('Failed to create resource in time')
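# The 'success' argument is a list of (resource_type, name_fragment,
# expected_count) tuples that create_resource polls until each is satisfied.
# A hedged usage sketch with hypothetical names; it assumes the same
# module-level imports the function relies on (time, openshift as oc).
deployment = {
    'apiVersion': 'apps/v1',
    'kind': 'Deployment',
    'metadata': {'name': 'example-app'},
    'spec': {
        'replicas': 2,
        'selector': {'matchLabels': {'app': 'example-app'}},
        'template': {
            'metadata': {'labels': {'app': 'example-app'}},
            'spec': {'containers': [{'name': 'web', 'image': 'nginx'}]},
        },
    },
}

# Wait until two matching pods are Running/Succeeded and the deployment exists.
# Note that create_resource mutates the list, so pass a fresh copy each call.
create_resource(deployment,
                success=[('pods', 'example-app', 2), ('deployments', 'example-app', 1)],
                tries=30)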
def _deploy(image):
    '''
    str -> None
    Deploys the given image to Openshift using the oc API
    '''
    with pushd(os.path.join(IMAGE_ROOT, image)):
        if os.path.isfile(OPENSHIFT_TEMPLATE):
            ocobj = oc.create(
                oc.APIObject(
                    string_to_model=readfile(OPENSHIFT_TEMPLATE)).process())
            for obj in ocobj.objects():
                print(f"Created: {obj.model.kind}/{obj.model.metadata.name}")
                print(obj.as_json(indent=4))
        else:
            print(f"WARN: Missing oc template {OPENSHIFT_TEMPLATE} for image {image}")
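# pushd and readfile are helpers defined elsewhere in this repo; a minimal
# sketch of what they are assumed to look like (a chdir context manager and a
# small read-whole-file utility).
import os
from contextlib import contextmanager

@contextmanager
def pushd(path):
    # Temporarily switch the working directory, restoring it on exit.
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)

def readfile(path):
    # Return the file's contents as a single string.
    with open(path, 'r') as f:
        return f.read()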
def run_pods(pod_count=5, *, project_name=None):
    logger.info('Running in namespace: {}'.format(project_name))

    for i in range(pod_count):
        pod_name = 'pod-{}'.format(i)
        logger.info('Creating: {}'.format(pod_name))
        pod_selector = oc.create(
            oc.build_pod_simple(pod_name, image='python:3', command=['tail', '-f', '/dev/null']))
        pod_selector.until_all(1, success_func=oc.status.is_pod_running)

    pods = oc.selector('pods').objects()
    logger.info('Found {} pods'.format(len(pods)))
    assert len(pods) == pod_count
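# A hedged sketch of driving run_pods against a throwaway project; the project
# name is illustrative and the logger setup is assumed rather than taken from
# the original module.
import logging
import openshift as oc

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    with oc.client_host():
        with oc.new_project('run-pods-demo'):
            run_pods(pod_count=3, project_name='run-pods-demo')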
def create_secret_if_needed(params, namespace):
    body_ascii = str(params['body']).encode('ascii')
    body_enc = base64.b64encode(body_ascii)
    spec_sec = copy.deepcopy(constants.SPEC_SECRET)
    spec_sec['metadata']['name'] = params['name']
    spec_sec['metadata']['namespace'] = namespace
    spec_sec['data'][params['name']] = body_enc.decode('ascii')

    # Replace any existing secret with the same name before creating the new one.
    result = ocp.selector('secret/' + params['name'])
    if result.status() == 0:
        objs = result.objects()
        if objs:
            objs[0].delete()

    result = ocp.create(spec_sec)
    assert result.status() == 0

    if 'url' in params:
        del params['url']
    del params['body']
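# A hedged sketch of how create_secret_if_needed might be called; the names and
# payload are hypothetical, and constants.SPEC_SECRET is assumed to be a plain
# v1 Secret manifest with empty metadata/data for the helper to fill in.
params = {
    'name': 'example-policy-secret',   # secret name and data key
    'body': '{"setting": "value"}',    # payload stored base64-encoded under that key
    'url': 'https://example.invalid',  # removed from params by the helper
}
create_secret_if_needed(params, namespace='example-namespace')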
def create(self, params: dict = None, **kwargs) -> 'DefaultResource':
    LOG.info(self._log_message("[CREATE] Create CRD new ",
                               body=params, args=kwargs))
    if self.__class__.CRD_IMPLEMENTED:
        spec = copy.deepcopy(self.__class__.SPEC)
        name = params.get('name') or params.get('username')  # Developer User exception
        if name is not None:
            name = self.normalize(name)
            if params.get('name'):
                params['name'] = name
            else:
                params['username'] = name
        else:
            name = self.normalize(''.join(
                random.choice(string.ascii_letters) for _ in range(16)))

        if not self.__class__.NESTED:
            spec['metadata']['namespace'] = self.crd_client.ocp_namespace
            spec['metadata']['name'] = name
            spec['spec']['providerAccountRef']['name'] = self.crd_client.ocp_provider_ref

        self.before_create(params, spec)

        spec['spec'].update(self.translate_to_crd(params))

        # Drop optional keys that the caller did not supply.
        for key, value in self.__class__.KEYS.items():
            if params.get(key, None) is None and \
                    value in spec['spec'] and \
                    spec['spec'][value] is None:
                del spec['spec'][value]

        if self.__class__.NESTED:
            if self.__class__.__name__ in ['MappingRules', 'BackendMappingRules']:
                if 'metric_id' not in params.keys():
                    spec['spec']['metricMethodRef'] = 'hits'
                elif isinstance(params['metric_id'], int):
                    met = self.parent.metrics.read(int(params['metric_id']))
                    # exception because of backend mapping rules
                    name = met.entity.get('system_name', met.entity.get('name'))
                    if '.' in met['system_name']:
                        spec['spec']['metricMethodRef'] = name.split('.')[0]
                    else:
                        spec['spec']['metricMethodRef'] = name
                else:
                    # metric id is a tuple
                    spec['spec']['metricMethodRef'] = params['metric_id'][0]

            mapsi = self.get_list()
            maps = {}
            if self.__class__.__name__ in ['MappingRules', 'BackendMappingRules']:
                maps = []

            for mapi in mapsi:
                if self.__class__.__name__ in ['MappingRules', 'BackendMappingRules']:
                    maps.append(self.translate_to_crd(mapi.entity, self.trans_item))
                elif self.__class__.__name__ in ['Metrics', 'BackendMetrics']:
                    name = mapi['name']
                    maps[name] = self.translate_to_crd(mapi.entity, self.trans_item)
                elif self.__class__.__name__ in ['BackendUsages']:
                    map_ret = self.translate_to_crd(mapi.entity, self.trans_item)
                    backend_id = mapi['backend_id']
                    back = self.parent.parent.backends.read(int(backend_id))
                    maps[back['name']] = map_ret
                elif self.__class__.__name__ in ['ApplicationPlans']:
                    name = mapi['name']
                    maps[name] = self.translate_to_crd(mapi.entity, self.trans_item)

            if self.__class__.__name__ in ['MappingRules', 'BackendMappingRules']:
                resources.MappingRules.insert_into_position(maps, params, spec)
                par = self.parent.update({'mapping_rules': maps})
                maps = self.parent.mapping_rules.list()
                return resources.MappingRules.get_from_position(maps, params)
            elif self.__class__.__name__ in ['Metrics', 'BackendMetrics']:
                name = params.get('name', params.get('system_name', 'hits'))
                if 'name' in spec['spec']:
                    spec['spec'].pop('name')
                maps[name] = spec['spec']
                par = self.parent.update({'metrics': maps})
                maps = self.get_list()
            elif self.__class__.__name__ in ['BackendUsages']:
                backend_id = spec['spec'].pop('backend_id')
                back = self.parent.parent.backends.read(int(backend_id))
                maps[back['name']] = spec['spec']
                par = self.parent.update({'backend_usages': maps})
                maps = self.parent.backend_usages.list()
            elif self.__class__.__name__ in ['ApplicationPlans']:
                params['name'] = DefaultClientCRD.normalize(spec['spec'].pop('name'))
                maps[params['name']] = spec['spec']
                par = self.parent.update({'application_plans': maps})
                maps = self.get_list()

            # Find and return the freshly created nested object.
            for mapi in maps:
                if all(params[key] == mapi[key] for key in params.keys()):
                    return mapi
            return None
        else:
            result = ocp.create(spec)
            assert result.status() == 0
            list_objs = self.read_crd(self._entity_collection,
                                      result.out().strip().split('/')[1])
            return (self._create_instance(response=list_objs)[:1] or [None])[0]
    return threescale_api.defaults.DefaultClient.create(self, params, **kwargs)
#!/usr/bin/python

import openshift as oc

'''
This example scans all the templates on the cluster and looks specifically for the
openshift/nginx-example template. If the template is located, it clears the namespace
(to prevent an error when calling 'oc process'), updates any template parameter(s),
processes the template, and then creates the resulting objects in the current namespace.
'''

if __name__ == '__main__':
    with oc.client_host():
        templates = oc.selector('templates', all_namespaces=True)

        for template in templates.objects():
            if template.model.metadata.namespace == 'openshift' and template.model.metadata.name == 'nginx-example':
                template.model.metadata.namespace = ''

                obj = oc.APIObject(dict_to_model=template.as_dict())

                parameters = {
                    'NAME': 'my-nginx',
                }

                processed_template = obj.process(parameters=parameters)

                obj_sel = oc.create(processed_template)

                for obj in obj_sel.objects():
                    print('Created: {}/{}'.format(obj.model.kind, obj.model.metadata.name))
                    print(obj.as_json(indent=4))
"metadata": { "name": "bark" } } bite_obj = { "apiVersion": "v1", "kind": "User", "fullName": "Bite Doe", "groups": null, "identities": ["github:10000"], "metadata": { "name": "bite" } } bark_bite_sel = oc.create([bark_obj, bite_obj]) print("How were they created?\n" + str(bark_bite_sel)) try: oc.create(bark_obj) # Should create an error assert False except OpenShiftPythonException as create_err: print("What went wrong?: " + str(create_err)) bark_bite_sel.until_any(lambda obj: obj.metadata.qname == "bite") except OpenShiftPythonException as e: print("An exception occurred: " + str(e))