def _get_structure_as_dict(self):
    """
    Parse the Dockerfile structure (self.dfp) into self.docker_dict.

    Labels are copied verbatim into docker_dict[LABEL]; every other known
    instruction is run through its handler and the results are appended to
    a per-instruction list.  Unknown instructions are reported and skipped.

    :return: None (fills self.docker_dict in place)
    """
    functions = {
        ENV: self._get_env,
        EXPOSE: self._get_general,
        VOLUME: self._get_volume,
        LABEL: self._get_label,
        FROM: self._get_general,
        RUN: self._get_general,
        USER: self._get_general,
        COPY: self._get_general,
        ADD: self._get_general,
    }
    self.docker_dict[LABEL] = {}
    for label in self.dfp.labels:
        self.docker_dict[LABEL][label] = self.dfp.labels[label]
    for struct in self.dfp.structure:
        key = struct[INSTRUCT]
        val = struct["value"]
        if key == LABEL:
            continue
        if key not in self.docker_dict:
            self.docker_dict[key] = []
        # Look the handler up explicitly instead of wrapping the call in
        # try/except KeyError: the old form also swallowed KeyErrors
        # raised *inside* the handler and misreported them as an
        # unknown Dockerfile instruction.
        if key not in functions:
            print_info("Dockerfile tag %s is not parsed by MTF" % key)
            continue
        for v in functions[key](val):
            self.docker_dict[key].append(v)
def _create_app_by_template(self):
    """
    It creates an application in OpenShift environment by OpenShift template
    Steps:
    * oc cluster up
    * oc create -f <template> -n openshift
    * oc new-app memcached -p APPLICATION_NAME=memcached

    :return: True on success, False when 'oc create -f' fails
    """
    self._register_docker_to_openshift_registry()
    self.runHost('oc get is')
    # Render the template once so parsing problems surface early; the
    # rendered output itself is not used (the unused locals that captured
    # these results were removed).
    self.runHost('oc process -f "%s"' % self.template,
                 verbose=common.is_not_silent())
    self._change_openshift_account()
    try:
        self.runHost('oc create -f %s -n %s' % (self.template, self.project_name),
                     verbose=common.is_not_silent())
    except CmdError as cme:
        common.print_info('oc create -f failed with traceback %s' % cme.message)
        self.runHost('oc status')
        self._oc_get_output('all')
        return False
    self._change_openshift_account(account=common.get_openshift_user(),
                                   password=common.get_openshift_passwd())
    template_name = self._get_openshift_template()
    # give OpenShift a moment to register the template before using it
    time.sleep(1)
    self._create_app(template=template_name)
    self.runHost('oc status')
    return True
def setUp(self):
    """
    It is called by child class and it is same method as Avocado/Unittest has.
    It prepares environment for systemd nspawn based testing
    * installing dependencies from config
    * setup environment from config

    :return: None
    """
    # expose the chroot path to the shared translation dict used elsewhere
    trans_dict["ROOT"] = self.chrootpath
    print_info("name of CHROOT directory:", self.chrootpath)
    self.setRepositoriesAndWhatToInstall()
    # never move this line to __init__ this location can change before setUp (set repositories)
    # Base-image path is keyed by an md5 of the repo list so different repo
    # sets get different cached images.
    # NOTE(review): hashlib.md5 requires bytes on Python 3 — this code
    # assumes Python 2 str; confirm before porting.
    self.chrootpath_baseimage = os.path.abspath(
        self.baseprefix + self.component_name + "_image_" +
        hashlib.md5(" ".join(self.repos)).hexdigest())
    # shared base image with required packages/repos; installed state is cached
    self.__image_base = Image(location=self.chrootpath_baseimage,
                              packageset=self.whattoinstallrpm,
                              repos=self.repos,
                              ignore_installed=True)
    # per-test snapshot of the base image, then boot a container from it
    self.__image = self.__image_base.create_snapshot(self.chrootpath)
    self.__container = Container(image=self.__image, name=self.name)
    # run the setup steps declared in the config file before booting
    self._callSetupFromConfig()
    self.__container.boot_machine()
def __init__(self, args, unknown):
    """
    Split command-line input into test files and pass-through avocado
    arguments; optionally pull further tests from the metadata loader.
    """
    # choose between TESTS and ADDITIONAL ENVIRONMENT from options
    if args.linter:
        self.tests.append("{MTF_TOOLS}/*.py".format(
            MTF_TOOLS=metadata_common.MetadataLoaderMTF.MTF_LINTER_PATH))
    self.args = args
    # take care of this, see tags for safe/unsafe:
    # http://avocado-framework.readthedocs.io/en/52.0/WritingTests.html#categorizing-tests
    for option in unknown:
        if os.path.exists(option):
            # an existing path is a local test file
            self.tests.append(option)
        else:
            # anything else is forwarded to avocado verbatim
            self.additionalAvocadoArg += " {0} ".format(option)
    if self.args.metadata:
        common.print_info("Using Metadata loader for tests and filtering")
        metadata_tests = filtertests(backend="mtf",
                                     location=os.getcwd(),
                                     linters=False,
                                     tests=[],
                                     tags=[],
                                     relevancy="")
        loaded = [item[metadata_common.SOURCE] for item in metadata_tests]
        self.tests += loaded
        common.print_debug("Loaded tests via metadata file: %s" % loaded)
    common.print_debug("tests = {0}".format(self.tests))
    common.print_debug("additionalAvocadoArg = {0}".format(
        self.additionalAvocadoArg))
def start(self, command="/bin/bash"):
    """
    starts the OpenShift application

    :param command: Do not use it directly (It is defined in config.yaml)
    :return: None on success, False when deployment or verification fails
    """
    # Clean environment before running tests; best-effort, the app may
    # simply not exist yet.
    try:
        self._app_remove()
    except Exception as e:
        common.print_info(e, "OpenShift applications were removed")
    # result intentionally ignored (the unused 'project' local was removed);
    # the project may already exist, hence ignore_status
    self.runHost('oc new-project %s' % self.project_name,
                 ignore_status=True,
                 verbose=common.is_not_silent())
    if self.template is None:
        if not self._app_exists():
            # running an application without template or s2i
            self._create_app()
    else:
        common.print_debug(self.template)
        self._change_openshift_account(account=common.get_openshift_user(),
                                       password=common.get_openshift_passwd())
        self._remove_apps_from_openshift_resources(common.TEMPLATE)
        if not self._create_app_by_template():
            return False
    # Verify application is really deployed and prepared for testing.
    if not self._verify_pod():
        return False
    self._get_ip_instance()
def cli():
    """
    Parse command-line options, set up the MTF environment variables and
    decide which module backend (MODULE) to use.

    :return: tuple (args, unknown) - parsed args and options forwarded to avocado
    """
    # unknown options are forwarded to avocado run
    args, unknown = mtfparser().parse_known_args()
    if args.version:
        # print() works identically here on Python 2 and 3
        # (the original used the py2-only print statement)
        print("0.7.7")
        exit(0)
    # uses additional arguments, set up variable asap, its used afterwards:
    if args.debug:
        os.environ['DEBUG'] = 'yes'
        os.environ['AVOCADO_LOG_DEBUG'] = 'yes'
    if args.config:
        os.environ['CONFIG'] = args.config
    if args.url:
        os.environ['URL'] = args.url
    if args.modulemdurl:
        os.environ['MODULEMDURL'] = args.modulemdurl
    common.print_debug(
        "Options: linter={0}, setup={1}, action={2}, module={3}".format(
            args.linter, args.setup, args.action, args.module))
    common.print_debug(
        "remaining options for avocado or test files: {0}".format(unknown))
    # environment usage:
    #   read: os.environ.get('MODULE')
    #   write: os.environ['MODULE']
    # MODULE could be from:
    #   1. common.get_module_type() ... it reads config.yaml and traceback if it doesn't exist
    #   2. environment ... MODULE=docker etc
    #   3. argument ... --module=docker
    try:
        args.module = common.get_module_type()
        # TODO it wrongly writes: 'Using default minimal config ...', change in common.py
    except moduleframework.mtfexceptions.ModuleFrameworkException:
        pass
    if os.environ.get('MODULE') is not None:
        # environment is the highest priority because mtf uses environment (too much)
        args.module = os.environ['MODULE']
    if args.module:
        # setup also environment
        os.environ['MODULE'] = args.module
    if args.module in common.get_backend_list():
        # for debug purposes, to be sure about module variables or options
        common.print_debug("MODULE={0}, options={1}".format(
            os.environ.get('MODULE'), args.module))
    else:
        # TODO: what to do here? whats the defaults value for MODULE, do I know it?
        common.print_info(
            "MODULE={0} ; we support {1} \n === expecting your magic, enjoy! === "
            .format(os.environ.get('MODULE'), common.get_backend_list()))
    common.print_debug("MODULE={0}".format(os.environ.get('MODULE')))
    return args, unknown
def test1(self):
    """Smoke-test memcached: send a 'set' command over a raw TCP socket."""
    self.start()
    # give the freshly started service time to accept connections
    time.sleep(5)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    port = self.getConfig()['service']['port']
    sock.connect((self.ip_address, port))
    sock.sendall('set Test 0 100 4\r\n\n')
    sock.sendall('JournalDev\r\n\n')
    reply = sock.recv(1024)
    common.print_info(reply)
    sock.close()
def _create_app(self):
    """
    It creates an application in OpenShift environment
    """
    # label the app (mtf_testing=true) so later cleanup can find
    # everything this run created
    result = self.runHost(
        "oc new-app -l mtf_testing=true %s --name=%s" % (self.container_name,
                                                         self.app_name),
        ignore_status=True)
    common.print_info(result.stdout)
    # give OpenShift a moment to register the new application
    time.sleep(1)
def test_connection(self):
    """Verify memcached stores a value when driven through telnet."""
    self.start()
    port = self.getConfig()['service']['port']
    session = pexpect.spawn("telnet %s %s " % (self.ip_address, port))
    session.sendline('set Test 0 100 4\r\n\n')
    session.sendline('JournalDev\r\n\n')
    common.print_info("Expecting STORED")
    # expect() raises if the reply never arrives, failing the test
    session.expect('STORED')
    common.print_info("STORED was catched")
    session.close()
def test_smoke(self):
    """Smoke-test memcached via telnet: a 'set' must answer STORED."""
    self.start()
    time.sleep(1)
    port = self.getConfig()['service']['port']
    session = pexpect.spawn("telnet %s %s" % (self.ip_address, port))
    session.sendline('set Test 0 100 4\r\n\n')
    session.sendline('JournalDev\r\n\n')
    common.print_info('Expecting STORED')
    session.expect('STORED')
    session.close()
def __stop_service(self):
    """
    Internal method, do not use it anyhow

    :return: None
    """
    # only try to stop docker when its daemon socket is actually present
    if os.path.exists('/var/run/docker.sock'):
        print_info("Stopping Docker")
        service.ServiceManager().stop('docker')
def __stop_openshift_cluster(self):
    """
    Internal method, do not use it anyhow. It stops OpenShift cluster

    :return: None
    """
    # only act on a local cluster
    if not common.get_openshift_local():
        return
    if int(self.__oc_status()) == 0:
        common.print_info("Stopping OpenShift")
        self.runHost("oc cluster down", verbose=common.is_not_silent())
    else:
        common.print_info("OpenShift is already stopped.")
def tearDown(self):
    """
    Cleanup environment and call also cleanup from config

    :return: None
    """
    super(OpenShiftHelper, self).tearDown()
    # removal is best-effort: the application may already be gone
    # (the redundant trailing 'pass' was removed)
    try:
        self._app_remove()
    except Exception as e:
        common.print_info(e, "OpenShift application already removed")
def stop(self):
    """
    This method checks if the application is deployed within OpenShift
    environment and removes service, deployment config and imagestream
    from OpenShift.

    :return: None
    """
    if not self._app_exists():
        return
    try:
        self._app_remove()
    except Exception as err:
        # best effort: the application may already be gone
        common.print_info(err, "OpenShift application already removed")
def show_error(self):
    """
    Print TEST/ERROR details for every failed or errored testcase found in
    the avocado JSON log (self.json_tmppath) and remove the log afterwards.

    Exits with status 127 when the log exists but is not valid JSON.

    :return: None
    """
    if os.path.exists(self.json_tmppath):
        try:
            # file has to be json, otherwise it fails;
            # use a context manager so the handle is closed
            # (the original open(...).read() leaked it)
            with open(self.json_tmppath) as json_file:
                data = json.load(json_file)
        except (IOError, ValueError) as e:
            # file is not readable as json: No JSON object could be decoded
            print(e)
            # remove file as its not readable
            os.remove(self.json_tmppath)
            # fatal error when this command fails, its unexpected
            exit(127)
        # errors follow after 'normal' output with no delimiter, then with -------
        delimiter = ""
        for testcase in data['tests']:
            if testcase.get('status') in ['ERROR', 'FAIL']:
                common.print_info(delimiter)
                common.print_info("TEST: {0}".format(testcase.get('id')))
                common.print_info("ERROR: {0}".format(
                    testcase.get('fail_reason')))
                common.print_info(" {0}".format(
                    testcase.get('logfile')))
                delimiter = "-------------------------"
        os.remove(self.json_tmppath)
def __prepare_selinux(self):
    """Disable SELinux for the run, remembering the previous state on disk."""
    # disable selinux by default if not turned off
    if os.environ.get('MTF_SKIP_DISABLING_SELINUX'):
        return
    # https://github.com/fedora-modularity/meta-test-family/issues/53
    # workaround because systemd nspawn is now working well in F-26
    if os.path.exists(selinux_state_file):
        # state already stored by a previous run; nothing to do
        return
    print_info("Disabling selinux")
    actual_state = self.runHost("getenforce",
                                ignore_status=True).stdout.strip()
    with open(selinux_state_file, 'w') as state_file:
        state_file.write(actual_state)
    if setseto not in actual_state:
        self.runHost("setenforce %s" % setseto,
                     verbose=is_not_silent(),
                     sudo=True)
def avocado_general(self):
    """
    Run an arbitrary avocado subcommand (self.args.action) with the
    collected tests and forwarded arguments.

    :return: int, avocado exit status
    """
    # additional parameters
    # self.additionalAvocadoArg: its from cmd line, whats unknown to this tool
    avocado_args = ""  # when needed => specify HERE your additional stuff
    avocado_cmd = "avocado {ACTION} {AVOCADO_ARGS}".format(
        ACTION=self.args.action, AVOCADO_ARGS=avocado_args)
    full_cmd = "{AVOCADO} {a} {b}".format(AVOCADO=avocado_cmd,
                                          a=self.additionalAvocadoArg,
                                          b=" ".join(self.tests))
    result = process.run(full_cmd, shell=True, ignore_status=True)
    common.print_info(result.stdout, result.stderr)
    common.print_debug("Command used: ", result.command)
    return result.exit_status
def stop(self):
    """
    This method checks if the application is deployed within OpenShift
    environment and removes service, deployment config and imagestream
    from OpenShift.

    :return: None
    """
    self._change_openshift_account(account=common.get_openshift_user(),
                                   password=common.get_openshift_passwd())
    self._oc_get_output('all')
    if not self._app_exists():
        return
    try:
        self._app_remove()
    except Exception as err:
        # best effort: the application may already be removed
        common.print_info(err, "OpenShift application already removed")
def add_insecure_registry(self, registry):
    """
    https://github.com/fedora-modularity/meta-test-family/issues/52
    Deprecated: Append registry into insecure registry.

    :param registry: string containing value to add to insecure registry variable in config file
    :return: None
    """
    # read with a context manager so the handle is closed before the
    # file is reopened for appending (the original leaked it)
    with open('/etc/sysconfig/docker', 'r') as config_file:
        already_present = registry in config_file.read()
    if not already_present:
        print_info("Adding %s to insecure registry" % registry)
        with open("/etc/sysconfig/docker", "a") as myfile:
            myfile.write(
                "INSECURE_REGISTRY='--insecure-registry $REGISTRY %s'"
                % registry)
def check_copy_files_exist(self):
    """
    Function checks if COPY instructions contain files which really exist

    :return: True if all files/directories exist
             False otherwise (also when no files are referenced at all)
    """
    dir_name = os.getcwd()
    files = self._get_copy_add_files(os.path.dirname(self.dockerfile))
    # BUG FIX: the previous version set the flag back to True whenever a
    # *later* file existed, so one present file masked earlier missing
    # ones.  Now any missing file makes the result False.  An empty file
    # list still yields False, matching the original behavior.
    all_exist = bool(files)
    for f in files:
        if f.startswith('http'):
            # remote URLs cannot be checked locally; treat as present
            continue
        if not os.path.exists(os.path.join(dir_name, f)):
            print_info("The file %s does not exist." % f)
            all_exist = False
    return all_exist
def _get_copy_add_files(self, dirname):
    """
    Function gets all COPY and ADD files from Dockerfile into list.
    It contains only source files not target files

    :param dirname: Dirname where we look for COPY and ADD files.
    :return: list
    """
    files = []
    for instruction in (COPY, ADD):
        if instruction not in self.docker_dict:
            print_info("Instruction %s is not present in Dockerfile"
                       % instruction)
            continue
        # keep only source entries, not the (absolute) target path
        sources = (entry for entry in self.docker_dict[instruction]
                   if not entry.startswith('/'))
        for source in sources:
            files.extend(glob.glob(os.path.join(dirname, source)))
    return files
def avocado_run(self):
    """
    Run 'avocado run' on the collected tests, logging results as JSON
    (path kept in self.json_tmppath for later reporting) and optionally
    as xunit.

    :return: int, avocado exit status
    """
    # NOTE(review): tempfile.mktemp() is race-prone; avocado creates the
    # file itself, but consider mkstemp() -- confirm show_error()'s
    # missing-file handling first.
    self.json_tmppath = tempfile.mktemp()
    avocado_args = "--json {JSON_LOG}".format(JSON_LOG=self.json_tmppath)
    if self.args.xunit:
        avocado_args += " --xunit {XUNIT} ".format(XUNIT=self.args.xunit)
    avocado_cmd = "avocado {ACTION} {AVOCADO_ARGS}".format(
        ACTION=self.args.action, AVOCADO_ARGS=avocado_args)
    # run avocado with right cmd arguments
    full_cmd = "{AVOCADO} {a} {b}".format(AVOCADO=avocado_cmd,
                                          a=self.additionalAvocadoArg,
                                          b=" ".join(self.tests))
    result = process.run(full_cmd, shell=True, ignore_status=True)
    common.print_info(result.stdout, result.stderr)
    common.print_debug("Command used: ", result.command)
    return result.exit_status
def _app_exists(self):
    """
    It checks if an application already exists in OpenShift environment

    :return: True, application exists
             False, application does not exist
    """
    dc_check = self.runHost("oc get dc %s -o json" % self.app_name,
                            ignore_status=True)
    if int(dc_check.exit_status) == 0:
        common.print_info("Application already exists.")
        return True
    pods = self._oc_get_output('pods')
    # Check if 'items' in json output is empty or not
    if not pods:
        return False
    # check if 'items', which is not empty, in json output contains app_name
    return bool(self._check_resource_in_json(pods))
def tearDown(self):
    """
    cleanup environment after test is finished and call
    cleanup section in config file

    :return: None
    """
    if get_if_do_cleanup() and not get_if_reuse():
        # Stop and remove best-effort; the container may already be gone.
        # Catch Exception (not bare except) so KeyboardInterrupt and
        # SystemExit still propagate.
        try:
            self.__container.stop()
        except Exception:
            pass
        try:
            self.__container.rm()
        except Exception:
            pass
    else:
        print_info("tearDown skipped", "running nspawn: %s" % self.name)
        print_info("To connect to a machine use:",
                   "machinectl shell root@%s /bin/bash" % self.name)
def check_helpmd_is_present(self):
    """
    Function checks if help.md is present in COPY or ADD directives

    :return: True if help.md is present
             False if help.md is not specified in Dockerfile
    """
    helpmd_present = False
    for instruction in [COPY, ADD]:
        try:
            # any COPY/ADD entry mentioning help.md counts;
            # 'entry' also avoids shadowing the builtin 'help'
            if any("help.md" in entry
                   for entry in self.docker_dict[instruction]):
                helpmd_present = True
        except KeyError:
            # Use %-interpolation for consistency with the rest of the
            # module; the original passed the instruction as a separate
            # argument, leaving the %s placeholder unformatted.
            print_info("Instruction %s is not present in Dockerfile"
                       % instruction)
    return helpmd_present
def _get_ip_instance(self):
    """
    This method verifies that we can obtain an IP address of the application
    deployed within OpenShift.

    :return: True: getting IP address was successful
             False: getting IP address was not successful
    """
    services = self._oc_get_output('service')
    try:
        for svc in services:
            # Default to empty dicts: the original chained
            # .get('metadata').get('labels') raised an uncaught
            # AttributeError when a key was missing, since only
            # KeyError/IndexError were handled.
            labels = svc.get('metadata', {}).get('labels', {})
            if labels.get('app') == self.app_name:
                self.ip_address = svc.get('spec', {}).get("clusterIP")
                common.trans_dict['GUESTIPADDR'] = self.ip_address
                return True
    except (KeyError, IndexError) as e:
        common.print_info(e.message)
        return False
    # no matching service found
    return False
def __start_openshift_cluster(self):
    """
    Internal method, do not use it anyhow. It starts OpenShift cluster

    :return: None
    """
    # only act on a local cluster
    if not common.get_openshift_local():
        return
    if int(self.__oc_status()) == 0:
        common.print_info("Seems like OpenShift is already started.")
    else:
        result = self.runHost("oc cluster up", ignore_status=True)
        common.print_info(result.stdout)
        common.print_info(result.stderr)
def __cleanup(self):
    """
    Restore SELinux to the state recorded by the prepare step and remove
    the state file.

    :return: None
    """
    if os.environ.get('MTF_SKIP_DISABLING_SELINUX'):
        return
    if os.path.exists(selinux_state_file):
        # the duplicated log line was removed; 'getenforce' is now only
        # run when there is a stored state to compare against
        print_info("Turning back selinux to previous state")
        actual_state = self.runHost("getenforce",
                                    ignore_status=True).stdout.strip()
        with open(selinux_state_file, 'r') as state_file:
            stored_state = state_file.readline()
        if stored_state != actual_state:
            self.runHost("setenforce %s" % stored_state,
                         ignore_status=True,
                         verbose=is_not_silent(),
                         sudo=True)
        os.remove(selinux_state_file)
    else:
        print_info("Selinux state is not stored, skipping.")
def cleanup_env(self):
    """Intentionally a no-op: cleaning up could destroy the host machine."""
    print_info("WARNING: No cleanup as it can destroy this machine")
def prepare_env(self):
    """Prepare the host: dependencies, SELinux handling and machined."""
    config_name = self.config['name']
    print_info('Loaded config for name: {}'.format(config_name))
    # dependencies first, then selinux workaround, then systemd-machined
    self.installTestDependencies()
    self.__prepare_selinux()
    self.__install_machined()