def test_get_json_files():
    container = 'container1'
    mytest_dir = '%s/realm/validation/%s' % (TESTSDIR, container)
    files = get_json_files(mytest_dir, 'snapshot')
    assert isinstance(files, list)
    files = get_json_files(mytest_dir, 'test')
    assert isinstance(files, list) and len(files) > 0
    files = get_json_files('/a/b/c', 'txt')
    assert isinstance(files, list)
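# The tests above only exercise the contract of get_json_files(dir, type):
# it returns a list of matching file paths, and an empty list for a missing
# directory. A minimal sketch of that contract, for illustration only; the
# exact matching rule (here: type substring in the file name) is an
# assumption, and the real implementation lives in the framework utilities.
import glob
import os

def _get_json_files_sketch(json_dir, file_type):
    """Hypothetical stand-in: .json files under json_dir matching file_type,
    or an empty list when the directory does not exist."""
    if not json_dir or not os.path.isdir(json_dir):
        return []
    return [f for f in glob.glob('%s/*.json' % json_dir)
            if file_type in os.path.basename(f)]  # matching rule assumed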
def container_snapshots_filesystem(container):
    """
    Get the snapshots and mastersnapshots used in all test/mastertest files
    of a container from the filesystem.

    Collecting the full list up front ensures the same snapshot/mastersnapshot
    is not populated multiple times when it is referenced by several
    test/mastertest files of a container. The default path is configured
    in config.ini.
    """
    snapshots = []
    logger.info("Starting to get list of snapshots")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    singletest = get_from_currentdata(SINGLETEST)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    for test_file in test_files:
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if snapshot:
                file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
                if singletest:
                    testsets = get_field_value_with_default(test_json_data, 'testSet', [])
                    for testset in testsets:
                        for testcase in testset['cases']:
                            if ('testId' in testcase and testcase['testId'] == singletest) or \
                                    ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                                if file_name not in snapshots:
                                    snapshots.append(file_name)
                else:
                    snapshots.append(file_name)
    test_files = get_json_files(json_dir, MASTERTEST)
    logger.info('\n'.join(test_files))
    for test_file in test_files:
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['masterSnapshot'] if 'masterSnapshot' in test_json_data else ''
            if snapshot:
                file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
                # Snapshots generated from a mastersnapshot carry a '_gen' suffix.
                parts = file_name.split('.')
                file_name = '%s_gen.%s' % (parts[0], parts[-1])
                if singletest:
                    testsets = get_field_value_with_default(test_json_data, 'testSet', [])
                    for testset in testsets:
                        for testcase in testset['cases']:
                            if ('testId' in testcase and testcase['testId'] == singletest) or \
                                    ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                                if file_name not in snapshots:
                                    snapshots.append(file_name)
                else:
                    snapshots.append(file_name)
    return list(set(snapshots))
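# The mastersnapshot branch above rewrites the referenced file name with a
# '_gen' suffix before the extension, e.g. 'master-snapshot' and
# 'master-snapshot.json' both map to 'master-snapshot_gen.json'. A standalone
# restatement of that transform (same logic, hypothetical helper name):
def _generated_snapshot_name(snapshot):
    file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
    parts = file_name.split('.')
    return '%s_gen.%s' % (parts[0], parts[-1])

assert _generated_snapshot_name('master-snapshot') == 'master-snapshot_gen.json'
assert _generated_snapshot_name('master-snapshot.json') == 'master-snapshot_gen.json'
# Caveat: a name with interior dots loses the middle parts, since only
# parts[0] and parts[-1] are kept: 'a.b.json' -> 'a_gen.json'.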
def mastersnapshots_used_in_mastertests_filesystem(container):
    """
    Get the mastersnapshots used in all mastertest files of a container
    from the filesystem.

    Collecting the full list up front ensures snapshots are not generated
    multiple times when the same mastersnapshot is referenced by several
    mastertest files of a container. The default path is configured
    in config.ini.
    """
    snapshots = []
    # logger.info("Starting to get list of mastersnapshots used in test files.")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    # logger.info(json_dir)
    # Only get the list of mastertest files.
    test_files = get_json_files(json_dir, MASTERTEST)
    # logger.info('\n'.join(test_files))
    for test_file in test_files:
        logger.info('\tMASTERTEST:%s', test_file)
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['masterSnapshot'] if 'masterSnapshot' in test_json_data else ''
            if snapshot:
                file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
                snapshots.append(file_name)
    return list(set(snapshots))  # set so that a unique list of files is returned.
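# Note: list(set(snapshots)) deduplicates but does not preserve the order in
# which the files were discovered. If a stable, discovery-order list were
# ever needed, an order-preserving alternative (relying on Python 3.7+ dict
# ordering) would be:
#     return list(dict.fromkeys(snapshots))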
def get_used_snapshots_in_tests(self):
    """Iterate over the container's test and mastertest files and list the
    snapshots/mastersnapshots they use."""
    snapshots = []
    logger.info("%s Fetching files for %s from container dir: %s",
                Snapshot.LOGPREFIX, self.container, self.container_dir)
    for testType, snapshotType, replace in (
            (TEST, SNAPSHOT, False), (MASTERTEST, MASTERSNAPSHOT, True)):
        test_files = get_json_files(self.container_dir, testType)
        logger.info('%s fetched %s number of files from %s container: %s',
                    Snapshot.LOGPREFIX, snapshotType, self.container, len(test_files))
        snapshots.extend(self.process_files(test_files, snapshotType, replace))
    return list(set(snapshots))
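# process_files is not shown here; from the call above it is assumed to read
# the snapshot reference field (SNAPSHOT/MASTERSNAPSHOT are assumed to resolve
# to the 'snapshot'/'masterSnapshot' JSON keys) from each test document, and
# to rewrite mastersnapshot names to their generated '<name>_gen.json' form
# when replace is True. A hypothetical, simplified sketch of that contract,
# operating on already-parsed documents:
def _process_files_sketch(test_docs, doctype, replace):
    """Hypothetical: collect the snapshot file referenced by each parsed test
    document, applying the '_gen' rename for mastersnapshots when replace."""
    snapshots = []
    for doc in test_docs:
        snapshot = doc.get(doctype, '') if doc else ''
        if snapshot:
            file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
            if replace:
                parts = file_name.split('.')
                file_name = '%s_gen.%s' % (parts[0], parts[-1])
            snapshots.append(file_name)
    return snapshots

assert _process_files_sketch([{'masterSnapshot': 'ms1'}], 'masterSnapshot', True) == ['ms1_gen.json']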
def container_snapshots_filesystem(container):
    """Get snapshot list used in test files from the filesystem."""
    snapshots = []
    logger.info("Starting to get list of snapshots")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    for test_file in test_files:
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if snapshot:
                snapshots.append(snapshot)
    return snapshots
def run_container_validation_tests_filesystem(container, snapshot_status=None):
    """Get test files from the filesystem and run the validation tests."""
    # logger.info("Starting validation tests")
    logger.info("VALIDATION:")
    logger.info("\tCollection: %s, Type: FILESYSTEM", container)
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info('\tLOCATION: %s', json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    # logger.info('\n'.join(test_files))
    result = True
    for test_file in test_files:
        logger.info('\tCOLLECTION: %s', test_file)
        val = run_file_validation_tests(test_file, container, True, snapshot_status)
        result = result and val
    if test_files:
        # Return the result if "test" files were processed in this collection.
        logger.critical("VALIDATION COMPLETE:")
        return result

    # mastertest files
    test_files = get_json_files(json_dir, MASTERTEST)
    # logger.info('\n'.join(test_files))
    if not test_files:
        logger.error("ERROR: No `test` or `mastertest` file found. The collection should contain either a `test` or a `mastertest` file.")
        return False

    finalresult = result
    for test_file in test_files:
        logger.info('\tCOLLECTION: %s', test_file)
        # logger.info("*" * 50)
        # logger.info("validator tests: %s", test_file)
        dirpath = None
        test_json_data = json_from_file(test_file)
        if not test_json_data:
            logger.info("Test file %s looks to be empty, next!...", test_file)
            continue
        if "connector" in test_json_data and "remoteFile" in test_json_data and \
                test_json_data["connector"] and test_json_data["remoteFile"]:
            dirpath, pull_response = pull_json_data(test_json_data)
            if not pull_response:
                return {}
        snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
        mastersnapshots = defaultdict(list)
        snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
        for snapshot_id, mastersnapshot_id in snapshot_data.items():
            if isinstance(mastersnapshot_id, list):
                for master_snapshot_id in mastersnapshot_id:
                    mastersnapshots[master_snapshot_id].append(snapshot_id)
            elif isinstance(mastersnapshot_id, str):
                mastersnapshots[mastersnapshot_id].append(snapshot_id)
        if not mastersnapshots:
            logger.error("No generated snapshots found for validation.")
            continue
        test_json_data['snapshot'] = snapshot_key
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            testcases = get_field_value_with_default(testset, 'cases', [])
            testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
        # print(json.dumps(test_json_data, indent=2))
        singletest = get_from_currentdata(SINGLETEST)
        if singletest:
            for testset in testsets:
                newtestcases = []
                for testcase in testset['cases']:
                    if ('testId' in testcase and testcase['testId'] == singletest) or \
                            ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                        newtestcases.append(testcase)
                testset['cases'] = newtestcases
        resultset = run_json_validation_tests(test_json_data, container, True,
                                              snapshot_status, dirpath=dirpath)
        if test_json_data.get('testSet') and not resultset:
            logger.error('\tERROR: Testset does not contain any testcases, or all testcases were skipped due to invalid rules.')
        elif resultset:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if singletest:
                print(json.dumps(resultset, indent=2))
            else:
                dump_output_results(resultset, container, test_file, snapshot, True)
            for result in resultset:
                if 'result' in result:
                    if not re.match(r'passed', result['result'], re.I):
                        finalresult = False
                        break
        else:
            logger.error('\tERROR: No mastertest Documents found!')
            finalresult = False
    logger.critical("VALIDATION COMPLETE:")
    return finalresult
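# The snapshot_data loop above inverts the snapshot_status mapping: it is
# keyed by generated-snapshot id, with the mastersnapshot id(s) each one came
# from as values, and the loop groups generated ids per mastersnapshot. A
# small self-contained illustration with made-up ids:
from collections import defaultdict

status = {'SNAP_1': 'MASTER_1', 'SNAP_2': ['MASTER_1', 'MASTER_2']}
grouped = defaultdict(list)
for snapshot_id, master_id in status.items():
    if isinstance(master_id, list):
        for mid in master_id:
            grouped[mid].append(snapshot_id)
    elif isinstance(master_id, str):
        grouped[master_id].append(snapshot_id)

assert dict(grouped) == {'MASTER_1': ['SNAP_1', 'SNAP_2'], 'MASTER_2': ['SNAP_2']}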
def run_container_validation_tests_filesystem(container, snapshot_status=None):
    """Get test files from the filesystem."""
    logger.info("Starting validation tests")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    result = True
    for test_file in test_files:
        val = run_file_validation_tests(test_file, container, True, snapshot_status)
        result = result and val
    # mastertest files
    test_files = get_json_files(json_dir, MASTERTEST)
    logger.info('\n'.join(test_files))
    finalresult = True
    for test_file in test_files:
        logger.info("*" * 50)
        logger.info("validator tests: %s", test_file)
        test_json_data = json_from_file(test_file)
        if not test_json_data:
            logger.info("Test file %s looks to be empty, next!...", test_file)
            continue
        snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
        mastersnapshots = defaultdict(list)
        snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
        for snapshot_id, mastersnapshot_id in snapshot_data.items():
            if isinstance(mastersnapshot_id, list):
                for master_snapshot_id in mastersnapshot_id:
                    mastersnapshots[master_snapshot_id].append(snapshot_id)
            elif isinstance(mastersnapshot_id, str):
                mastersnapshots[mastersnapshot_id].append(snapshot_id)
        test_json_data['snapshot'] = snapshot_key
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            testcases = get_field_value_with_default(testset, 'cases', [])
            testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
        # print(json.dumps(test_json_data, indent=2))
        singletest = get_from_currentdata(SINGLETEST)
        if singletest:
            for testset in testsets:
                newtestcases = []
                for testcase in testset['cases']:
                    if ('testId' in testcase and testcase['testId'] == singletest) or \
                            ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                        newtestcases.append(testcase)
                testset['cases'] = newtestcases
        resultset = run_json_validation_tests(test_json_data, container, False, snapshot_status)
        if resultset:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if singletest:
                print(json.dumps(resultset, indent=2))
            else:
                dump_output_results(resultset, container, test_file, snapshot, True)
            for result in resultset:
                if 'result' in result:
                    if not re.match(r'passed', result['result'], re.I):
                        finalresult = False
                        break
        else:
            logger.info('No mastertest Documents found!')
            finalresult = False
    return finalresult