def test_dump_output_results(monkeypatch, create_temp_dir):
    """dump_output_results with DB mocks: no output file is written and None is returned."""
    # Route DB insert and config lookups through test doubles.
    monkeypatch.setattr('processor.reporting.json_output.insert_one_document', mock_insert_one_document)
    monkeypatch.setattr('processor.reporting.json_output.config_value', mock_config_value)
    tmp_dir = create_temp_dir()
    source_file = '%s/test1.json' % tmp_dir
    unexpected_output = '%s/output-a1.json' % tmp_dir
    dump_output_results([], source_file, 'test1', 'snapshot')
    # With the mocked DB path, no filesystem output should appear.
    assert not os.path.exists(unexpected_output)
    # Explicit filesystem=False call returns nothing.
    assert dump_output_results([], source_file, 'test1', 'snapshot', False) is None
def test_dump_output_results():
    """dump_output_results writes an output-*.json file next to the test file.

    NOTE(review): this module defines `test_dump_output_results` twice; only the
    later definition is visible at import time — confirm which one is intended.
    """
    container = 'container1'
    test_file = '%s/realm/validation/%s/test1.json' % (TESTSDIR, container)
    outputtest_file = '%s/realm/validation/%s/output-test1.json' % (TESTSDIR, container)
    # Start clean: drop any output left behind by a previous run.
    if os.path.exists(outputtest_file):
        os.remove(outputtest_file)
    dump_output_results([], container, test_file, 'snapshot')
    # The call must have created the output file.
    assert os.path.exists(outputtest_file)
    os.remove(outputtest_file)
def get_snapshots(self):
    """Populate the used snapshots in test and mastertest for this container.

    Fetches snapshot documents from the DB, populates only those referenced by
    the container's tests, and returns a dict mapping snapshot name to the
    populated snapshot file data. Raises SnapshotsException when no snapshots
    are referenced or none could be populated.
    """
    snapshots_status = {}
    # All snapshot documents for this container, newest first per self.sort.
    docs = get_documents(self.collection(SNAPSHOT), dbname=self.dbname, sort=self.sort, query=self.qry, _id=True)
    if docs and len(docs):
        logger.info('%s fetched %s number of documents: %s', Snapshot.LOGPREFIX, SNAPSHOT, len(docs))
        used_snapshots = self.get_used_snapshots_in_tests()
        if not used_snapshots:
            raise SnapshotsException(
                "No snapshots for this container: %s, add and run again!..." % self.container)
        populated = []
        for doc in docs:
            if doc['json']:
                snapshot = doc['name']
                try:
                    # For git-connector snapshots the remote content is pulled first.
                    pull_response, git_connector_json = self.check_and_fetch_remote_snapshots(
                        doc['json'])
                    if git_connector_json and not pull_response:
                        # Remote fetch failed: stop processing further documents.
                        logger.info('%s Fetching remote snapshots failed.', Snapshot.LOGPREFIX)
                        break
                    if snapshot in used_snapshots and snapshot not in populated:
                        # Take the snapshot and populate whether it was successful or not.
                        # Then pass it back to the validation tests, so that tests for those
                        # snapshots that have been susccessfully fetched shall be executed.
                        snapshot_file_data = self.populate_snapshots(
                            doc['json'])
                        if not git_connector_json:
                            # Persist the (possibly updated) local snapshot document.
                            update_one_document(doc, self.collection(SNAPSHOT), self.dbname)
                        # Each snapshot name is populated at most once.
                        populated.append(snapshot)
                        snapshots_status[snapshot] = snapshot_file_data
                except Exception as e:
                    # Emit an empty result record before propagating the failure.
                    dump_output_results([], self.container, "-", snapshot, False)
                    raise e
    if not snapshots_status:
        raise SnapshotsException(
            "No snapshots for this container: %s, add and run again!..." % self.container)
    return snapshots_status
def run_file_validation_tests(test_file, container, filesystem=True, snapshot_status=None):
    """Run the validation tests defined in a single test file.

    Returns True when every result matches 'passed' (case-insensitive),
    False otherwise, or {} when a configured remote pull fails.
    """
    # logger.info("*" * 50)
    logger.info("\tTEST: %s", test_file)
    dirpath = None
    test_json_data = json_from_file(test_file)
    if not test_json_data:
        # NOTE(review): this logs "next!" but does NOT return/skip — execution
        # continues with test_json_data falsy. Confirm this fallthrough is intended.
        logger.info("\t\tTest file %s looks to be empty, next!...", test_file)
    # Pull remote test content when the file declares a connector + remoteFile.
    if test_json_data and "connector" in test_json_data and "remoteFile" in test_json_data and test_json_data["connector"] and test_json_data["remoteFile"]:
        dirpath, pull_response = pull_json_data(test_json_data)
        if not pull_response:
            return {}
    singletest = get_from_currentdata(SINGLETEST)
    if singletest:
        # Filter every testset down to the single requested testId/masterTestId.
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            newtestcases = []
            for testcase in testset['cases']:
                if ('testId' in testcase and testcase['testId'] == singletest) or \
                        ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                    newtestcases.append(testcase)
            testset['cases'] = newtestcases
    resultset = run_json_validation_tests(test_json_data, container, filesystem, snapshot_status, dirpath=dirpath)
    finalresult = True
    if resultset:
        snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
        if singletest:
            # Single-test mode prints results instead of dumping them.
            print(json.dumps(resultset, indent=2))
        else:
            dump_output_results(resultset, container, test_file, snapshot, filesystem)
        # Any non-'passed' result makes the whole file fail.
        for result in resultset:
            if 'result' in result:
                if not re.match(r'passed', result['result'], re.I):
                    finalresult = False
                    break
    else:
        # TODO: NO test cases in this file.
        # LOG HERE that no test cases are present in this file.
        finalresult = False
    return finalresult
def run_container_validation_tests_database(container, snapshot_status=None):
    """Get the test files from the database and run their validation tests.

    Processes `test` documents first, then `mastertest` documents. Returns
    True when all results match 'passed', False otherwise, or {} when a
    remote pull fails. Raises when neither kind of document exists for the
    container, and re-raises (after dumping an empty result record) any
    failure inside a document's processing.
    """
    dirpath = None
    dbname = config_value(DATABASE, DBNAME)
    test_files_found = True
    mastertest_files_found = True
    # For test files
    collection = config_value(DATABASE, collectiontypes[TEST])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    finalresult = True
    if docs and len(docs):
        logger.info('Number of test Documents: %s', len(docs))
        for doc in docs:
            if doc['json']:
                try:
                    snapshot = doc['json']['snapshot'] if 'snapshot' in doc['json'] else ''
                    # Pull remote content when the document declares a connector + remoteFile.
                    if "connector" in doc['json'] and "remoteFile" in doc['json'] and doc['json']["connector"] and doc['json']["remoteFile"]:
                        dirpath, pull_response = pull_json_data(doc['json'])
                        if not pull_response:
                            return {}
                    resultset = run_json_validation_tests(doc['json'], container, False, dirpath=dirpath)
                    if resultset:
                        test_file = doc['name'] if 'name' in doc else ''
                        dump_output_results(resultset, container, test_file, snapshot, False)
                        # Any non-'passed' result fails the container.
                        for result in resultset:
                            if 'result' in result:
                                if not re.match(r'passed', result['result'], re.I):
                                    finalresult = False
                                    break
                except Exception as e:
                    # Record an empty result before propagating the failure.
                    dump_output_results([], container, "-", snapshot, False)
                    raise e
    else:
        logger.info('No test Documents found!')
        test_files_found = False
        finalresult = False
    # For mastertest files
    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    # snapshots_details_map = _get_snapshot_type_map(container)
    if docs and len(docs):
        logger.info('Number of mastertest Documents: %s', len(docs))
        for doc in docs:
            test_json_data = doc['json']
            if test_json_data:
                snapshot = doc['json']['snapshot'] if 'snapshot' in doc['json'] else ''
                test_file = doc['name'] if 'name' in doc else '-'
                try:
                    if "connector" in test_json_data and "remoteFile" in test_json_data and test_json_data["connector"] and test_json_data["remoteFile"]:
                        dirpath, pull_response = pull_json_data(test_json_data)
                        if not pull_response:
                            return {}
                    # Map each generated snapshot id back to its master snapshot id.
                    snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
                    mastersnapshots = defaultdict(list)
                    snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
                    for snapshot_id, mastersnapshot_id in snapshot_data.items():
                        if isinstance(mastersnapshot_id, list):
                            for msnp_id in mastersnapshot_id:
                                mastersnapshots[msnp_id].append(snapshot_id)
                        else:
                            mastersnapshots[mastersnapshot_id].append(snapshot_id)
                    test_json_data['snapshot'] = snapshot_key
                    # Expand master testcases into per-snapshot testcases.
                    testsets = get_field_value_with_default(test_json_data, 'testSet', [])
                    for testset in testsets:
                        testcases = get_field_value_with_default(testset, 'cases', [])
                        testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
                    # print(json.dumps(test_json_data, indent=2))
                    resultset = run_json_validation_tests(test_json_data, container, False, snapshot_status, dirpath=dirpath)
                    if resultset:
                        dump_output_results(resultset, container, test_file, snapshot, False)
                        for result in resultset:
                            if 'result' in result:
                                if not re.match(r'passed', result['result'], re.I):
                                    finalresult = False
                                    break
                except Exception as e:
                    dump_output_results([], container, test_file, snapshot, False)
                    raise e
    else:
        logger.info('No mastertest Documents found!')
        mastertest_files_found = False
        finalresult = False
    if not test_files_found and not mastertest_files_found:
        # FIX: the message was previously built as Exception("... %s ...", container),
        # which never interpolates the container name (and misspelled "compliance").
        raise Exception("No compliance tests for this container: %s, add and run!" % container)
    return finalresult
def run_container_validation_tests_filesystem(container, snapshot_status=None):
    """Get test files from the filesystem.

    Runs `test` files first; if any exist, their combined result is returned
    immediately. Otherwise `mastertest` files are expanded against the
    generated snapshots in snapshot_status and executed. Returns True only
    when all results match 'passed'; returns {} when a remote pull fails.
    """
    # logger.info("Starting validation tests")
    logger.info("VALIDATION:")
    logger.info("\tCollection: %s, Type: FILESYSTEM", container)
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info('\tLOCATION: %s', json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    # logger.info('\n'.join(test_files))
    result = True
    for test_file in test_files:
        logger.info('\tCOLLECTION: %s', test_file)
        val = run_file_validation_tests(test_file, container, True, snapshot_status)
        result = result and val
    if test_files:
        # return the result value if "test" file is processed collection
        logger.critical("VALIDATION COMPLETE:")
        return result
    # mastertest files
    test_files = get_json_files(json_dir, MASTERTEST)
    # logger.info('\n'.join(test_files))
    if not test_files:
        logger.error("ERROR: No `test` or `mastertest` file found. collection should contain either `test` or `mastertest` file")
        return False
    finalresult = result
    for test_file in test_files:
        logger.info('\tCOLLECTION: %s', test_file)
        # logger.info("*" * 50)
        # logger.info("validator tests: %s", test_file)
        dirpath = None
        test_json_data = json_from_file(test_file)
        if not test_json_data:
            logger.info("Test file %s looks to be empty, next!...", test_file)
            continue
        # Pull remote test content when the file declares a connector + remoteFile.
        if "connector" in test_json_data and "remoteFile" in test_json_data and test_json_data["connector"] and test_json_data["remoteFile"]:
            dirpath, pull_response = pull_json_data(test_json_data)
            if not pull_response:
                return {}
        # Map each generated snapshot id back to its master snapshot id.
        snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
        mastersnapshots = defaultdict(list)
        snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
        for snapshot_id, mastersnapshot_id in snapshot_data.items():
            if isinstance(mastersnapshot_id, list):
                for master_snapshot_id in mastersnapshot_id:
                    mastersnapshots[master_snapshot_id].append(snapshot_id)
            elif isinstance(mastersnapshot_id, str):
                mastersnapshots[mastersnapshot_id].append(snapshot_id)
        if not mastersnapshots:
            logger.error("No generated snapshots found for validation.")
            continue
        test_json_data['snapshot'] = snapshot_key
        # Expand master testcases into per-snapshot testcases.
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            testcases = get_field_value_with_default(testset, 'cases', [])
            testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
        # print(json.dumps(test_json_data, indent=2))
        singletest = get_from_currentdata(SINGLETEST)
        if singletest:
            # Keep only the requested testId/masterTestId in every testset.
            for testset in testsets:
                newtestcases = []
                for testcase in testset['cases']:
                    if ('testId' in testcase and testcase['testId'] == singletest) or \
                            ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                        newtestcases.append(testcase)
                testset['cases'] = newtestcases
        resultset = run_json_validation_tests(test_json_data, container, True, snapshot_status, dirpath=dirpath)
        if test_json_data.get('testSet') and not resultset:
            logger.error('\tERROR: Testset does not contains any testcases or all testcases are skipped due to invalid rules.')
        elif resultset:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if singletest:
                # Single-test mode prints results instead of dumping them.
                print(json.dumps(resultset, indent=2))
            else:
                dump_output_results(resultset, container, test_file, snapshot, True)
            # Any non-'passed' result fails the container.
            for result in resultset:
                if 'result' in result:
                    if not re.match(r'passed', result['result'], re.I):
                        finalresult = False
                        break
        else:
            logger.error('\tERROR: No mastertest Documents found!')
            finalresult = False
    logger.critical("VALIDATION COMPLETE:")
    return finalresult
def run_container_validation_tests_database(container, snapshot_status=None):
    """ Get the test files from the database

    NOTE(review): this module contains two definitions of
    run_container_validation_tests_database; only the later one is visible at
    import time — confirm which version is intended to survive.

    Runs `test` documents, then `mastertest` documents expanded against the
    generated snapshots in snapshot_status. Returns True only when all
    results match 'passed'.
    """
    dbname = config_value(DATABASE, DBNAME)
    # For test files
    collection = config_value(DATABASE, collectiontypes[TEST])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    finalresult = True
    if docs and len(docs):
        logger.info('Number of test Documents: %s', len(docs))
        for doc in docs:
            if doc['json']:
                resultset = run_json_validation_tests(doc['json'], container, False)
                if resultset:
                    snapshot = doc['json']['snapshot'] if 'snapshot' in doc[
                        'json'] else ''
                    test_file = doc['name'] if 'name' in doc else ''
                    dump_output_results(resultset, container, test_file, snapshot, False)
                    # Any non-'passed' result fails the container.
                    for result in resultset:
                        if 'result' in result:
                            if not re.match(r'passed', result['result'], re.I):
                                finalresult = False
                                break
    else:
        logger.info('No test Documents found!')
        finalresult = False
    # For mastertest files
    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    # snapshots_details_map = _get_snapshot_type_map(container)
    if docs and len(docs):
        logger.info('Number of mastertest Documents: %s', len(docs))
        for doc in docs:
            test_json_data = doc['json']
            if test_json_data:
                # Map each generated snapshot id back to its master snapshot id.
                snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
                mastersnapshots = defaultdict(list)
                snapshot_data = snapshot_status[
                    snapshot_key] if snapshot_key in snapshot_status else {}
                for snapshot_id, mastersnapshot_id in snapshot_data.items():
                    if isinstance(mastersnapshot_id, list):
                        for msnp_id in mastersnapshot_id:
                            mastersnapshots[msnp_id].append(snapshot_id)
                    else:
                        mastersnapshots[mastersnapshot_id].append(snapshot_id)
                test_json_data['snapshot'] = snapshot_key
                # Expand master testcases into per-snapshot testcases.
                testsets = get_field_value_with_default(
                    test_json_data, 'testSet', [])
                for testset in testsets:
                    testcases = get_field_value_with_default(
                        testset, 'cases', [])
                    testset['cases'] = _get_new_testcases(
                        testcases, mastersnapshots)
                # print(json.dumps(test_json_data, indent=2))
                resultset = run_json_validation_tests(test_json_data, container, False, snapshot_status)
                if resultset:
                    snapshot = doc['json']['snapshot'] if 'snapshot' in doc[
                        'json'] else ''
                    test_file = doc['name'] if 'name' in doc else ''
                    dump_output_results(resultset, container, test_file, snapshot, False)
                    for result in resultset:
                        if 'result' in result:
                            if not re.match(r'passed', result['result'], re.I):
                                finalresult = False
                                break
    else:
        logger.info('No mastertest Documents found!')
        finalresult = False
    return finalresult
def run_container_validation_tests_filesystem(container, snapshot_status=None):
    """Get test files from the filesystem.

    NOTE(review): this module contains two definitions of
    run_container_validation_tests_filesystem; only the later one is visible
    at import time — confirm which version is intended to survive.
    """
    logger.info("Starting validation tests")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    result = True
    for test_file in test_files:
        val = run_file_validation_tests(test_file, container, True, snapshot_status)
        result = result and val
    # NOTE(review): `result` from the test-file loop is never folded into the
    # returned value below — the test-file outcomes are silently dropped. Confirm.
    # mastertest files
    test_files = get_json_files(json_dir, MASTERTEST)
    logger.info('\n'.join(test_files))
    finalresult = True
    for test_file in test_files:
        logger.info("*" * 50)
        logger.info("validator tests: %s", test_file)
        test_json_data = json_from_file(test_file)
        if not test_json_data:
            logger.info("Test file %s looks to be empty, next!...", test_file)
            continue
        # Map each generated snapshot id back to its master snapshot id.
        snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
        mastersnapshots = defaultdict(list)
        snapshot_data = snapshot_status[
            snapshot_key] if snapshot_key in snapshot_status else {}
        for snapshot_id, mastersnapshot_id in snapshot_data.items():
            if isinstance(mastersnapshot_id, list):
                for master_snapshot_id in mastersnapshot_id:
                    mastersnapshots[master_snapshot_id].append(snapshot_id)
            elif isinstance(mastersnapshot_id, str):
                mastersnapshots[mastersnapshot_id].append(snapshot_id)
        test_json_data['snapshot'] = snapshot_key
        # Expand master testcases into per-snapshot testcases.
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            testcases = get_field_value_with_default(testset, 'cases', [])
            testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
        # print(json.dumps(test_json_data, indent=2))
        singletest = get_from_currentdata(SINGLETEST)
        if singletest:
            # Keep only the requested testId/masterTestId in every testset.
            for testset in testsets:
                newtestcases = []
                for testcase in testset['cases']:
                    if ('testId' in testcase and testcase['testId'] == singletest) or \
                            ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                        newtestcases.append(testcase)
                testset['cases'] = newtestcases
        resultset = run_json_validation_tests(test_json_data, container, False, snapshot_status)
        if resultset:
            snapshot = test_json_data[
                'snapshot'] if 'snapshot' in test_json_data else ''
            if singletest:
                # Single-test mode prints results instead of dumping them.
                print(json.dumps(resultset, indent=2))
            else:
                dump_output_results(resultset, container, test_file, snapshot, True)
            # Any non-'passed' result fails the container.
            for result in resultset:
                if 'result' in result:
                    if not re.match(r'passed', result['result'], re.I):
                        finalresult = False
                        break
        else:
            logger.info('No mastertest Documents found!')
            finalresult = False
    return finalresult