def get_call_kwargs(node):
    """Get argument names and their values in kwargs"""
    kwargs = {"params": {}}
    logger.info("Get node's kwargs")
    params_source = config_value('GOOGLE', 'params')
    paramsversions = None
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = params_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Google Params versions: %s', len(docs))
        if docs and len(docs):
            paramsversions = docs[0]['json']
    else:
        paramsversions_file = '%s/%s' % (framework_dir(), params_source)
        logger.info(paramsversions_file)
        if exists_file(paramsversions_file):
            paramsversions = json_from_file(paramsversions_file)

    path = node['path']
    if paramsversions and "queryprameters" in paramsversions:
        if node['type'] in paramsversions["queryprameters"]:
            for param, parameter_type in paramsversions["queryprameters"][
                    node['type']].items():
                add_argument_parameter(path, kwargs, param, parameter_type)

    return kwargs
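
A minimal, runnable sketch of the query-name derivation above. The path value here is hypothetical; the real one comes from config_value('GOOGLE', 'params').

params_source = 'realm/google_params.json'   # hypothetical path
parts = params_source.rsplit('/')            # ['realm', 'google_params.json']
name = parts[-1].split('.')                  # ['google_params', 'json']
qry = {'name': name[0]}
print(qry)                                   # {'name': 'google_params'}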
Example #2
def get_google_data(snapshot_source):
    """
    The Google source object to be fetched from database or the filesystem
    The initial configuration for database is 'validator' and collection
    is 'structures', whereas for the filesystem the path to fetch the
    'structures' is  $SOLUTIONDIR/realm/<structure>.json
    """
    sub_data = {}
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = snapshot_source.split('.')
        qry = {'name': parts[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
        logger.info('Number of Google structure Documents: %d', len(docs))
        if docs and len(docs):
            sub_data = docs[0]['json']
    else:
        json_test_dir = get_test_json_dir()
        file_name = '%s.json' % snapshot_source if snapshot_source and not \
            snapshot_source.endswith('.json') else snapshot_source
        google_source = '%s/../%s' % (json_test_dir, file_name)
        logger.info('Google source: %s', google_source)
        if exists_file(google_source):
            sub_data = json_from_file(google_source)

    if not sub_data:
        logger.error("Google connector file %s does not exist, or it does not contains the valid JSON.", snapshot_source)
    return sub_data
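
The ternary that builds file_name only appends '.json' when it is missing; a small sketch with made-up inputs:

def to_json_name(snapshot_source):
    # mirrors the file_name expression above
    return '%s.json' % snapshot_source if snapshot_source and not \
        snapshot_source.endswith('.json') else snapshot_source

print(to_json_name('gcpStructure'))       # gcpStructure.json
print(to_json_name('gcpStructure.json'))  # gcpStructure.json
print(to_json_name(''))                   # '' (falsy input passes through unchanged)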
Example #3
def mastersnapshots_used_in_mastertests_database(container):
    """
    Get the list of mastersnapshots used in the mastertest files of a container from the database.
    The mastersnapshot list is read from the database. The default database and
    snapshot collections are configured in the config.ini file.
    """
    snapshots = []
    logger.info("Starting to get list of mastersnapshots from database")
    dbname = config_value(DATABASE, DBNAME)
    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    logger.info('Number of mastertest Documents: %s', len(docs))
    if docs and len(docs):
        for doc in docs:
            if doc['json']:
                snapshot = doc['json'].get('masterSnapshot', '')
                if snapshot:
                    if snapshot.endswith('.json'):
                        parts = snapshot.split('.')
                        snapshots.append(parts[0])
                    else:
                        snapshots.append(snapshot)
    return list(set(snapshots))
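
A fabricated doc list showing how '.json' suffixes are stripped and duplicates collapse:

docs = [
    {'json': {'masterSnapshot': 'master-snapshot.json'}},
    {'json': {'masterSnapshot': 'master-snapshot'}},
    {'json': {}},
]
snapshots = []
for doc in docs:
    snapshot = doc['json'].get('masterSnapshot', '')
    if snapshot:
        snapshots.append(snapshot.split('.')[0] if snapshot.endswith('.json') else snapshot)
print(list(set(snapshots)))   # ['master-snapshot'] -- duplicates collapse via set()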
def get_version_for_type(node):
    """Url version of the resource."""
    version = None
    apiversions = None
    logger.info("Get type's version")
    api_source = config_value('AZURE', 'api')
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = api_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
        logger.info('Number of Azure API versions: %s', len(docs))
        if docs and len(docs):
            apiversions = docs[0]['json']
    else:
        apiversions_file = '%s/%s' % (framework_dir(), api_source)
        logger.info(apiversions_file)
        if exists_file(apiversions_file):
            apiversions = json_from_file(apiversions_file)
    if apiversions:
        if node and 'type' in node and node['type'] in apiversions:
            version = apiversions[node['type']]['version']
    return version
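
The shape of the apiversions JSON is implied by the lookup: a map from resource type to an object with a 'version' field. The entries below are fabricated for illustration.

apiversions = {'Microsoft.Compute/virtualMachines': {'version': '2019-07-01'}}  # fabricated
node = {'type': 'Microsoft.Compute/virtualMachines'}
version = None
if node and 'type' in node and node['type'] in apiversions:
    version = apiversions[node['type']]['version']
print(version)   # 2019-07-01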
Example #5
def __init__(self, container, snapshot_refactored_fns):
    """DB is true; will be useful for checks."""
    super().__init__(container)
    self.dbname = config_value(DATABASE, DBNAME)
    self.qry = {'container': container}
    self.sort = [sort_field('timestamp', False)]
    self.isDb = True
Example #6
def get_azure_data(snapshot_source):
    """Fetch the Azure source object from the database or the filesystem."""
    sub_data = {}
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = snapshot_source.split('.')
        qry = {'name': parts[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Snapshot Documents: %s', len(docs))
        if docs and len(docs):
            sub_data = docs[0]['json']
    else:
        json_test_dir = get_test_json_dir()
        file_name = '%s.json' % snapshot_source if snapshot_source and not \
            snapshot_source.endswith('.json') else snapshot_source
        azure_source = '%s/../%s' % (json_test_dir, file_name)
        logger.info('Azure source: %s', azure_source)
        if exists_file(azure_source):
            sub_data = json_from_file(azure_source)
    return sub_data
def populate_container_snapshots_database(container):
    """
    Get the snapshot files from the container with storage system as database.
    The table or collection and database is configured in the config.ini, for the default
    location configuration is "validator" database with "snapshots" as its collections.
    """
    snapshots_status = {}
    dbname = config_value(DATABASE, DBNAME)
    collection = config_value(DATABASE, collectiontypes[SNAPSHOT])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, _id=True)
    if docs and len(docs):
        logger.info('Number of Snapshot Documents: %s', len(docs))
        snapshots = container_snapshots_database(container)
        populated = []
        for doc in docs:
            if doc['json']:
                snapshot = doc['name']
                if snapshot in snapshots and snapshot not in populated:
                    # Take the snapshot and record whether it was populated successfully.
                    # Then pass it back to the validation tests, so that tests are executed
                    # only for snapshots that have been successfully fetched.
                    snapshot_file_data = populate_snapshots_from_json(doc['json'], container)
                    update_one_document(doc, collection, dbname)
                    populated.append(snapshot)
                    snapshots_status[snapshot] = snapshot_file_data
    return snapshots_status
Example #8
def generate_crawler_run_output(container):
    """
    This creates a entry in the output collection, whenever a crawler runs
    to fetch data. 
    """
    timestamp = int(time.time() * 1000)
    sort = [sort_field('timestamp', False)]
    qry = {'container': container}
    output_collection = config_value(DATABASE, collectiontypes[OUTPUT])
    dbname = config_value(DATABASE, DBNAME)

    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    tests = get_documents(collection,
                          dbname=dbname,
                          sort=sort,
                          query=qry,
                          _id=True)
    master_tests = [{
        "id": str(test['_id']),
        "name": test['name']
    } for test in tests]

    mastersp_collection = config_value(DATABASE,
                                       collectiontypes[MASTERSNAPSHOT])
    snapshots = get_documents(mastersp_collection,
                              dbname=dbname,
                              sort=sort,
                              query=qry,
                              _id=True)
    master_snapshots = [{
        "id": str(snapshot['_id']),
        "name": snapshot['name']
    } for snapshot in snapshots]

    db_record = {
        "timestamp": timestamp,
        "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
        "collection": output_collection,
        "container": container,
        "name": "Crawlertest_%s" % (container),
        "type": "crawlerOutput",
        "json": {
            "container": container,
            "contentVersion": "",
            "fileType": "output",
            "snapshot": "",
            "test": "Crawlertest_%s" % (container),
            "log": get_dblog_handler().get_log_collection(),
            "timestamp": timestamp,
            "master_test_list": master_tests,
            "master_snapshot_list": master_snapshots,
            "output_type": "crawlerrun",
            "results": []
        }
    }
    insert_one_document(db_record, db_record['collection'], dbname, False)
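
Note that the checksum field hashes the literal string "{}", so every crawler-run record carries the same constant value:

import hashlib

# md5 of the constant string "{}" -- identical for every record
print(hashlib.md5("{}".encode('utf-8')).hexdigest())   # 99914b932bd37a50b983c5e7c90ae93b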
Example #9
def get_snapshot_id_to_collection_dict(snapshot_file,
                                       container,
                                       dbname,
                                       filesystem=True):
    snapshot_data = {}
    snapshot_json_data = {}
    if filesystem:
        file_name = '%s.json' % snapshot_file if snapshot_file and not \
            snapshot_file.endswith('.json') else snapshot_file
        snapshot_file = '%s/%s/%s' % (get_test_json_dir(), container,
                                      file_name)
        snapshot_json_data = json_from_file(snapshot_file)
    else:
        parts = snapshot_file.split('.')
        collection = config_value(DATABASE, collectiontypes[SNAPSHOT])
        qry = {'container': container, 'name': parts[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Snapshot Documents: %s', len(docs))
        if docs and len(docs):
            snapshot_json_data = docs[0]['json']
    snapshots = get_field_value(snapshot_json_data, 'snapshots')
    if not snapshots:
        logger.info("Snapshot does not contain snapshots...")
        return snapshot_data
    for snapshot in snapshots:
        nodes = get_field_value(snapshot, 'nodes')
        if not nodes:
            logger.info("No nodes in snapshot, continuing to next!...")
            continue
        for node in nodes:
            sid = get_field_value(node, 'snapshotId')
            coll = node['collection'] if 'collection' in node else COLLECTION
            collection = coll.replace('.', '').lower()
            snapshot_data[sid] = collection
            if get_dbtests():
                create_indexes(collection, dbname,
                               [('timestamp', pymongo.TEXT)])
    return snapshot_data
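
A fabricated snapshot document illustrating the structure the function walks, and how collection names are normalized (here 'resources' stands in for the COLLECTION default, which this snippet does not define):

snapshot_json_data = {                       # fabricated example data
    'snapshots': [{
        'nodes': [
            {'snapshotId': 'SNAPSHOT_1', 'collection': 'Microsoft.Compute'},
            {'snapshotId': 'SNAPSHOT_2'},    # no 'collection': falls back to the default
        ]
    }]
}
for snapshot in snapshot_json_data['snapshots']:
    for node in snapshot['nodes']:
        coll = node.get('collection', 'resources')   # 'resources' is a stand-in for COLLECTION
        print(node['snapshotId'], '->', coll.replace('.', '').lower())
# SNAPSHOT_1 -> microsoftcompute
# SNAPSHOT_2 -> resources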
def get_service_name(node_type):
    """
    Get the service name for the compute init function.
    """
    service = None
    params_source = config_value('GOOGLE', 'params')
    paramsversions = None
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = params_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Google Params versions: %s', len(docs))
        if docs and len(docs):
            paramsversions = docs[0]['json']
    else:
        paramsversions_file = '%s/%s' % (framework_dir(), params_source)
        logger.info(paramsversions_file)
        if exists_file(paramsversions_file):
            paramsversions = json_from_file(paramsversions_file)

    check_node_type = node_type
    node_type_list = node_type.split(".")
    if len(node_type_list) > 1:
        del node_type_list[-1]
        check_node_type = ".".join(node_type_list)

    if paramsversions and "serviceName" in paramsversions:
        for service_name, resource_list in paramsversions['serviceName'].items():
            if check_node_type in resource_list:
                service = service_name

    return service
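
A sketch of the node-type trimming and lookup with a fabricated serviceName map: the last segment (the method) is dropped before matching, so 'compute.instances.list' is looked up as 'compute.instances'.

paramsversions = {'serviceName': {'compute': ['compute.instances', 'compute.disks']}}  # fabricated
node_type = 'compute.instances.list'
node_type_list = node_type.split('.')
if len(node_type_list) > 1:
    del node_type_list[-1]                     # drop the trailing method segment
check_node_type = '.'.join(node_type_list)     # 'compute.instances'
service = None
for service_name, resource_list in paramsversions['serviceName'].items():
    if check_node_type in resource_list:
        service = service_name
print(service)   # compute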
Example #11
def get_kubernetes_structure_data(snapshot_source):
    """
    get_kubernetes_structure_data going to get structure data from connector 
    file which specified in snapshot as source field.
    Return structure data as json dictionary
    """
    kubernetes_structure_data = {}
    if json_source():
        qry = {'name': snapshot_source}
        dbname = config_value(DATABASE, DBNAME)
        sort = [sort_field('timestamp', False)]
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        structure_docs = get_documents(collection=collection, dbname=dbname, sort=sort, query=qry, limit=1)
        logger.info('%s fetched %s number of documents: %s', Snapshot.LOGPREFIX, STRUCTURE, len(structure_docs))
        if structure_docs and len(structure_docs):
            kubernetes_structure_data = structure_docs[0]['json']
    else:
        kubernetes_structure_path = get_kubernetes_structure_path(snapshot_source)
        kubernetes_structure_data = json_from_file(kubernetes_structure_path)

    return kubernetes_structure_data
Example #12
def get_api_versions():
    """ get api versions dict """
    global apiversions
    if not apiversions:
        api_source = config_value('AZURE', 'api')
        if json_source():
            dbname = config_value(DATABASE, DBNAME)
            collection = config_value(DATABASE, collectiontypes[STRUCTURE])
            parts = api_source.rsplit('/')
            name = parts[-1].split('.')
            qry = {'name': name[0]}
            sort = [sort_field('timestamp', False)]
            docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
            logger.info('Number of Azure API versions: %s', len(docs))
            if docs and len(docs):
                apiversions = docs[0]['json']
        else:
            apiversions_file = '%s/%s' % (framework_dir(), api_source)
            # logger.info(apiversions_file)
            if exists_file(apiversions_file):
                apiversions = json_from_file(apiversions_file)
    return apiversions
def get_call_kwargs_for_crawler(node, project_id):
    """Get argument names and their values in kwargs for Crawler"""
    kwargs = {}
    logger.info("Get node's kwargs")
    params_source = config_value('GOOGLE', 'params')
    paramsversions = None
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = params_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Google Params versions: %s', len(docs))
        if docs and len(docs):
            paramsversions = docs[0]['json']
    else:
        paramsversions_file = '%s/%s' % (framework_dir(), params_source)
        logger.info(paramsversions_file)
        if exists_file(paramsversions_file):
            paramsversions = json_from_file(paramsversions_file)
    if paramsversions:
        if node and 'type' in node and "crawler_queryprameters" in paramsversions:
            for parameter in paramsversions["crawler_queryprameters"]:
                if node['type'] in parameter['services']:
                    for param in parameter['params']:
                        if param == "project":
                            kwargs['project'] = project_id
                        elif param == "projectId":
                            kwargs['projectId'] = project_id
                        elif param == "zone":
                            kwargs['zone'] = "-"

    return kwargs
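
A fabricated "crawler_queryprameters" entry (the key's spelling matches the params file) showing how the kwargs come out:

paramsversions = {
    'crawler_queryprameters': [
        {'services': ['compute.instances'], 'params': ['project', 'zone']}   # fabricated
    ]
}
node, project_id = {'type': 'compute.instances'}, 'my-project'
kwargs = {}
for parameter in paramsversions['crawler_queryprameters']:
    if node['type'] in parameter['services']:
        for param in parameter['params']:
            if param in ('project', 'projectId'):
                kwargs[param] = project_id
            elif param == 'zone':
                kwargs['zone'] = '-'
print(kwargs)   # {'project': 'my-project', 'zone': '-'}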
Example #14
def get_custom_data(snapshot_source, tabs=2):
    sub_data = {}
    if json_source():
        container = get_from_currentdata('container')
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = snapshot_source.split('.')
        qry = {'name': parts[0], 'container': container}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
        logger.info('Number of Custom Documents: %d', len(docs))
        if docs and len(docs):
            sub_data = docs[0]['json']
    else:
        json_test_dir = get_test_json_dir()
        file_name = '%s.json' % snapshot_source if snapshot_source and not \
            snapshot_source.endswith('.json') else snapshot_source
        custom_source = '%s/../%s' % (json_test_dir, file_name)
        logger.info('\t\tCUSTOM CONNECTOR: %s ', custom_source)
        # logger.info('Custom source: %s', custom_source)
        if exists_file(custom_source):
            sub_data = json_from_file(custom_source)
    return sub_data
Example #15
def get_google_parameters():
    """
    Return the Google parameters object read from the database or the filesystem.
    """
    global google_parameters
    if not google_parameters:
        params_source = config_value('GOOGLE', 'params')
        if json_source():
            dbname = config_value(DATABASE, DBNAME)
            collection = config_value(DATABASE, collectiontypes[STRUCTURE])
            parts = params_source.rsplit('/')
            name = parts[-1].split('.')
            qry = {'name': name[0]}
            sort = [sort_field('timestamp', False)]
            docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
            logger.info('Number of Google Params versions: %s', len(docs))
            if docs and len(docs):
                google_parameters = docs[0]['json']
        else:
            params_file = '%s/%s' % (framework_dir(), params_source)
            logger.info(params_file)
            if exists_file(params_file):
                google_parameters = json_from_file(params_file)
    return google_parameters
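
get_api_versions and get_google_parameters share the same lazy module-global cache; a stripped-down, runnable sketch of the pattern (all names below are stand-ins):

_cache = None   # module-level global, like apiversions / google_parameters

def _load():
    print('loading...')          # runs only on the first call
    return {'version': '1.0'}    # stand-in for the DB/file lookup

def get_cached():
    global _cache
    if not _cache:
        _cache = _load()
    return _cache

get_cached()   # prints 'loading...'
get_cached()   # served from the cache, no reload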
Example #16
def run_container_validation_tests_database(container, snapshot_status=None):
    """ Get the test files from the database"""
    dbname = config_value(DATABASE, DBNAME)
    # For test files
    collection = config_value(DATABASE, collectiontypes[TEST])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    finalresult = True
    if docs and len(docs):
        logger.info('Number of test Documents: %s', len(docs))
        for doc in docs:
            if doc['json']:
                resultset = run_json_validation_tests(doc['json'], container,
                                                      False)
                if resultset:
                    snapshot = doc['json']['snapshot'] if 'snapshot' in doc['json'] else ''
                    test_file = doc['name'] if 'name' in doc else ''
                    dump_output_results(resultset, container, test_file,
                                        snapshot, False)
                    for result in resultset:
                        if 'result' in result:
                            if not re.match(r'passed', result['result'], re.I):
                                finalresult = False
                                break
    else:
        logger.info('No test Documents found!')
        finalresult = False
    # For mastertest files
    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    # snapshots_details_map = _get_snapshot_type_map(container)
    if docs and len(docs):
        logger.info('Number of mastertest Documents: %s', len(docs))
        for doc in docs:
            test_json_data = doc['json']
            if test_json_data:
                snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
                mastersnapshots = defaultdict(list)
                snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
                for snapshot_id, mastersnapshot_id in snapshot_data.items():
                    if isinstance(mastersnapshot_id, list):
                        for msnp_id in mastersnapshot_id:
                            mastersnapshots[msnp_id].append(snapshot_id)
                    else:
                        mastersnapshots[mastersnapshot_id].append(snapshot_id)
                test_json_data['snapshot'] = snapshot_key
                testsets = get_field_value_with_default(
                    test_json_data, 'testSet', [])
                for testset in testsets:
                    testcases = get_field_value_with_default(
                        testset, 'cases', [])
                    testset['cases'] = _get_new_testcases(
                        testcases, mastersnapshots)
                # print(json.dumps(test_json_data, indent=2))
                resultset = run_json_validation_tests(test_json_data,
                                                      container, False,
                                                      snapshot_status)
                if resultset:
                    snapshot = doc['json']['snapshot'] if 'snapshot' in doc['json'] else ''
                    test_file = doc['name'] if 'name' in doc else ''
                    dump_output_results(resultset, container, test_file,
                                        snapshot, False)
                    for result in resultset:
                        if 'result' in result:
                            if not re.match(r'passed', result['result'], re.I):
                                finalresult = False
                                break
    else:
        logger.info('No mastertest Documents found!')
        finalresult = False
    return finalresult
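
The inversion of snapshot_status into mastersnapshots, sketched with fabricated IDs (a snapshot can map to a single mastersnapshot or a list of them):

from collections import defaultdict

snapshot_data = {'SNAP_1': 'MASTER_1', 'SNAP_2': ['MASTER_1', 'MASTER_2']}   # fabricated
mastersnapshots = defaultdict(list)
for snapshot_id, mastersnapshot_id in snapshot_data.items():
    if isinstance(mastersnapshot_id, list):
        for msnp_id in mastersnapshot_id:
            mastersnapshots[msnp_id].append(snapshot_id)
    else:
        mastersnapshots[mastersnapshot_id].append(snapshot_id)
print(dict(mastersnapshots))   # {'MASTER_1': ['SNAP_1', 'SNAP_2'], 'MASTER_2': ['SNAP_2']}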
Example #17
def generate_container_mastersnapshots_database(container):
    """
    Get the mastersnapshot files from the container with storage system as database.
    The table or collection and database is configured in the config.ini, for the default
    location configuration is "validator" database with "mastersnapshots" as its collections.
    """
    snapshots_status = {}
    dbname = config_value(DATABASE, DBNAME)
    collection = config_value(DATABASE, collectiontypes[MASTERSNAPSHOT])
    snp_collection = config_value(DATABASE, collectiontypes[SNAPSHOT])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    try:
        if docs and len(docs):
            logger.info('Number of mastersnapshot Documents: %s', len(docs))
            snapshots = mastersnapshots_used_in_mastertests_database(container)
            populated = []
            for doc in docs:
                if doc['json']:
                    snapshot = doc['name']
                    if "connector" in doc['json'] and "remoteFile" in doc[
                            'json'] and doc['json']["connector"] and doc[
                                'json']["remoteFile"]:
                        _, pull_response = pull_json_data(doc['json'])
                        if not pull_response:
                            logger.info(
                                "Failed to populate master snapshot json from the git repository"
                            )
                            break

                    if snapshot in snapshots:
                        if snapshot not in populated:
                            snp_collection = config_value(DATABASE, collectiontypes[SNAPSHOT])
                            snp_name = '%s_gen' % snapshot
                            snp_qry = {'container': container, 'name': snp_name}
                            snp_sort = [sort_field('timestamp', False)]
                            snp_docs = get_documents(snp_collection,
                                                     dbname=dbname,
                                                     sort=snp_sort,
                                                     query=snp_qry,
                                                     _id=True)
                            snp_json_data = {}
                            if snp_docs and len(snp_docs):
                                logger.info('Number of snapshot Documents: %s',
                                            len(snp_docs))
                                snp_json_data = snp_docs[0]
                            # Take the mastersnapshot and populate the mastersnapshot
                            snapshot_file_data = generate_mastersnapshots_from_json(
                                doc['json'], snp_json_data)
                            # Insert or update the new generated snapshot document with name='*_gen' and same container name.
                            generate_snapshot(doc['json'], snapshot_file_data)
                            if snp_json_data:
                                set_snapshot_activate_and_validate_data(doc['json'], snp_json_data['json'])
                                snp_json_data['json'] = doc['json']
                                snp_json_data["timestamp"] = int(time.time() * 1000)
                                update_one_document(snp_json_data, snp_json_data['collection'], dbname)
                            else:
                                db_record = {
                                    "timestamp": int(time.time() * 1000),
                                    "container": container,
                                    "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
                                    "type": "snapshot",
                                    "name": snp_name,
                                    "collection": "snapshots",
                                    "json": doc['json']
                                }
                                insert_one_document(db_record, db_record['collection'], dbname, False)
                            populated.append(snapshot)
                            snapshots_status[snapshot] = snapshot_file_data
                    else:
                        logger.error("No master testcase found for %s", snapshot)
    except Exception as e:
        generate_crawler_run_output(container)
        raise e
    generate_crawler_run_output(container)
    return snapshots_status
Example #18
def run_container_validation_tests_database(container, snapshot_status=None):
    """ Get the test files from the database"""
    dirpath = None
    dbname = config_value(DATABASE, DBNAME)
    test_files_found = True
    mastertest_files_found = True
    # For test files
    collection = config_value(DATABASE, collectiontypes[TEST])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    finalresult = True
    if docs and len(docs):
        logger.info('Number of test Documents: %s', len(docs))
        for doc in docs:
            if doc['json']:
                try:
                    snapshot = doc['json']['snapshot'] if 'snapshot' in doc['json'] else ''
                    if "connector" in doc['json'] and "remoteFile" in doc['json'] and doc['json']["connector"] and doc['json']["remoteFile"]:
                        dirpath, pull_response = pull_json_data(doc['json'])
                        if not pull_response:
                            return {}
                    resultset = run_json_validation_tests(doc['json'], container, False, dirpath=dirpath)
                    if resultset:
                        test_file = doc['name'] if 'name' in doc else ''
                        dump_output_results(resultset, container, test_file, snapshot, False)
                        for result in resultset:
                            if 'result' in result:
                                if not re.match(r'passed', result['result'], re.I):
                                    finalresult = False
                                    break
                except Exception as e:
                    dump_output_results([], container, "-", snapshot, False)
                    raise e
    else:
        logger.info('No test Documents found!')
        test_files_found = False
        finalresult = False
    # For mastertest files
    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    # snapshots_details_map = _get_snapshot_type_map(container)
    if docs and len(docs):
        logger.info('Number of mastertest Documents: %s', len(docs))
        for doc in docs:
            test_json_data = doc['json']
            if test_json_data:
                snapshot = doc['json']['snapshot'] if 'snapshot' in doc['json'] else ''
                test_file = doc['name'] if 'name' in doc else '-'
                try:
                    if "connector" in test_json_data and "remoteFile" in test_json_data and test_json_data["connector"] and test_json_data["remoteFile"]:
                        dirpath, pull_response = pull_json_data(test_json_data)
                        if not pull_response:
                            return {}
                    snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
                    mastersnapshots = defaultdict(list)
                    snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
                    for snapshot_id, mastersnapshot_id in snapshot_data.items():
                        if isinstance(mastersnapshot_id, list):
                            for msnp_id in mastersnapshot_id:
                                mastersnapshots[msnp_id].append(snapshot_id)
                        else:
                            mastersnapshots[mastersnapshot_id].append(snapshot_id)
                    test_json_data['snapshot'] = snapshot_key
                    testsets = get_field_value_with_default(test_json_data, 'testSet', [])
                    for testset in testsets:
                        testcases = get_field_value_with_default(testset, 'cases', [])
                        testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
                    # print(json.dumps(test_json_data, indent=2))
                    resultset = run_json_validation_tests(test_json_data, container, False, snapshot_status, dirpath=dirpath)
                    if resultset:
                        dump_output_results(resultset, container, test_file, snapshot, False)
                        for result in resultset:
                            if 'result' in result:
                                if not re.match(r'passed', result['result'], re.I):
                                    finalresult = False
                                    break
                except Exception as e:
                    dump_output_results([], container, test_file, snapshot, False)
                    raise e
    else:
        logger.info('No mastertest Documents found!')
        mastertest_files_found = False
        finalresult = False
    if not test_files_found and not mastertest_files_found:
        raise Exception("No compliance tests for this container: %s, add and run!" % container)
    return finalresult
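
Both loops above decide the final result the same way: any result that does not start with 'passed' (case-insensitive) fails the whole run. A runnable sketch with a fabricated result set:

import re

resultset = [{'result': 'Passed'}, {'result': 'failed'}]   # fabricated
finalresult = True
for result in resultset:
    if 'result' in result and not re.match(r'passed', result['result'], re.I):
        finalresult = False
        break
print(finalresult)   # False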