Example #1
0
def get_kubernetes_structure_path(snapshot_source):
    """
    Get the Kubernetes connector file path from the configuration file.
    """
    folder = config_value('KUBERNETES','kubernetesStructureFolder')
    if folder:
        connector_path = '%s/%s/%s.json' % (framework_dir(), folder, snapshot_source)
    else:
        connector_path = '%s/%s.json' % (framework_dir(), snapshot_source)
    return connector_path
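For illustration, a minimal standalone sketch of the same path construction; the framework directory and folder name below are hypothetical values, not taken from any real configuration:

# Standalone sketch; '/opt/prancer' and 'k8sConnectors' are hypothetical values.
def build_connector_path(base_dir, folder, snapshot_source):
    # Mirrors the branching above: include the folder only when it is configured.
    if folder:
        return '%s/%s/%s.json' % (base_dir, folder, snapshot_source)
    return '%s/%s.json' % (base_dir, snapshot_source)

assert build_connector_path('/opt/prancer', 'k8sConnectors', 'cluster1') == '/opt/prancer/k8sConnectors/cluster1.json'
assert build_connector_path('/opt/prancer', None, 'cluster1') == '/opt/prancer/cluster1.json'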
Example #2
0
def test_framework_dir():
    os.chdir(TESTSDIR)
    tests_curdir = os.getcwd()
    fw_dir = framework_dir()
    os.chdir(fw_dir)
    prod_curdir = os.getcwd()
    assert tests_curdir == prod_curdir
Example #3
0
    def __init__(self, container, snapshot_refactored_fns, singleTest=None):
        """ Default isDb is False; singleTest should be set to the test that needs to be run. """
        super().__init__(container, snapshot_refactored_fns)
        self.singleTest = singleTest
        reporting_path = config_value('REPORTING', 'reportOutputFolder')
        self.container_dir = '%s/%s/%s' % (framework_dir(), reporting_path,
                                           container)
Example #4
0
def get_logdir(fw_cfg):
    log_writeable = True
    if not fw_cfg:
        cfgini = framework_config()
        fw_cfg = get_config_data(cfgini)
    logdir = '%s' % framework_dir()
    if fw_cfg and 'LOGGING' in fw_cfg:
        fwconf = fw_cfg['LOGGING']
        if 'logFolder' in fwconf and fwconf['logFolder'] and os.path.isdir(logdir):
            logdir = '%s/%s' % (logdir, fwconf['logFolder'])
            try:
                if not os.path.exists(logdir):
                    os.makedirs(logdir)
            except:
                log_writeable = False
    try:
        if log_writeable:
            from pathlib import Path
            testfile = '%s/%d' % (logdir, int(time.time()))
            Path(testfile).touch()
            if os.path.exists(testfile):
                os.remove(testfile)
            else:
                log_writeable = False
    except:
        log_writeable = False
    return log_writeable, logdir
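A minimal standalone sketch of the same writability probe, assuming any OSError means the directory is not writeable; '/tmp/prancer_logs' is a hypothetical path:

# Standalone sketch of the touch-and-remove writability check used above.
import os
import time
from pathlib import Path

def is_dir_writeable(logdir):
    # Create the directory if needed, then touch and remove a timestamped probe file.
    try:
        os.makedirs(logdir, exist_ok=True)
        testfile = '%s/%d' % (logdir, int(time.time()))
        Path(testfile).touch()
        os.remove(testfile)
        return True
    except OSError:
        return False

print(is_dir_writeable('/tmp/prancer_logs'))  # hypothetical directory; True when writeable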
Example #5
0
def ini_logging_config(fwconfigfile):
    """logging config"""
    from processor.helper.config.config_utils import framework_config, get_config_data, framework_dir, get_base_log_dir
    if not fwconfigfile:
        fwconfigfile = framework_config()
    fw_cfg = get_config_data(fwconfigfile)
    log_config = {
        "level": logging.INFO,
        "propagate": True,
        "size": 10,
        "backups": 10,
        "db": None,
        'logpath': None
    }
    if fw_cfg and 'LOGGING' in fw_cfg:
        base_log_dir = get_base_log_dir()
        if base_log_dir is None:
            base_log_dir = framework_dir()
        logwriteable, logpath = get_logdir(fw_cfg, base_log_dir)
        if logwriteable and logpath:
            log_config['logpath'] = logpath
        fwconf = fw_cfg['LOGGING']
        log_config['level'] = get_loglevel(fwconf)
        log_config['size'] = fwconf.getint('size') if 'size' in fwconf else 10
        log_config['backups'] = fwconf.getint('backups') if 'backups' in fwconf else 10
        log_config['propagate'] = fwconf.getboolean('propagate') if 'propagate' in fwconf else True
        log_config['db'] = fwconf['dbname'] if 'dbname' in fwconf else None
    return log_config
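For reference, a hypothetical example of the dict this function returns, assuming a [LOGGING] section with the level set to DEBUG and a writeable log directory; all values below are illustrative:

# Hypothetical return value of ini_logging_config() for illustration only.
example_log_config = {
    "level": 10,                    # logging.DEBUG, as resolved by get_loglevel(fwconf)
    "propagate": True,
    "size": 10,                     # default when 'size' is absent from [LOGGING]
    "backups": 10,                  # default when 'backups' is absent from [LOGGING]
    "db": None,                     # taken from 'dbname' when present
    "logpath": "/opt/prancer/log",  # hypothetical writeable log directory
}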
Example #6
0
def get_call_kwargs(node):
    """Get argument names and their values in kwargs"""
    kwargs = {"params": {}}
    logger.info("Get node's kwargs")
    params_source = config_value('GOOGLE', 'params')
    paramsversions = None
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = params_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Google Params versions: %s', len(docs))
        if docs and len(docs):
            paramsversions = docs[0]['json']
    else:
        paramsversions_file = '%s/%s' % (framework_dir(), params_source)
        logger.info(paramsversions_file)
        if exists_file(paramsversions_file):
            paramsversions = json_from_file(paramsversions_file)

    path = node['path']
    if paramsversions and "queryprameters" in paramsversions:
        if node['type'] in paramsversions["queryprameters"]:
            for param, parameter_type in paramsversions["queryprameters"][
                    node['type']].items():
                add_argument_parameter(path, kwargs, param, parameter_type)

    return kwargs
Example #7
0
def get_version_for_type(node):
    """Get the API (URL) version for the resource type."""
    version = None
    apiversions = None
    logger.info("Get type's version")
    api_source = config_value('AZURE', 'api')
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = api_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
        logger.info('Number of Azure API versions: %s', len(docs))
        if docs and len(docs):
            apiversions = docs[0]['json']
    else:
        apiversions_file = '%s/%s' % (framework_dir(), api_source)
        logger.info(apiversions_file)
        if exists_file(apiversions_file):
            apiversions = json_from_file(apiversions_file)
    if apiversions:
        if node and 'type' in node and node['type'] in apiversions:
            version = apiversions[node['type']]['version']
    return version
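A minimal sketch of the final lookup step with a hypothetical apiversions document (the resource type and API version shown are made up):

# Hypothetical apiversions document and node for illustration only.
apiversions = {"Microsoft.Compute/virtualMachines": {"version": "2023-03-01"}}
node = {"type": "Microsoft.Compute/virtualMachines"}
version = None
if node and 'type' in node and node['type'] in apiversions:
    version = apiversions[node['type']]['version']
print(version)  # 2023-03-01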
Example #8
0
def get_node_version(node, snapshot):
    """Url version of the resource."""
    version = None
    apiversions = None
    logger.info("Get type's version")
    api_source = config_value('AZURE', 'api')
    if snapshot.isDb:
        parts = api_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        docs = get_documents(snapshot.collection(STRUCTURE),
                             dbname=snapshot.dbname,
                             sort=snapshot.sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Azure API versions: %s', len(docs))
        if docs and len(docs):
            apiversions = docs[0]['json']
    else:
        apiversions_file = '%s/%s' % (framework_dir(), api_source)
        logger.info(apiversions_file)
        if exists_file(apiversions_file):
            apiversions = json_from_file(apiversions_file)
    if apiversions:
        if node and 'type' in node and node['type'] in apiversions:
            version = apiversions[node['type']]['version']
    return version
Example #9
0
def mastersnapshots_used_in_mastertests_filesystem(container):
    """
    Get the list of mastersnapshots used in all mastertest files of a container from the filesystem.
    This builds the list of all the mastersnapshots used in the container.
    The list is used to make sure the snapshots are not generated multiple times when the same
    mastersnapshot is used in different mastertest files of a container.
    The default path is configured in config.ini.
    """
    snapshots = []
    # logger.info("Starting to get list of mastersnapshots used in test files.")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    # logger.info(json_dir)
    # Only get list of mastertest files.
    test_files = get_json_files(json_dir, MASTERTEST)
    # logger.info('\n'.join(test_files))
    for test_file in test_files:
        logger.info('\tMASTERTEST:%s', test_file)
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data[
                'masterSnapshot'] if 'masterSnapshot' in test_json_data else ''
            if snapshot:
                file_name = snapshot if snapshot.endswith(
                    '.json') else '%s.json' % snapshot
                snapshots.append(file_name)
    return list(set(snapshots))  # set, so that a unique list of file names is returned
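A small standalone sketch of the extraction and deduplication step, using hypothetical mastertest documents:

# Hypothetical mastertest documents for illustration only.
test_docs = [
    {"masterSnapshot": "master-snapshot"},
    {"masterSnapshot": "master-snapshot.json"},
    {"notes": "document without a masterSnapshot key"},
]
snapshots = []
for doc in test_docs:
    snapshot = doc.get('masterSnapshot', '')
    if snapshot:
        snapshots.append(snapshot if snapshot.endswith('.json') else '%s.json' % snapshot)
print(list(set(snapshots)))  # ['master-snapshot.json'] - duplicates collapse into one entry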
Example #10
0
def container_snapshots_filesystem(container):
    """
    Get the snapshot and mastersnapshot lists used in all test/mastertest files of a container from the filesystem.
    This builds the list of all the snapshots/mastersnapshots used in the container.
    The list is used to avoid populating the same snapshots/mastersnapshots multiple times when they
    are used in different test/mastertest files of a container.
    The default path is configured in config.ini.
    """
    snapshots = []
    logger.info("Starting to get list of snapshots")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    singletest = get_from_currentdata(SINGLETEST)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    for test_file in test_files:
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if snapshot:
                file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
                if singletest:
                    testsets = get_field_value_with_default(test_json_data, 'testSet', [])
                    for testset in testsets:
                        for testcase in testset['cases']:
                            if ('testId' in testcase and testcase['testId'] == singletest) or \
                                    ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                                if file_name not in snapshots:
                                    snapshots.append(file_name)
                else:
                    snapshots.append(file_name)

    test_files = get_json_files(json_dir, MASTERTEST)
    logger.info('\n'.join(test_files))
    for test_file in test_files:
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['masterSnapshot'] if 'masterSnapshot' in test_json_data else ''
            if snapshot:
                file_name = snapshot if snapshot.endswith('.json') else '%s.json' % snapshot
                parts = file_name.split('.')
                file_name = '%s_gen.%s' % (parts[0], parts[-1])
                if singletest:
                    testsets = get_field_value_with_default(test_json_data, 'testSet', [])
                    for testset in testsets:
                        for testcase in testset['cases']:
                            if ('testId' in testcase and testcase['testId'] == singletest) or \
                                    ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                                if file_name not in snapshots:
                                    snapshots.append(file_name)
                else:
                    snapshots.append(file_name)
    return list(set(snapshots))
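The mastersnapshot file names are rewritten to their generated ('_gen') counterparts; a quick sketch of that renaming with a hypothetical file name:

# 'master-snapshot.json' is a hypothetical mastersnapshot file name.
file_name = 'master-snapshot.json'
parts = file_name.split('.')
print('%s_gen.%s' % (parts[0], parts[-1]))  # master-snapshot_gen.json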
Example #11
0
def container_snapshots_filesystem(container):
    """Get snapshot list used in test files from the filesystem."""
    snapshots = []
    logger.info("Starting to get list of snapshots")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    for test_file in test_files:
        test_json_data = json_from_file(test_file)
        if test_json_data:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if snapshot:
                snapshots.append(snapshot)
    return snapshots
Example #12
0
def get_service_name(node_type):
    """
    Get the service name for the init compute function.
    """
    service = None
    params_source = config_value('GOOGLE', 'params')
    paramsversions = None
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = params_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Google Params versions: %s', len(docs))
        if docs and len(docs):
            paramsversions = docs[0]['json']
    else:
        paramsversions_file = '%s/%s' % (framework_dir(), params_source)
        logger.info(paramsversions_file)
        if exists_file(paramsversions_file):
            paramsversions = json_from_file(paramsversions_file)

    check_node_type = node_type
    node_type_list = node_type.split(".")
    if len(node_type_list) > 1:
        del node_type_list[-1]
        check_node_type = ".".join(node_type_list)

    if paramsversions and "serviceName" in paramsversions:
        for service_name, resource_list in paramsversions['serviceName'].items(
        ):
            if check_node_type in resource_list:
                service = service_name

    return service
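A minimal standalone sketch of the type trimming and serviceName lookup, using a hypothetical Google params document:

# Hypothetical Google params document for illustration only.
paramsversions = {"serviceName": {"compute": ["compute.instances", "compute.disks"]}}
node_type = "compute.instances.get"

check_node_type = node_type
node_type_list = node_type.split(".")
if len(node_type_list) > 1:
    del node_type_list[-1]                      # drop the trailing method segment
    check_node_type = ".".join(node_type_list)  # -> "compute.instances"

service = None
for service_name, resource_list in paramsversions["serviceName"].items():
    if check_node_type in resource_list:
        service = service_name
print(service)  # compute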
Example #13
0
def get_api_versions():
    """ get api versions dict """
    global apiversions
    if not apiversions:
        api_source = config_value('AZURE', 'api')
        if json_source():
            dbname = config_value(DATABASE, DBNAME)
            collection = config_value(DATABASE, collectiontypes[STRUCTURE])
            parts = api_source.rsplit('/')
            name = parts[-1].split('.')
            qry = {'name': name[0]}
            sort = [sort_field('timestamp', False)]
            docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
            logger.info('Number of Azure API versions: %s', len(docs))
            if docs and len(docs):
                apiversions = docs[0]['json']
        else:
            apiversions_file = '%s/%s' % (framework_dir(), api_source)
            # logger.info(apiversions_file)
            if exists_file(apiversions_file):
                apiversions = json_from_file(apiversions_file)
    return apiversions
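The module-level global acts as a simple cache; a standalone sketch of the same pattern, where load_api_versions() is a hypothetical stand-in for the database/filesystem read above:

# Standalone caching sketch; load_api_versions() and its contents are hypothetical.
apiversions = None

def load_api_versions():
    return {"Microsoft.Compute/virtualMachines": {"version": "2023-03-01"}}

def get_api_versions_cached():
    global apiversions
    if not apiversions:
        apiversions = load_api_versions()   # loaded once, then reused
    return apiversions

assert get_api_versions_cached() is get_api_versions_cached()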
Example #14
0
def get_call_kwargs_for_crawler(node, project_id):
    """Get argument names and their values in kwargs for Crawler"""
    kwargs = {}
    logger.info("Get node's kwargs")
    params_source = config_value('GOOGLE', 'params')
    paramsversions = None
    if json_source():
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[STRUCTURE])
        parts = params_source.rsplit('/')
        name = parts[-1].split('.')
        qry = {'name': name[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Google Params versions: %s', len(docs))
        if docs and len(docs):
            paramsversions = docs[0]['json']
    else:
        paramsversions_file = '%s/%s' % (framework_dir(), params_source)
        logger.info(paramsversions_file)
        if exists_file(paramsversions_file):
            paramsversions = json_from_file(paramsversions_file)
    if paramsversions:
        if node and 'type' in node and "crawler_queryprameters" in paramsversions:
            for prameter in paramsversions["crawler_queryprameters"]:
                if node['type'] in prameter['services']:
                    for param in prameter['params']:
                        if param == "project":
                            kwargs['project'] = project_id
                        elif param == "projectId":
                            kwargs['projectId'] = project_id
                        elif param == "zone":
                            kwargs['zone'] = "-"

    return kwargs
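A small standalone sketch of the crawler parameter mapping, assuming a hypothetical crawler_queryprameters entry and project id:

# Hypothetical crawler_queryprameters entry for illustration only.
entry = {"services": ["compute.instances"], "params": ["project", "zone"]}
node = {"type": "compute.instances"}
project_id = "my-project"   # hypothetical project id

kwargs = {}
if node['type'] in entry['services']:
    for param in entry['params']:
        if param == "project":
            kwargs['project'] = project_id
        elif param == "projectId":
            kwargs['projectId'] = project_id
        elif param == "zone":
            kwargs['zone'] = "-"
print(kwargs)  # {'project': 'my-project', 'zone': '-'}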
Example #15
0
def get_google_parameters():
    """
    Return the Google parameters object read from the database or the filesystem.
    """
    global google_parameters
    if not google_parameters:
        params_source = config_value('GOOGLE', 'params')
        if json_source():
            dbname = config_value(DATABASE, DBNAME)
            collection = config_value(DATABASE, collectiontypes[STRUCTURE])
            parts = params_source.rsplit('/')
            name = parts[-1].split('.')
            qry = {'name': name[0]}
            sort = [sort_field('timestamp', False)]
            docs = get_documents(collection, dbname=dbname, sort=sort, query=qry, limit=1)
            logger.info('Number of Google Params versions: %s', len(docs))
            if docs and len(docs):
                google_parameters = docs[0]['json']
        else:
            params_file = '%s/%s' % (framework_dir(), params_source)
            logger.info(params_file)
            if exists_file(params_file):
                google_parameters = json_from_file(params_file)
    return google_parameters
Example #16
0
def run_container_validation_tests_filesystem(container, snapshot_status=None):
    """Get test files from the filesystem."""
    # logger.info("Starting validation tests")
    logger.info("VALIDATION:")
    logger.info("\tCollection: %s,  Type: FILESYSTEM", container)
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info('\tLOCATION: %s', json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    # logger.info('\n'.join(test_files))
    result = True
    for test_file in test_files:
        logger.info('\tCOLLECTION: %s', test_file)
        val = run_file_validation_tests(test_file, container, True, snapshot_status)
        result = result and val
    if test_files:
        # return the result value if "test" file is processed collection
        logger.critical("VALIDATION COMPLETE:")
        return result

    # mastertest files
    test_files = get_json_files(json_dir, MASTERTEST)
    # logger.info('\n'.join(test_files))
    if not test_files:
        logger.error("ERROR: No `test` or `mastertest` file found. collection should contain either `test` or `mastertest` file")
        return False

    finalresult = result
    for test_file in test_files:
        logger.info('\tCOLLECTION: %s', test_file)
        # logger.info("*" * 50)
        # logger.info("validator tests: %s", test_file)
        dirpath = None
        test_json_data = json_from_file(test_file)
        if not test_json_data:
            logger.info("Test file %s looks to be empty, next!...", test_file)
            continue

        if "connector" in test_json_data and "remoteFile" in test_json_data and test_json_data["connector"] and test_json_data["remoteFile"]:
            dirpath, pull_response = pull_json_data(test_json_data)
            if not pull_response:
                return {}

        snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
        mastersnapshots = defaultdict(list)
        snapshot_data = snapshot_status[snapshot_key] if snapshot_key in snapshot_status else {}
        for snapshot_id, mastersnapshot_id in snapshot_data.items():
            if isinstance(mastersnapshot_id, list):
                for master_snapshot_id in mastersnapshot_id:
                    mastersnapshots[master_snapshot_id].append(snapshot_id)
            elif isinstance(mastersnapshot_id, str):
                mastersnapshots[mastersnapshot_id].append(snapshot_id)
        if not mastersnapshots:
            logger.error("No generated snapshots found for validation.")
            continue
        test_json_data['snapshot'] = snapshot_key
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            testcases = get_field_value_with_default(testset, 'cases', [])
            testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
        # print(json.dumps(test_json_data, indent=2))
        singletest = get_from_currentdata(SINGLETEST)
        if singletest:
            for testset in testsets:
                newtestcases = []
                for testcase in testset['cases']:
                    if ('testId' in testcase and  testcase['testId'] == singletest) or \
                            ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                        newtestcases.append(testcase)
                testset['cases'] = newtestcases
        resultset = run_json_validation_tests(test_json_data, container, True, snapshot_status, dirpath=dirpath)
        if test_json_data.get('testSet') and not resultset:
            logger.error('\tERROR: Testset does not contain any testcases, or all testcases are skipped due to invalid rules.')
        elif resultset:
            snapshot = test_json_data['snapshot'] if 'snapshot' in test_json_data else ''
            if singletest:
                print(json.dumps(resultset, indent=2))
            else:
                dump_output_results(resultset, container, test_file, snapshot, True)
            for result in resultset:
                if 'result' in result:
                    if not re.match(r'passed', result['result'], re.I):
                        finalresult = False
                        break
        else:
            logger.error('\tERROR: No mastertest Documents found!')
            finalresult = False
    logger.critical("VALIDATION COMPLETE:")
    return finalresult
Example #17
0
def get_dir_path(folder_path):
    """ Return the directory path containing the deployment and parameter JSON files. """
    fw_dir = framework_dir()
    deployment_dir_path = '%s/%s' % (fw_dir, folder_path)
    return deployment_dir_path.replace('//', '/')
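A quick sketch of the path normalisation, using hypothetical directory values:

# '/opt/prancer/' and 'deployments/webapp' are hypothetical values.
fw_dir = '/opt/prancer/'
deployment_dir_path = '%s/%s' % (fw_dir, 'deployments/webapp')
print(deployment_dir_path.replace('//', '/'))  # /opt/prancer/deployments/webapp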
Example #18
0
def test_get_solution_dir():
    os.chdir(TESTSDIR)
    tests_curdir = os.getcwd()
    os.chdir(framework_dir())
    prod_curdir = os.getcwd()
    assert tests_curdir == prod_curdir
Example #19
0
def validator_main(arg_vals=None, delete_rundata=True):
    """
    Main driver utility for running validator tests.
    arg_vals, if passed, should be an array of strings. Some examples:
      1) arg_vals = ['container1'] - Use container1 to process test files from the filesystem.
      2) arg_vals = ['container1', '--db', 'FULL'] - Use container1 to process test documents from the database.
    When arg_vals is None, argparse parses sys.argv instead.
    Successful argument parsing initializes the system for the run.
    Cleanup runs on exit. The return values of this main entry function are:
       0 - Success, tests executed.
       1 - Failure, test execution error.
       2 - Exception: missing config.ini, Mongo connection failure or HTTP connection exception;
           the test execution could not be started or completed.
    """
    cmd_parser = argparse.ArgumentParser(
        "prancer",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''\
Example: prancer collection1
Runs the prancer framework based on the configuration files available in collection1 folder
                                         ''')
    cmd_parser.add_argument('-v',
                            '--version',
                            action='version',
                            version=("Prancer %s" % __version__),
                            help='Show prancer version')
    cmd_parser.add_argument(
        'container',
        metavar='collection',
        action='store',
        help=
        'The name of the folder which contains the collection of files related to one scenario'
    )
    cmd_parser.add_argument(
        '--db',
        action='store',
        default=None,
        choices=['NONE', 'SNAPSHOT', 'FULL'],
        help=
        '''NONE - Database will not be used, all the files reside on file system,
                            SNAPSHOT - Resource snapshots will be stored in db, everything else will be on file system,
                            FULL - tests, configurations, outputs and snapshots will be stored in the database'''
    )
    cmd_parser.add_argument('--crawler',
                            action='store_true',
                            default=False,
                            help='Crawls and generates snapshot files only')
    cmd_parser.add_argument('--test',
                            action='store',
                            default=None,
                            help='Run a single test in NODB mode')
    cmd_parser.add_argument('--customer',
                            action='store',
                            default=None,
                            help='customer name for config')
    cmd_parser.add_argument(
        '--connector',
        action='store',
        default=None,
        help=
        'specify the name of the connector which you want to run from the collection'
    )
    cmd_parser.add_argument(
        '--branch',
        action='store',
        default=None,
        help=
        'specify the name of the branch to populate snapshots, for the filesystem connector'
    )
    args = cmd_parser.parse_args(arg_vals)

    retval = 2
    set_customer()
    cfg_error, config_ini = search_config_ini()
    if cfg_error:
        return retval

    if args.customer:
        set_customer(args.customer)
    if args.db:
        if args.db.upper() in DBVALUES:
            args.db = DBVALUES.index(args.db.upper())
        else:
            args.db = DBVALUES.index(SNAPSHOT)
    else:
        nodb = config_value(TESTS, DBTESTS)
        if nodb and nodb.upper() in DBVALUES:
            args.db = DBVALUES.index(nodb.upper())
        else:
            args.db = DBVALUES.index(SNAPSHOT)

    if args.test:
        args.db = DBVALUES.index(NONE)

    # Check if we want to run in NO DATABASE MODE
    if args.db:
        # returns the db connection handle and status, handle is ignored.
        from processor.database.database import init_db, TIMEOUT
        _, db_init_res = init_db()
        if not db_init_res:
            msg = "Mongo DB connection timed out after %d ms, check the mongo server, exiting!....." % TIMEOUT
            console_log(msg, currentframe())
            return retval

    # Check the log directory and also check if it is writeable.
    from processor.logging.log_handler import init_logger, get_logdir, default_logging, add_file_logging
    fw_cfg = get_config_data(framework_config())
    log_writeable, logdir = get_logdir(fw_cfg, framework_dir())
    if not log_writeable:
        console_log(
            'Logging directory(%s) is not writeable, exiting....' % logdir,
            currentframe())
        return retval

    # Alls well from this point, check container exists in the directory configured
    retval = 0
    logger = init_logger(args.db, framework_config())
    # logger = add_file_logging(config_ini)
    logger.info("START: Argument parsing and Run Initialization. Version %s",
                __version__)

    from processor.connector.snapshot import populate_container_snapshots
    from processor.connector.validation import run_container_validation_tests
    from processor.crawler.master_snapshot import generate_container_mastersnapshots
    try:
        from processor_enterprise.notifications.notification import check_send_notification
    except:
        check_send_notification = lambda container, db: None

    logger.info("Command: '%s %s'",
                sys.executable.rsplit('/', 1)[-1], ' '.join(sys.argv))
    try:
        from processor.helper.config.rundata_utils import init_currentdata, \
            delete_currentdata, put_in_currentdata
        # Delete the rundata at the end of the script as per caller, default is True.
        if delete_rundata:
            atexit.register(delete_currentdata)
        init_currentdata()

        logger.info("Using Framework dir: %s", framework_dir())
        logger.info("Args: %s", args)
        logger.debug("Running tests from %s.", DBVALUES[args.db])
        fs = True if args.db > DBVALUES.index(SNAPSHOT) else False
        put_in_currentdata('jsonsource', fs)
        put_in_currentdata(DBTESTS, args.db)
        put_in_currentdata('container', args.container)
        # if args.db == DBVALUES.index(FULL):
        #     from processor.logging.log_handler import get_dblogger
        #     log_name = get_dblogger()
        #     if log_name:
        #         pid = open('/tmp/pid_%s' % os.getpid(), 'w')
        #         pid.write(log_name)
        #         pid.close()
        if args.customer:
            put_in_currentdata(CUSTOMER, args.customer)
        if args.test:
            put_in_currentdata(SINGLETEST, args.test)
        else:
            put_in_currentdata(SINGLETEST, False)
        if args.connector:
            put_in_currentdata("connector", args.connector)
        if args.branch:
            put_in_currentdata("branch", args.branch)
        if not args.db:
            retval = 0 if container_exists(args.container) else 2
            if retval:
                logger.critical(
                    "Container(%s) is not present in Framework dir: %s",
                    args.container,
                    framework_dir(),
                    extra={"type": "critical"})
                # TODO: Log the path the framework looked for.
                return retval
        if args.crawler:
            # Generate snapshot files from here.
            generate_container_mastersnapshots(args.container, fs)
        else:
            # Normal flow
            snapshot_status = populate_container_snapshots(args.container, fs)
            logger.debug(json.dumps(snapshot_status, indent=2))
            if snapshot_status:
                status = run_container_validation_tests(
                    args.container, fs, snapshot_status)
                retval = 0 if status else 1
            else:
                retval = 1
            check_send_notification(args.container, args.db)
    except (Exception, KeyboardInterrupt) as ex:
        logger.error("Execution exception: %s", ex)
        print(traceback.format_exc())
        retval = 2
    return retval
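A hedged usage sketch of this entry point, following the return codes documented in the docstring; 'container1' is a hypothetical collection name and the commented line shows an assumed full-database run:

# Hedged usage sketch; assumes a 'container1' collection exists under the configured
# framework directory. validator_main is the function defined above.
import sys

if __name__ == '__main__':
    retval = validator_main(['container1'])                    # filesystem run
    # retval = validator_main(['container1', '--db', 'FULL'])  # full database run
    sys.exit(retval)  # 0 = success, 1 = test failures, 2 = setup/connection error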
Example #20
0
def run_container_validation_tests_filesystem(container, snapshot_status=None):
    """Get test files from the filesystem."""
    logger.info("Starting validation tests")
    reporting_path = config_value('REPORTING', 'reportOutputFolder')
    json_dir = '%s/%s/%s' % (framework_dir(), reporting_path, container)
    logger.info(json_dir)
    test_files = get_json_files(json_dir, JSONTEST)
    logger.info('\n'.join(test_files))
    result = True
    for test_file in test_files:
        val = run_file_validation_tests(test_file, container, True,
                                        snapshot_status)
        result = result and val
    # mastertest files
    test_files = get_json_files(json_dir, MASTERTEST)
    logger.info('\n'.join(test_files))
    finalresult = True
    for test_file in test_files:
        logger.info("*" * 50)
        logger.info("validator tests: %s", test_file)
        test_json_data = json_from_file(test_file)
        if not test_json_data:
            logger.info("Test file %s looks to be empty, next!...", test_file)
            continue
        snapshot_key = '%s_gen' % test_json_data['masterSnapshot']
        mastersnapshots = defaultdict(list)
        snapshot_data = snapshot_status[
            snapshot_key] if snapshot_key in snapshot_status else {}
        for snapshot_id, mastersnapshot_id in snapshot_data.items():
            if isinstance(mastersnapshot_id, list):
                for master_snapshot_id in mastersnapshot_id:
                    mastersnapshots[master_snapshot_id].append(snapshot_id)
            elif isinstance(mastersnapshot_id, str):
                mastersnapshots[mastersnapshot_id].append(snapshot_id)
        test_json_data['snapshot'] = snapshot_key
        testsets = get_field_value_with_default(test_json_data, 'testSet', [])
        for testset in testsets:
            testcases = get_field_value_with_default(testset, 'cases', [])
            testset['cases'] = _get_new_testcases(testcases, mastersnapshots)
        # print(json.dumps(test_json_data, indent=2))
        singletest = get_from_currentdata(SINGLETEST)
        if singletest:
            for testset in testsets:
                newtestcases = []
                for testcase in testset['cases']:
                    if ('testId' in testcase and  testcase['testId'] == singletest) or \
                            ('masterTestId' in testcase and testcase['masterTestId'] == singletest):
                        newtestcases.append(testcase)
                testset['cases'] = newtestcases
        resultset = run_json_validation_tests(test_json_data, container, False,
                                              snapshot_status)
        if resultset:
            snapshot = test_json_data[
                'snapshot'] if 'snapshot' in test_json_data else ''
            if singletest:
                print(json.dumps(resultset, indent=2))
            else:
                dump_output_results(resultset, container, test_file, snapshot,
                                    True)
            for result in resultset:
                if 'result' in result:
                    if not re.match(r'passed', result['result'], re.I):
                        finalresult = False
                        break
        else:
            logger.info('No mastertest Documents found!')
            finalresult = False
    return finalresult