Example #1
def test_delete_run_config():
    runcfg = framework_currentdata()
    init_currentdata()
    assert os.path.exists(runcfg)
    put_in_currentdata('token', 'abcd')
    delete_currentdata()
    assert not os.path.exists(runcfg)
Example #2
def populate_json_main(arg_vals=None):
    """Main driver utility for running validator tests."""
    logger.info("Comand: '%s %s'",
                sys.executable.rsplit('/', 1)[-1], ' '.join(sys.argv))
    cmd_parser = argparse.ArgumentParser("Populate json files")
    cmd_parser.add_argument('container',
                            action='store',
                            help='Container name for the json files.')
    # cmd_parser.add_argument('--dir', action='store', default=None,
    #                         help='Populate all json files from this directory.')
    cmd_parser.add_argument('--file',
                            action='store',
                            default=None,
                            help='Populate only this file')
    # cmd_parser.add_argument('--type', action='store', default='structure',
    #                         choices=['test', 'structure', 'snapshot', 'output', 'notifications'])

    args = cmd_parser.parse_args(arg_vals)
    # Delete the rundata at the end of the script.
    atexit.register(delete_currentdata)
    logger.info(args)
    init_currentdata()
    dbname, db_init_res = init_db()
    if db_init_res:
        for _, collection in collectiontypes.items():
            create_indexes(config_value(DATABASE, collection), dbname,
                           [('timestamp', TEXT)])
        populate_json_files(args)
    else:
        logger.error("Error initializing DB, exiting....!")
    return 0
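
A minimal invocation sketch, assuming the caller passes arg_vals explicitly; the container name 'container1' and the file name 'snapshot.json' are hypothetical examples, not values from the source.

# Hypothetical usage of populate_json_main(); names are illustrative only.
if __name__ == '__main__':
    sys.exit(populate_json_main(['container1', '--file', 'snapshot.json']))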
Example #3
def test_delete_from_run_config(load_json_file):
    runcfg = framework_currentdata()
    init_currentdata()
    assert os.path.exists(runcfg)
    put_in_currentdata('a', 'val1')
    runconfig = load_json_file(runcfg)
    assert runconfig and runconfig.get('a') == 'val1'
    delete_from_currentdata('a')
    runconfig = load_json_file(runcfg)
    assert not (runconfig and 'a' in runconfig)
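
The test above receives a load_json_file fixture as a pytest argument; a minimal sketch of such a fixture, assuming the run-config file is plain JSON (an illustration, not the project's actual conftest):

import json

import pytest


@pytest.fixture
def load_json_file():
    """Return a helper that loads a JSON file, or None if it cannot be read."""
    def _load(path):
        try:
            with open(path) as json_handle:
                return json.load(json_handle)
        except (OSError, ValueError):
            return None
    return _load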
Example #4
def main(arg_vals=None):
    """Main driver utility for running validator tests."""
    logger.info("Comand: '%s %s'",
                sys.executable.rsplit('/', 1)[-1], ' '.join(sys.argv))
    cmd_parser = argparse.ArgumentParser("Comparator functional tests.")
    cmd_parser.add_argument('container',
                            action='store',
                            help='Container tests directory.')
    cmd_parser.add_argument('testfile',
                            action='store',
                            help='test file in the container')

    args = cmd_parser.parse_args(arg_vals)
    # Delete the rundata at the end of the script.
    atexit.register(delete_currentdata)
    logger.info(args)
    init_currentdata()
    init_db()
    snapshot_dir, snapshot_files = get_container_snapshot_json_files(
        args.container)
    if not snapshot_files:
        logger.info("No Snapshot files in %s, exiting!...", snapshot_dir)
        return False
    logger.info('Snapshot files: %s', snapshot_files)
    dbname = config_value(DATABASE, DBNAME)
    snapshot_ids = {}
    for fl in snapshot_files:
        # Accumulate ids from every snapshot file; populate_snapshots_from_file()
        # returns a mapping of snapshotId to collection (see the loop below).
        snapshot_ids.update(populate_snapshots_from_file(fl))
    logger.debug(snapshot_ids)
    for sid, coll in snapshot_ids.items():
        docs = get_documents(coll, {'snapshotId': sid},
                             dbname,
                             sort=[('timestamp', pymongo.DESCENDING)],
                             limit=1)
        logger.debug('Number of Snapshot Documents: %s', len(docs))
        if docs:
            doc = docs[0]['json']
            logger.info('#' * 80)
            logger.info(json.dumps(doc, indent=2))
    test_file = '%s/%s' % (get_container_dir(args.container), args.testfile)
    test_json = json_from_file(test_file)
    if not test_json:
        return
    logger.debug(test_json)
    otherdata = {'dbname': dbname, 'snapshots': snapshot_ids}
    # for testcase in test_json['testSet'][0]['cases']:
    for testset in test_json['testSet']:
        for testcase in testset['cases']:
            rulestr = get_field_value(testcase, 'rule')
            if rulestr:
                main_comparator(rulestr, otherdata)
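
The loops above only read 'testSet', each test set's 'cases', and each case's 'rule'; a hypothetical minimal test document with that shape (the rule expression is illustrative, not a real rule from the source):

# Hypothetical minimal structure consumed by main(); values are illustrative only.
sample_test_json = {
    'testSet': [
        {
            'cases': [
                {'rule': "{snapshot1}.location == 'eastus'"}
            ]
        }
    ]
}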
Example #5
def test_init_config():
    runcfg = framework_currentdata()
    rundir = os.path.dirname(runcfg)
    # if os.path.exists(rundir):
    #    shutil.rmtree(rundir)
    # assert False == os.path.exists(rundir)
    assert os.path.exists(runcfg)
    init_currentdata()
    assert os.path.exists(rundir)
    assert os.path.exists(runcfg)
    os.remove(runcfg)
    assert os.path.exists(rundir)
    assert not os.path.exists(runcfg)
    init_currentdata()
    assert os.path.exists(rundir)
    assert os.path.exists(runcfg)
Example #6
def terraform_to_json_main(arg_vals=None):
    """Main driver utility for converting terraform to json files."""
    logger = getlogger()
    logger.info("Comand: '%s %s'",
                sys.executable.rsplit('/', 1)[-1], ' '.join(sys.argv))
    cmd_parser = argparse.ArgumentParser("Convert terraform to json files")
    cmd_parser.add_argument('terraform',
                            action='store',
                            help='Full path of the terraform file.')
    cmd_parser.add_argument('--output',
                            action='store',
                            default=None,
                            help='Path to store the file.')
    args = cmd_parser.parse_args(arg_vals)
    # Delete the rundata at the end of the script.
    atexit.register(delete_currentdata)
    logger.info(args)
    init_currentdata()
    convert_terraform_to_json(args.terraform, args.output)
    return 0
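
A calling sketch for this utility; both paths below are hypothetical examples.

# Hypothetical invocation of terraform_to_json_main(); paths are made up for illustration.
rc = terraform_to_json_main(['deploy/main.tf', '--output', 'deploy/main.json'])
# terraform_to_json_main() returns 0 after the conversion completes.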
Example #7
def test_add_to_run_config(load_json_file):
    runcfg = framework_currentdata()
    init_currentdata()
    assert os.path.exists(runcfg)
    put_in_currentdata('a', 'val1')
    runconfig = load_json_file(runcfg)
    assert runconfig and runconfig.get('a') == 'val1'
    put_in_currentdata('b', ['val1'])
    runconfig = load_json_file(runcfg)
    assert runconfig and runconfig.get('b') == ['val1']
    # Putting another value under an existing list key appends to the list.
    put_in_currentdata('b', 'val2')
    runconfig = load_json_file(runcfg)
    assert runconfig and runconfig.get('b') == ['val1', 'val2']
Example #8
def validator_main(arg_vals=None, delete_rundata=True):
    """
    Main driver utility for running validator tests
    The arg_vals, if passed should be array of string. A set of examples are:
      1) arg_vals = ['container1'] - Use container1 to process test files from filesystem
      2) args_vals = ['container1', '--db'] - Use container1 to process test documents from database.
    When arg_vals is None, the argparse library parses from sys.argv array list.
    The successful argument parsing initializes the system for the run.
    On exit will run cleanup. The return values of this main entry function are as:
       0 - Success, tests executed.
       1 - Failure, Tests execution error.
       2 - Exception, missing config.ini, Mongo connection failure or http connection exception,
           the tests execution could not be started or completed.
    """
    cmd_parser = argparse.ArgumentParser(
        "prancer",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''\
Example: prancer collection1
Runs the prancer framework based on the configuration files available in collection1 folder
                                         ''')
    cmd_parser.add_argument('-v',
                            '--version',
                            action='version',
                            version=("Prancer %s" % __version__),
                            help='Show prancer version')
    cmd_parser.add_argument(
        'container',
        metavar='collection',
        action='store',
        help=
        'The name of the folder which contains the collection of files related to one scenario'
    )
    cmd_parser.add_argument(
        '--db',
        action='store',
        default=None,
        choices=['NONE', 'SNAPSHOT', 'FULL'],
        help=
        '''NONE - Database will not be used, all the files reside on file system,
                            SNAPSHOT - Resource snapshots will be stored in db, everything else will be on file system,
                            FULL - tests, configurations, outputs and snapshots will be stored in the database'''
    )
    cmd_parser.add_argument('--crawler',
                            action='store_true',
                            default=False,
                            help='Crawls and generates snapshot files only')
    cmd_parser.add_argument('--test',
                            action='store',
                            default=None,
                            help='Run a single test in NODB mode')
    cmd_parser.add_argument('--customer',
                            action='store',
                            default=None,
                            help='customer name for config')
    cmd_parser.add_argument(
        '--connector',
        action='store',
        default=None,
        help=
        'specify the name of the connector which you want to run from the collection'
    )
    cmd_parser.add_argument(
        '--branch',
        action='store',
        default=None,
        help=
        'specify the name of the branch to populate snapshots, for the filesystem connector'
    )
    args = cmd_parser.parse_args(arg_vals)

    retval = 2
    set_customer()
    cfg_error, config_ini = search_config_ini()
    if cfg_error:
        return retval

    if args.customer:
        set_customer(args.customer)
    if args.db:
        if args.db.upper() in DBVALUES:
            args.db = DBVALUES.index(args.db.upper())
        else:
            args.db = DBVALUES.index(SNAPSHOT)
    else:
        nodb = config_value(TESTS, DBTESTS)
        if nodb and nodb.upper() in DBVALUES:
            args.db = DBVALUES.index(nodb.upper())
        else:
            args.db = DBVALUES.index(SNAPSHOT)

    if args.test:
        args.db = DBVALUES.index(NONE)

    # Check if we want to run in NO DATABASE MODE
    if args.db:
        # returns the db connection handle and status, handle is ignored.
        from processor.database.database import init_db, TIMEOUT
        _, db_init_res = init_db()
        if not db_init_res:
            msg = "Mongo DB connection timed out after %d ms, check the mongo server, exiting!....." % TIMEOUT
            console_log(msg, currentframe())
            return retval

    # Check the log directory and also check if it is writeable.
    from processor.logging.log_handler import init_logger, get_logdir, default_logging, add_file_logging
    fw_cfg = get_config_data(framework_config())
    log_writeable, logdir = get_logdir(fw_cfg, framework_dir())
    if not log_writeable:
        console_log(
            'Logging directory(%s) is not writeable, exiting....' % logdir,
            currentframe())
        return retval

    # All is well from this point; check that the container exists in the configured directory.
    retval = 0
    logger = init_logger(args.db, framework_config())
    # logger = add_file_logging(config_ini)
    logger.info("START: Argument parsing and Run Initialization. Version %s",
                __version__)

    from processor.connector.snapshot import populate_container_snapshots
    from processor.connector.validation import run_container_validation_tests
    from processor.crawler.master_snapshot import generate_container_mastersnapshots
    try:
        from processor_enterprise.notifications.notification import check_send_notification
    except ImportError:
        check_send_notification = lambda container, db: None

    logger.info("Command: '%s %s'",
                sys.executable.rsplit('/', 1)[-1], ' '.join(sys.argv))
    try:
        from processor.helper.config.rundata_utils import init_currentdata, \
            delete_currentdata, put_in_currentdata
        # Delete the rundata at the end of the script as per caller, default is True.
        if delete_rundata:
            atexit.register(delete_currentdata)
        init_currentdata()

        logger.info("Using Framework dir: %s", framework_dir())
        logger.info("Args: %s", args)
        logger.debug("Running tests from %s.", DBVALUES[args.db])
        fs = args.db > DBVALUES.index(SNAPSHOT)
        put_in_currentdata('jsonsource', fs)
        put_in_currentdata(DBTESTS, args.db)
        put_in_currentdata('container', args.container)
        # if args.db == DBVALUES.index(FULL):
        #     from processor.logging.log_handler import get_dblogger
        #     log_name = get_dblogger()
        #     if log_name:
        #         pid = open('/tmp/pid_%s' % os.getpid(), 'w')
        #         pid.write(log_name)
        #         pid.close()
        if args.customer:
            put_in_currentdata(CUSTOMER, args.customer)
        if args.test:
            put_in_currentdata(SINGLETEST, args.test)
        else:
            put_in_currentdata(SINGLETEST, False)
        if args.connector:
            put_in_currentdata("connector", args.connector)
        if args.branch:
            put_in_currentdata("branch", args.branch)
        if not args.db:
            retval = 0 if container_exists(args.container) else 2
            if retval:
                logger.critical(
                    "Container(%s) is not present in Framework dir: %s",
                    args.container,
                    framework_dir(),
                    extra={"type": "critical"})
                # TODO: Log the path the framework looked for.
                return retval
        if args.crawler:
            # Generate snapshot files from here.
            generate_container_mastersnapshots(args.container, fs)
        else:
            # Normal flow
            snapshot_status = populate_container_snapshots(args.container, fs)
            logger.debug(json.dumps(snapshot_status, indent=2))
            if snapshot_status:
                status = run_container_validation_tests(
                    args.container, fs, snapshot_status)
                retval = 0 if status else 1
            else:
                retval = 1
            check_send_notification(args.container, args.db)
    except (Exception, KeyboardInterrupt) as ex:
        logger.error("Execution exception: %s", ex)
        print(traceback.format_exc())
        retval = 2
    return retval
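
A calling sketch based on the docstring's return-code contract; the collection name is hypothetical, and delete_rundata=False is used here only so the run data survives for inspection.

# Hypothetical driver: run the validator for one collection and report the
# documented return codes (0 success, 1 test failures, 2 setup/exception).
retcode = validator_main(['collection1'], delete_rundata=False)
messages = {
    0: 'tests executed successfully',
    1: 'tests executed with failures',
    2: 'run could not start or complete: configuration, database or connection problem',
}
print(messages.get(retcode, 'unknown return code'))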